diff options
Diffstat (limited to 'drivers')
536 files changed, 9969 insertions, 4877 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig index 694d5a70d6ce..c70d6e45dc10 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig | |||
@@ -134,8 +134,6 @@ source "drivers/staging/Kconfig" | |||
134 | 134 | ||
135 | source "drivers/platform/Kconfig" | 135 | source "drivers/platform/Kconfig" |
136 | 136 | ||
137 | source "drivers/soc/Kconfig" | ||
138 | |||
139 | source "drivers/clk/Kconfig" | 137 | source "drivers/clk/Kconfig" |
140 | 138 | ||
141 | source "drivers/hwspinlock/Kconfig" | 139 | source "drivers/hwspinlock/Kconfig" |
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 8951cefb0a96..e6c3ddd92665 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig | |||
@@ -315,6 +315,12 @@ config ACPI_HOTPLUG_MEMORY | |||
315 | To compile this driver as a module, choose M here: | 315 | To compile this driver as a module, choose M here: |
316 | the module will be called acpi_memhotplug. | 316 | the module will be called acpi_memhotplug. |
317 | 317 | ||
318 | config ACPI_HOTPLUG_IOAPIC | ||
319 | bool | ||
320 | depends on PCI | ||
321 | depends on X86_IO_APIC | ||
322 | default y | ||
323 | |||
318 | config ACPI_SBS | 324 | config ACPI_SBS |
319 | tristate "Smart Battery System" | 325 | tristate "Smart Battery System" |
320 | depends on X86 | 326 | depends on X86 |
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index f74317cc1ca9..b18cd2151ddb 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile | |||
@@ -40,7 +40,7 @@ acpi-$(CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC) += processor_pdc.o | |||
40 | acpi-y += ec.o | 40 | acpi-y += ec.o |
41 | acpi-$(CONFIG_ACPI_DOCK) += dock.o | 41 | acpi-$(CONFIG_ACPI_DOCK) += dock.o |
42 | acpi-y += pci_root.o pci_link.o pci_irq.o | 42 | acpi-y += pci_root.o pci_link.o pci_irq.o |
43 | acpi-y += acpi_lpss.o | 43 | acpi-y += acpi_lpss.o acpi_apd.o |
44 | acpi-y += acpi_platform.o | 44 | acpi-y += acpi_platform.o |
45 | acpi-y += acpi_pnp.o | 45 | acpi-y += acpi_pnp.o |
46 | acpi-y += int340x_thermal.o | 46 | acpi-y += int340x_thermal.o |
@@ -70,6 +70,7 @@ obj-$(CONFIG_ACPI_PROCESSOR) += processor.o | |||
70 | obj-y += container.o | 70 | obj-y += container.o |
71 | obj-$(CONFIG_ACPI_THERMAL) += thermal.o | 71 | obj-$(CONFIG_ACPI_THERMAL) += thermal.o |
72 | obj-y += acpi_memhotplug.o | 72 | obj-y += acpi_memhotplug.o |
73 | obj-$(CONFIG_ACPI_HOTPLUG_IOAPIC) += ioapic.o | ||
73 | obj-$(CONFIG_ACPI_BATTERY) += battery.o | 74 | obj-$(CONFIG_ACPI_BATTERY) += battery.o |
74 | obj-$(CONFIG_ACPI_SBS) += sbshc.o | 75 | obj-$(CONFIG_ACPI_SBS) += sbshc.o |
75 | obj-$(CONFIG_ACPI_SBS) += sbs.o | 76 | obj-$(CONFIG_ACPI_SBS) += sbs.o |
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c new file mode 100644 index 000000000000..3984ea96e5f7 --- /dev/null +++ b/drivers/acpi/acpi_apd.c | |||
@@ -0,0 +1,150 @@ | |||
1 | /* | ||
2 | * AMD ACPI support for ACPI2platform device. | ||
3 | * | ||
4 | * Copyright (c) 2014,2015 AMD Corporation. | ||
5 | * Authors: Ken Xue <Ken.Xue@amd.com> | ||
6 | * Wu, Jeff <Jeff.Wu@amd.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #include <linux/clk-provider.h> | ||
14 | #include <linux/platform_device.h> | ||
15 | #include <linux/pm_domain.h> | ||
16 | #include <linux/clkdev.h> | ||
17 | #include <linux/acpi.h> | ||
18 | #include <linux/err.h> | ||
19 | #include <linux/clk.h> | ||
20 | #include <linux/pm.h> | ||
21 | |||
22 | #include "internal.h" | ||
23 | |||
24 | ACPI_MODULE_NAME("acpi_apd"); | ||
25 | struct apd_private_data; | ||
26 | |||
27 | /** | ||
28 | * ACPI_APD_SYSFS : add device attributes in sysfs | ||
29 | * ACPI_APD_PM : attach power domain to device | ||
30 | */ | ||
31 | #define ACPI_APD_SYSFS BIT(0) | ||
32 | #define ACPI_APD_PM BIT(1) | ||
33 | |||
34 | /** | ||
35 | * struct apd_device_desc - a descriptor for apd device | ||
36 | * @flags: device flags like %ACPI_APD_SYSFS, %ACPI_APD_PM | ||
37 | * @fixed_clk_rate: fixed rate input clock source for acpi device; | ||
38 | * 0 means no fixed rate input clock source | ||
39 | * @setup: a hook routine to set device resource during create platform device | ||
40 | * | ||
41 | * Device description defined as acpi_device_id.driver_data | ||
42 | */ | ||
43 | struct apd_device_desc { | ||
44 | unsigned int flags; | ||
45 | unsigned int fixed_clk_rate; | ||
46 | int (*setup)(struct apd_private_data *pdata); | ||
47 | }; | ||
48 | |||
49 | struct apd_private_data { | ||
50 | struct clk *clk; | ||
51 | struct acpi_device *adev; | ||
52 | const struct apd_device_desc *dev_desc; | ||
53 | }; | ||
54 | |||
55 | #ifdef CONFIG_X86_AMD_PLATFORM_DEVICE | ||
56 | #define APD_ADDR(desc) ((unsigned long)&desc) | ||
57 | |||
58 | static int acpi_apd_setup(struct apd_private_data *pdata) | ||
59 | { | ||
60 | const struct apd_device_desc *dev_desc = pdata->dev_desc; | ||
61 | struct clk *clk = ERR_PTR(-ENODEV); | ||
62 | |||
63 | if (dev_desc->fixed_clk_rate) { | ||
64 | clk = clk_register_fixed_rate(&pdata->adev->dev, | ||
65 | dev_name(&pdata->adev->dev), | ||
66 | NULL, CLK_IS_ROOT, | ||
67 | dev_desc->fixed_clk_rate); | ||
68 | clk_register_clkdev(clk, NULL, dev_name(&pdata->adev->dev)); | ||
69 | pdata->clk = clk; | ||
70 | } | ||
71 | |||
72 | return 0; | ||
73 | } | ||
74 | |||
75 | static struct apd_device_desc cz_i2c_desc = { | ||
76 | .setup = acpi_apd_setup, | ||
77 | .fixed_clk_rate = 133000000, | ||
78 | }; | ||
79 | |||
80 | static struct apd_device_desc cz_uart_desc = { | ||
81 | .setup = acpi_apd_setup, | ||
82 | .fixed_clk_rate = 48000000, | ||
83 | }; | ||
84 | |||
85 | #else | ||
86 | |||
87 | #define APD_ADDR(desc) (0UL) | ||
88 | |||
89 | #endif /* CONFIG_X86_AMD_PLATFORM_DEVICE */ | ||
90 | |||
91 | /** | ||
92 | * Create platform device during acpi scan attach handle. | ||
93 | * Return value > 0 on success of creating device. | ||
94 | */ | ||
95 | static int acpi_apd_create_device(struct acpi_device *adev, | ||
96 | const struct acpi_device_id *id) | ||
97 | { | ||
98 | const struct apd_device_desc *dev_desc = (void *)id->driver_data; | ||
99 | struct apd_private_data *pdata; | ||
100 | struct platform_device *pdev; | ||
101 | int ret; | ||
102 | |||
103 | if (!dev_desc) { | ||
104 | pdev = acpi_create_platform_device(adev); | ||
105 | return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1; | ||
106 | } | ||
107 | |||
108 | pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); | ||
109 | if (!pdata) | ||
110 | return -ENOMEM; | ||
111 | |||
112 | pdata->adev = adev; | ||
113 | pdata->dev_desc = dev_desc; | ||
114 | |||
115 | if (dev_desc->setup) { | ||
116 | ret = dev_desc->setup(pdata); | ||
117 | if (ret) | ||
118 | goto err_out; | ||
119 | } | ||
120 | |||
121 | adev->driver_data = pdata; | ||
122 | pdev = acpi_create_platform_device(adev); | ||
123 | if (!IS_ERR_OR_NULL(pdev)) | ||
124 | return 1; | ||
125 | |||
126 | ret = PTR_ERR(pdev); | ||
127 | adev->driver_data = NULL; | ||
128 | |||
129 | err_out: | ||
130 | kfree(pdata); | ||
131 | return ret; | ||
132 | } | ||
133 | |||
134 | static const struct acpi_device_id acpi_apd_device_ids[] = { | ||
135 | /* Generic apd devices */ | ||
136 | { "AMD0010", APD_ADDR(cz_i2c_desc) }, | ||
137 | { "AMD0020", APD_ADDR(cz_uart_desc) }, | ||
138 | { "AMD0030", }, | ||
139 | { } | ||
140 | }; | ||
141 | |||
142 | static struct acpi_scan_handler apd_handler = { | ||
143 | .ids = acpi_apd_device_ids, | ||
144 | .attach = acpi_apd_create_device, | ||
145 | }; | ||
146 | |||
147 | void __init acpi_apd_init(void) | ||
148 | { | ||
149 | acpi_scan_add_handler(&apd_handler); | ||
150 | } | ||
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 4f3febf8a589..02e835f3cf8a 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * ACPI support for Intel Lynxpoint LPSS. | 2 | * ACPI support for Intel Lynxpoint LPSS. |
3 | * | 3 | * |
4 | * Copyright (C) 2013, 2014, Intel Corporation | 4 | * Copyright (C) 2013, Intel Corporation |
5 | * Authors: Mika Westerberg <mika.westerberg@linux.intel.com> | 5 | * Authors: Mika Westerberg <mika.westerberg@linux.intel.com> |
6 | * Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 6 | * Rafael J. Wysocki <rafael.j.wysocki@intel.com> |
7 | * | 7 | * |
@@ -60,8 +60,6 @@ ACPI_MODULE_NAME("acpi_lpss"); | |||
60 | #define LPSS_CLK_DIVIDER BIT(2) | 60 | #define LPSS_CLK_DIVIDER BIT(2) |
61 | #define LPSS_LTR BIT(3) | 61 | #define LPSS_LTR BIT(3) |
62 | #define LPSS_SAVE_CTX BIT(4) | 62 | #define LPSS_SAVE_CTX BIT(4) |
63 | #define LPSS_DEV_PROXY BIT(5) | ||
64 | #define LPSS_PROXY_REQ BIT(6) | ||
65 | 63 | ||
66 | struct lpss_private_data; | 64 | struct lpss_private_data; |
67 | 65 | ||
@@ -72,10 +70,8 @@ struct lpss_device_desc { | |||
72 | void (*setup)(struct lpss_private_data *pdata); | 70 | void (*setup)(struct lpss_private_data *pdata); |
73 | }; | 71 | }; |
74 | 72 | ||
75 | static struct device *proxy_device; | ||
76 | |||
77 | static struct lpss_device_desc lpss_dma_desc = { | 73 | static struct lpss_device_desc lpss_dma_desc = { |
78 | .flags = LPSS_CLK | LPSS_PROXY_REQ, | 74 | .flags = LPSS_CLK, |
79 | }; | 75 | }; |
80 | 76 | ||
81 | struct lpss_private_data { | 77 | struct lpss_private_data { |
@@ -129,7 +125,7 @@ static struct lpss_device_desc lpt_dev_desc = { | |||
129 | }; | 125 | }; |
130 | 126 | ||
131 | static struct lpss_device_desc lpt_i2c_dev_desc = { | 127 | static struct lpss_device_desc lpt_i2c_dev_desc = { |
132 | .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR, | 128 | .flags = LPSS_CLK | LPSS_LTR, |
133 | .prv_offset = 0x800, | 129 | .prv_offset = 0x800, |
134 | }; | 130 | }; |
135 | 131 | ||
@@ -150,24 +146,22 @@ static struct lpss_device_desc byt_pwm_dev_desc = { | |||
150 | }; | 146 | }; |
151 | 147 | ||
152 | static struct lpss_device_desc byt_uart_dev_desc = { | 148 | static struct lpss_device_desc byt_uart_dev_desc = { |
153 | .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX | | 149 | .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, |
154 | LPSS_DEV_PROXY, | ||
155 | .prv_offset = 0x800, | 150 | .prv_offset = 0x800, |
156 | .setup = lpss_uart_setup, | 151 | .setup = lpss_uart_setup, |
157 | }; | 152 | }; |
158 | 153 | ||
159 | static struct lpss_device_desc byt_spi_dev_desc = { | 154 | static struct lpss_device_desc byt_spi_dev_desc = { |
160 | .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX | | 155 | .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, |
161 | LPSS_DEV_PROXY, | ||
162 | .prv_offset = 0x400, | 156 | .prv_offset = 0x400, |
163 | }; | 157 | }; |
164 | 158 | ||
165 | static struct lpss_device_desc byt_sdio_dev_desc = { | 159 | static struct lpss_device_desc byt_sdio_dev_desc = { |
166 | .flags = LPSS_CLK | LPSS_DEV_PROXY, | 160 | .flags = LPSS_CLK, |
167 | }; | 161 | }; |
168 | 162 | ||
169 | static struct lpss_device_desc byt_i2c_dev_desc = { | 163 | static struct lpss_device_desc byt_i2c_dev_desc = { |
170 | .flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_DEV_PROXY, | 164 | .flags = LPSS_CLK | LPSS_SAVE_CTX, |
171 | .prv_offset = 0x800, | 165 | .prv_offset = 0x800, |
172 | .setup = byt_i2c_setup, | 166 | .setup = byt_i2c_setup, |
173 | }; | 167 | }; |
@@ -313,7 +307,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev, | |||
313 | { | 307 | { |
314 | struct lpss_device_desc *dev_desc; | 308 | struct lpss_device_desc *dev_desc; |
315 | struct lpss_private_data *pdata; | 309 | struct lpss_private_data *pdata; |
316 | struct resource_list_entry *rentry; | 310 | struct resource_entry *rentry; |
317 | struct list_head resource_list; | 311 | struct list_head resource_list; |
318 | struct platform_device *pdev; | 312 | struct platform_device *pdev; |
319 | int ret; | 313 | int ret; |
@@ -333,13 +327,15 @@ static int acpi_lpss_create_device(struct acpi_device *adev, | |||
333 | goto err_out; | 327 | goto err_out; |
334 | 328 | ||
335 | list_for_each_entry(rentry, &resource_list, node) | 329 | list_for_each_entry(rentry, &resource_list, node) |
336 | if (resource_type(&rentry->res) == IORESOURCE_MEM) { | 330 | if (resource_type(rentry->res) == IORESOURCE_MEM) { |
337 | if (dev_desc->prv_size_override) | 331 | if (dev_desc->prv_size_override) |
338 | pdata->mmio_size = dev_desc->prv_size_override; | 332 | pdata->mmio_size = dev_desc->prv_size_override; |
339 | else | 333 | else |
340 | pdata->mmio_size = resource_size(&rentry->res); | 334 | pdata->mmio_size = resource_size(rentry->res); |
341 | pdata->mmio_base = ioremap(rentry->res.start, | 335 | pdata->mmio_base = ioremap(rentry->res->start, |
342 | pdata->mmio_size); | 336 | pdata->mmio_size); |
337 | if (!pdata->mmio_base) | ||
338 | goto err_out; | ||
343 | break; | 339 | break; |
344 | } | 340 | } |
345 | 341 | ||
@@ -374,8 +370,6 @@ static int acpi_lpss_create_device(struct acpi_device *adev, | |||
374 | adev->driver_data = pdata; | 370 | adev->driver_data = pdata; |
375 | pdev = acpi_create_platform_device(adev); | 371 | pdev = acpi_create_platform_device(adev); |
376 | if (!IS_ERR_OR_NULL(pdev)) { | 372 | if (!IS_ERR_OR_NULL(pdev)) { |
377 | if (!proxy_device && dev_desc->flags & LPSS_DEV_PROXY) | ||
378 | proxy_device = &pdev->dev; | ||
379 | return 1; | 373 | return 1; |
380 | } | 374 | } |
381 | 375 | ||
@@ -600,14 +594,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev) | |||
600 | if (pdata->dev_desc->flags & LPSS_SAVE_CTX) | 594 | if (pdata->dev_desc->flags & LPSS_SAVE_CTX) |
601 | acpi_lpss_save_ctx(dev, pdata); | 595 | acpi_lpss_save_ctx(dev, pdata); |
602 | 596 | ||
603 | ret = acpi_dev_runtime_suspend(dev); | 597 | return acpi_dev_runtime_suspend(dev); |
604 | if (ret) | ||
605 | return ret; | ||
606 | |||
607 | if (pdata->dev_desc->flags & LPSS_PROXY_REQ && proxy_device) | ||
608 | return pm_runtime_put_sync_suspend(proxy_device); | ||
609 | |||
610 | return 0; | ||
611 | } | 598 | } |
612 | 599 | ||
613 | static int acpi_lpss_runtime_resume(struct device *dev) | 600 | static int acpi_lpss_runtime_resume(struct device *dev) |
@@ -615,12 +602,6 @@ static int acpi_lpss_runtime_resume(struct device *dev) | |||
615 | struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); | 602 | struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); |
616 | int ret; | 603 | int ret; |
617 | 604 | ||
618 | if (pdata->dev_desc->flags & LPSS_PROXY_REQ && proxy_device) { | ||
619 | ret = pm_runtime_get_sync(proxy_device); | ||
620 | if (ret) | ||
621 | return ret; | ||
622 | } | ||
623 | |||
624 | ret = acpi_dev_runtime_resume(dev); | 605 | ret = acpi_dev_runtime_resume(dev); |
625 | if (ret) | 606 | if (ret) |
626 | return ret; | 607 | return ret; |
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c index 23e2319ead41..ee28f4d15625 100644 --- a/drivers/acpi/acpi_memhotplug.c +++ b/drivers/acpi/acpi_memhotplug.c | |||
@@ -101,8 +101,8 @@ acpi_memory_get_resource(struct acpi_resource *resource, void *context) | |||
101 | /* Can we combine the resource range information? */ | 101 | /* Can we combine the resource range information? */ |
102 | if ((info->caching == address64.info.mem.caching) && | 102 | if ((info->caching == address64.info.mem.caching) && |
103 | (info->write_protect == address64.info.mem.write_protect) && | 103 | (info->write_protect == address64.info.mem.write_protect) && |
104 | (info->start_addr + info->length == address64.minimum)) { | 104 | (info->start_addr + info->length == address64.address.minimum)) { |
105 | info->length += address64.address_length; | 105 | info->length += address64.address.address_length; |
106 | return AE_OK; | 106 | return AE_OK; |
107 | } | 107 | } |
108 | } | 108 | } |
@@ -114,8 +114,8 @@ acpi_memory_get_resource(struct acpi_resource *resource, void *context) | |||
114 | INIT_LIST_HEAD(&new->list); | 114 | INIT_LIST_HEAD(&new->list); |
115 | new->caching = address64.info.mem.caching; | 115 | new->caching = address64.info.mem.caching; |
116 | new->write_protect = address64.info.mem.write_protect; | 116 | new->write_protect = address64.info.mem.write_protect; |
117 | new->start_addr = address64.minimum; | 117 | new->start_addr = address64.address.minimum; |
118 | new->length = address64.address_length; | 118 | new->length = address64.address.address_length; |
119 | list_add_tail(&new->list, &mem_device->res_list); | 119 | list_add_tail(&new->list, &mem_device->res_list); |
120 | 120 | ||
121 | return AE_OK; | 121 | return AE_OK; |
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c index 6ba8beb6b9d2..1284138e42ab 100644 --- a/drivers/acpi/acpi_platform.c +++ b/drivers/acpi/acpi_platform.c | |||
@@ -45,7 +45,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev) | |||
45 | struct platform_device *pdev = NULL; | 45 | struct platform_device *pdev = NULL; |
46 | struct acpi_device *acpi_parent; | 46 | struct acpi_device *acpi_parent; |
47 | struct platform_device_info pdevinfo; | 47 | struct platform_device_info pdevinfo; |
48 | struct resource_list_entry *rentry; | 48 | struct resource_entry *rentry; |
49 | struct list_head resource_list; | 49 | struct list_head resource_list; |
50 | struct resource *resources = NULL; | 50 | struct resource *resources = NULL; |
51 | int count; | 51 | int count; |
@@ -71,7 +71,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev) | |||
71 | } | 71 | } |
72 | count = 0; | 72 | count = 0; |
73 | list_for_each_entry(rentry, &resource_list, node) | 73 | list_for_each_entry(rentry, &resource_list, node) |
74 | resources[count++] = rentry->res; | 74 | resources[count++] = *rentry->res; |
75 | 75 | ||
76 | acpi_dev_free_resource_list(&resource_list); | 76 | acpi_dev_free_resource_list(&resource_list); |
77 | } | 77 | } |
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h index 3d2c88289da9..d863016565b5 100644 --- a/drivers/acpi/acpica/acapps.h +++ b/drivers/acpi/acpica/acapps.h | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
@@ -47,7 +47,7 @@ | |||
47 | /* Common info for tool signons */ | 47 | /* Common info for tool signons */ |
48 | 48 | ||
49 | #define ACPICA_NAME "Intel ACPI Component Architecture" | 49 | #define ACPICA_NAME "Intel ACPI Component Architecture" |
50 | #define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2014 Intel Corporation" | 50 | #define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2015 Intel Corporation" |
51 | 51 | ||
52 | #if ACPI_MACHINE_WIDTH == 64 | 52 | #if ACPI_MACHINE_WIDTH == 64 |
53 | #define ACPI_WIDTH "-64" | 53 | #define ACPI_WIDTH "-64" |
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h index 6f1c616910ac..853aa2dbdb61 100644 --- a/drivers/acpi/acpica/accommon.h +++ b/drivers/acpi/acpica/accommon.h | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h index 1d026ff1683f..4169bb87a996 100644 --- a/drivers/acpi/acpica/acdebug.h +++ b/drivers/acpi/acpica/acdebug.h | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h index d3e2cc395d7f..408f04bcaab4 100644 --- a/drivers/acpi/acpica/acdispat.h +++ b/drivers/acpi/acpica/acdispat.h | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h index 7a7811a9fc26..228704b78657 100644 --- a/drivers/acpi/acpica/acevents.h +++ b/drivers/acpi/acpica/acevents.h | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
@@ -143,8 +143,6 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle, | |||
143 | acpi_status | 143 | acpi_status |
144 | acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context); | 144 | acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context); |
145 | 145 | ||
146 | u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info); | ||
147 | |||
148 | acpi_status | 146 | acpi_status |
149 | acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | 147 | acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, |
150 | struct acpi_gpe_block_info *gpe_block, void *context); | 148 | struct acpi_gpe_block_info *gpe_block, void *context); |
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index 7f60582d0c8c..a165d25343e8 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h index c318d3e27893..196a55244559 100644 --- a/drivers/acpi/acpica/achware.h +++ b/drivers/acpi/acpica/achware.h | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h index b01f71ce0523..1886bde54b5d 100644 --- a/drivers/acpi/acpica/acinterp.h +++ b/drivers/acpi/acpica/acinterp.h | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index 680d23bbae7c..7add32e5d8c5 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h index 4bceb11c7380..cf607fe69dbd 100644 --- a/drivers/acpi/acpica/acmacros.h +++ b/drivers/acpi/acpica/acmacros.h | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h index ee1c040f321c..952fbe0b7231 100644 --- a/drivers/acpi/acpica/acnamesp.h +++ b/drivers/acpi/acpica/acnamesp.h | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h index 8abb393dafab..3e9720e1f34f 100644 --- a/drivers/acpi/acpica/acobject.h +++ b/drivers/acpi/acpica/acobject.h | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h index dda0e6affcf1..a5f17de45ac6 100644 --- a/drivers/acpi/acpica/acopcode.h +++ b/drivers/acpi/acpica/acopcode.h | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h index 6168b85463ed..74a390c6db16 100644 --- a/drivers/acpi/acpica/acparser.h +++ b/drivers/acpi/acpica/acparser.h | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h index bd3908d26c4f..a972d11c97c9 100644 --- a/drivers/acpi/acpica/acpredef.h +++ b/drivers/acpi/acpica/acpredef.h | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h index 4b008e8884a1..efc4c7124ccc 100644 --- a/drivers/acpi/acpica/acresrc.h +++ b/drivers/acpi/acpica/acresrc.h | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h index cf7346110bd8..d14b547b7cd5 100644 --- a/drivers/acpi/acpica/acstruct.h +++ b/drivers/acpi/acpica/acstruct.h | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h index 1afe46e44dac..1c127a43017b 100644 --- a/drivers/acpi/acpica/actables.h +++ b/drivers/acpi/acpica/actables.h | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h index 486d342e74b6..c2f03e8774ad 100644 --- a/drivers/acpi/acpica/acutils.h +++ b/drivers/acpi/acpica/acutils.h | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h index 5908ccec6aea..3a95068fc119 100644 --- a/drivers/acpi/acpica/amlcode.h +++ b/drivers/acpi/acpica/amlcode.h | |||
@@ -7,7 +7,7 @@ | |||
7 | *****************************************************************************/ | 7 | *****************************************************************************/ |
8 | 8 | ||
9 | /* | 9 | /* |
10 | * Copyright (C) 2000 - 2014, Intel Corp. | 10 | * Copyright (C) 2000 - 2015, Intel Corp. |
11 | * All rights reserved. | 11 | * All rights reserved. |
12 | * | 12 | * |
13 | * Redistribution and use in source and binary forms, with or without | 13 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h index 3a0beeb86ba5..ee0cdd60b93d 100644 --- a/drivers/acpi/acpica/amlresrc.h +++ b/drivers/acpi/acpica/amlresrc.h | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c index 720b1cdda711..3e6989738e85 100644 --- a/drivers/acpi/acpica/dsargs.c +++ b/drivers/acpi/acpica/dsargs.c | |||
@@ -6,7 +6,7 @@ | |||
6 | *****************************************************************************/ | 6 | *****************************************************************************/ |
7 | 7 | ||
8 | /* | 8 | /* |
9 | * Copyright (C) 2000 - 2014, Intel Corp. | 9 | * Copyright (C) 2000 - 2015, Intel Corp. |
10 | * All rights reserved. | 10 | * All rights reserved. |
11 | * | 11 | * |
12 | * Redistribution and use in source and binary forms, with or without | 12 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c index 8daf9de82b73..39da9da62bbf 100644 --- a/drivers/acpi/acpica/dscontrol.c +++ b/drivers/acpi/acpica/dscontrol.c | |||
@@ -6,7 +6,7 @@ | |||
6 | *****************************************************************************/ | 6 | *****************************************************************************/ |
7 | 7 | ||
8 | /* | 8 | /* |
9 | * Copyright (C) 2000 - 2014, Intel Corp. | 9 | * Copyright (C) 2000 - 2015, Intel Corp. |
10 | * All rights reserved. | 10 | * All rights reserved. |
11 | * | 11 | * |
12 | * Redistribution and use in source and binary forms, with or without | 12 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c index c57666196672..43b40de90484 100644 --- a/drivers/acpi/acpica/dsfield.c +++ b/drivers/acpi/acpica/dsfield.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c index aee5e45f6d35..bbe74bcebbae 100644 --- a/drivers/acpi/acpica/dsinit.c +++ b/drivers/acpi/acpica/dsinit.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c index 3c7f7378b94d..d72565a3c646 100644 --- a/drivers/acpi/acpica/dsmethod.c +++ b/drivers/acpi/acpica/dsmethod.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c index b67522df01ac..2e4c42b377ec 100644 --- a/drivers/acpi/acpica/dsmthdat.c +++ b/drivers/acpi/acpica/dsmthdat.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c index a1e7e6b6fcf7..8a7b07b6adc8 100644 --- a/drivers/acpi/acpica/dsobject.c +++ b/drivers/acpi/acpica/dsobject.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c index 6c0759c0db47..77244182ff02 100644 --- a/drivers/acpi/acpica/dsopcode.c +++ b/drivers/acpi/acpica/dsopcode.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c index 9f74795e2268..e5ff89bcb3f5 100644 --- a/drivers/acpi/acpica/dsutils.c +++ b/drivers/acpi/acpica/dsutils.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c index f7f5107e754d..df54d46225cd 100644 --- a/drivers/acpi/acpica/dswexec.c +++ b/drivers/acpi/acpica/dswexec.c | |||
@@ -6,7 +6,7 @@ | |||
6 | *****************************************************************************/ | 6 | *****************************************************************************/ |
7 | 7 | ||
8 | /* | 8 | /* |
9 | * Copyright (C) 2000 - 2014, Intel Corp. | 9 | * Copyright (C) 2000 - 2015, Intel Corp. |
10 | * All rights reserved. | 10 | * All rights reserved. |
11 | * | 11 | * |
12 | * Redistribution and use in source and binary forms, with or without | 12 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c index 15623da26200..843942fb4be5 100644 --- a/drivers/acpi/acpica/dswload.c +++ b/drivers/acpi/acpica/dswload.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c index 2ac28d297305..fcaa30c611fb 100644 --- a/drivers/acpi/acpica/dswload2.c +++ b/drivers/acpi/acpica/dswload2.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c index 9d6e2c1de1f8..43b3ea40c0b6 100644 --- a/drivers/acpi/acpica/dswscope.c +++ b/drivers/acpi/acpica/dswscope.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c index 24f7d5ea678a..89ac2022465e 100644 --- a/drivers/acpi/acpica/dswstate.c +++ b/drivers/acpi/acpica/dswstate.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c index c7bffff9ed32..bf6873f95e72 100644 --- a/drivers/acpi/acpica/evevent.c +++ b/drivers/acpi/acpica/evevent.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c index 3393a73ca0d6..b78dc7c6d5d7 100644 --- a/drivers/acpi/acpica/evglock.c +++ b/drivers/acpi/acpica/evglock.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index aa70154cf4fa..5ed064e8673c 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
@@ -114,17 +114,6 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) | |||
114 | 114 | ||
115 | ACPI_FUNCTION_TRACE(ev_enable_gpe); | 115 | ACPI_FUNCTION_TRACE(ev_enable_gpe); |
116 | 116 | ||
117 | /* | ||
118 | * We will only allow a GPE to be enabled if it has either an associated | ||
119 | * method (_Lxx/_Exx) or a handler, or is using the implicit notify | ||
120 | * feature. Otherwise, the GPE will be immediately disabled by | ||
121 | * acpi_ev_gpe_dispatch the first time it fires. | ||
122 | */ | ||
123 | if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == | ||
124 | ACPI_GPE_DISPATCH_NONE) { | ||
125 | return_ACPI_STATUS(AE_NO_HANDLER); | ||
126 | } | ||
127 | |||
128 | /* Clear the GPE (of stale events) */ | 117 | /* Clear the GPE (of stale events) */ |
129 | 118 | ||
130 | status = acpi_hw_clear_gpe(gpe_event_info); | 119 | status = acpi_hw_clear_gpe(gpe_event_info); |
@@ -339,7 +328,11 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list) | |||
339 | { | 328 | { |
340 | acpi_status status; | 329 | acpi_status status; |
341 | struct acpi_gpe_block_info *gpe_block; | 330 | struct acpi_gpe_block_info *gpe_block; |
331 | struct acpi_namespace_node *gpe_device; | ||
342 | struct acpi_gpe_register_info *gpe_register_info; | 332 | struct acpi_gpe_register_info *gpe_register_info; |
333 | struct acpi_gpe_event_info *gpe_event_info; | ||
334 | u32 gpe_number; | ||
335 | struct acpi_gpe_handler_info *gpe_handler_info; | ||
343 | u32 int_status = ACPI_INTERRUPT_NOT_HANDLED; | 336 | u32 int_status = ACPI_INTERRUPT_NOT_HANDLED; |
344 | u8 enabled_status_byte; | 337 | u8 enabled_status_byte; |
345 | u32 status_reg; | 338 | u32 status_reg; |
@@ -367,6 +360,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list) | |||
367 | 360 | ||
368 | gpe_block = gpe_xrupt_list->gpe_block_list_head; | 361 | gpe_block = gpe_xrupt_list->gpe_block_list_head; |
369 | while (gpe_block) { | 362 | while (gpe_block) { |
363 | gpe_device = gpe_block->node; | ||
364 | |||
370 | /* | 365 | /* |
371 | * Read all of the 8-bit GPE status and enable registers in this GPE | 366 | * Read all of the 8-bit GPE status and enable registers in this GPE |
372 | * block, saving all of them. Find all currently active GP events. | 367 | * block, saving all of them. Find all currently active GP events. |
@@ -442,16 +437,68 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list) | |||
442 | 437 | ||
443 | /* Examine one GPE bit */ | 438 | /* Examine one GPE bit */ |
444 | 439 | ||
440 | gpe_event_info = | ||
441 | &gpe_block-> | ||
442 | event_info[((acpi_size) i * | ||
443 | ACPI_GPE_REGISTER_WIDTH) + j]; | ||
444 | gpe_number = | ||
445 | j + gpe_register_info->base_gpe_number; | ||
446 | |||
445 | if (enabled_status_byte & (1 << j)) { | 447 | if (enabled_status_byte & (1 << j)) { |
446 | /* | 448 | |
447 | * Found an active GPE. Dispatch the event to a handler | 449 | /* Invoke global event handler if present */ |
448 | * or method. | 450 | |
449 | */ | 451 | acpi_gpe_count++; |
450 | int_status |= | 452 | if (acpi_gbl_global_event_handler) { |
451 | acpi_ev_gpe_dispatch(gpe_block-> | 453 | acpi_gbl_global_event_handler |
452 | node, | 454 | (ACPI_EVENT_TYPE_GPE, |
453 | &gpe_block-> | 455 | gpe_device, gpe_number, |
454 | event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number); | 456 | acpi_gbl_global_event_handler_context); |
457 | } | ||
458 | |||
459 | /* Found an active GPE */ | ||
460 | |||
461 | if (ACPI_GPE_DISPATCH_TYPE | ||
462 | (gpe_event_info->flags) == | ||
463 | ACPI_GPE_DISPATCH_RAW_HANDLER) { | ||
464 | |||
465 | /* Dispatch the event to a raw handler */ | ||
466 | |||
467 | gpe_handler_info = | ||
468 | gpe_event_info->dispatch. | ||
469 | handler; | ||
470 | |||
471 | /* | ||
472 | * There is no protection around the namespace node | ||
473 | * and the GPE handler to ensure a safe destruction | ||
474 | * because: | ||
475 | * 1. The namespace node is expected to always | ||
476 | * exist after loading a table. | ||
477 | * 2. The GPE handler is expected to be flushed by | ||
478 | * acpi_os_wait_events_complete() before the | ||
479 | * destruction. | ||
480 | */ | ||
481 | acpi_os_release_lock | ||
482 | (acpi_gbl_gpe_lock, flags); | ||
483 | int_status |= | ||
484 | gpe_handler_info-> | ||
485 | address(gpe_device, | ||
486 | gpe_number, | ||
487 | gpe_handler_info-> | ||
488 | context); | ||
489 | flags = | ||
490 | acpi_os_acquire_lock | ||
491 | (acpi_gbl_gpe_lock); | ||
492 | } else { | ||
493 | /* | ||
494 | * Dispatch the event to a standard handler or | ||
495 | * method. | ||
496 | */ | ||
497 | int_status |= | ||
498 | acpi_ev_gpe_dispatch | ||
499 | (gpe_device, gpe_event_info, | ||
500 | gpe_number); | ||
501 | } | ||
455 | } | 502 | } |
456 | } | 503 | } |
457 | } | 504 | } |
@@ -484,52 +531,15 @@ unlock_and_exit: | |||
484 | static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) | 531 | static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) |
485 | { | 532 | { |
486 | struct acpi_gpe_event_info *gpe_event_info = context; | 533 | struct acpi_gpe_event_info *gpe_event_info = context; |
487 | acpi_status status; | 534 | acpi_status status = AE_OK; |
488 | struct acpi_gpe_event_info *local_gpe_event_info; | ||
489 | struct acpi_evaluate_info *info; | 535 | struct acpi_evaluate_info *info; |
490 | struct acpi_gpe_notify_info *notify; | 536 | struct acpi_gpe_notify_info *notify; |
491 | 537 | ||
492 | ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method); | 538 | ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method); |
493 | 539 | ||
494 | /* Allocate a local GPE block */ | ||
495 | |||
496 | local_gpe_event_info = | ||
497 | ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_event_info)); | ||
498 | if (!local_gpe_event_info) { | ||
499 | ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY, "while handling a GPE")); | ||
500 | return_VOID; | ||
501 | } | ||
502 | |||
503 | status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); | ||
504 | if (ACPI_FAILURE(status)) { | ||
505 | ACPI_FREE(local_gpe_event_info); | ||
506 | return_VOID; | ||
507 | } | ||
508 | |||
509 | /* Must revalidate the gpe_number/gpe_block */ | ||
510 | |||
511 | if (!acpi_ev_valid_gpe_event(gpe_event_info)) { | ||
512 | status = acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
513 | ACPI_FREE(local_gpe_event_info); | ||
514 | return_VOID; | ||
515 | } | ||
516 | |||
517 | /* | ||
518 | * Take a snapshot of the GPE info for this level - we copy the info to | ||
519 | * prevent a race condition with remove_handler/remove_block. | ||
520 | */ | ||
521 | ACPI_MEMCPY(local_gpe_event_info, gpe_event_info, | ||
522 | sizeof(struct acpi_gpe_event_info)); | ||
523 | |||
524 | status = acpi_ut_release_mutex(ACPI_MTX_EVENTS); | ||
525 | if (ACPI_FAILURE(status)) { | ||
526 | ACPI_FREE(local_gpe_event_info); | ||
527 | return_VOID; | ||
528 | } | ||
529 | |||
530 | /* Do the correct dispatch - normal method or implicit notify */ | 540 | /* Do the correct dispatch - normal method or implicit notify */ |
531 | 541 | ||
532 | switch (local_gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) { | 542 | switch (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)) { |
533 | case ACPI_GPE_DISPATCH_NOTIFY: | 543 | case ACPI_GPE_DISPATCH_NOTIFY: |
534 | /* | 544 | /* |
535 | * Implicit notify. | 545 | * Implicit notify. |
@@ -542,7 +552,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) | |||
542 | * June 2012: Expand implicit notify mechanism to support | 552 | * June 2012: Expand implicit notify mechanism to support |
543 | * notifies on multiple device objects. | 553 | * notifies on multiple device objects. |
544 | */ | 554 | */ |
545 | notify = local_gpe_event_info->dispatch.notify_list; | 555 | notify = gpe_event_info->dispatch.notify_list; |
546 | while (ACPI_SUCCESS(status) && notify) { | 556 | while (ACPI_SUCCESS(status) && notify) { |
547 | status = | 557 | status = |
548 | acpi_ev_queue_notify_request(notify->device_node, | 558 | acpi_ev_queue_notify_request(notify->device_node, |
@@ -566,7 +576,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) | |||
566 | * _Lxx/_Exx control method that corresponds to this GPE | 576 | * _Lxx/_Exx control method that corresponds to this GPE |
567 | */ | 577 | */ |
568 | info->prefix_node = | 578 | info->prefix_node = |
569 | local_gpe_event_info->dispatch.method_node; | 579 | gpe_event_info->dispatch.method_node; |
570 | info->flags = ACPI_IGNORE_RETURN_VALUE; | 580 | info->flags = ACPI_IGNORE_RETURN_VALUE; |
571 | 581 | ||
572 | status = acpi_ns_evaluate(info); | 582 | status = acpi_ns_evaluate(info); |
@@ -576,25 +586,27 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) | |||
576 | if (ACPI_FAILURE(status)) { | 586 | if (ACPI_FAILURE(status)) { |
577 | ACPI_EXCEPTION((AE_INFO, status, | 587 | ACPI_EXCEPTION((AE_INFO, status, |
578 | "while evaluating GPE method [%4.4s]", | 588 | "while evaluating GPE method [%4.4s]", |
579 | acpi_ut_get_node_name | 589 | acpi_ut_get_node_name(gpe_event_info-> |
580 | (local_gpe_event_info->dispatch. | 590 | dispatch. |
581 | method_node))); | 591 | method_node))); |
582 | } | 592 | } |
583 | break; | 593 | break; |
584 | 594 | ||
585 | default: | 595 | default: |
586 | 596 | ||
587 | return_VOID; /* Should never happen */ | 597 | goto error_exit; /* Should never happen */ |
588 | } | 598 | } |
589 | 599 | ||
590 | /* Defer enabling of GPE until all notify handlers are done */ | 600 | /* Defer enabling of GPE until all notify handlers are done */ |
591 | 601 | ||
592 | status = acpi_os_execute(OSL_NOTIFY_HANDLER, | 602 | status = acpi_os_execute(OSL_NOTIFY_HANDLER, |
593 | acpi_ev_asynch_enable_gpe, | 603 | acpi_ev_asynch_enable_gpe, gpe_event_info); |
594 | local_gpe_event_info); | 604 | if (ACPI_SUCCESS(status)) { |
595 | if (ACPI_FAILURE(status)) { | 605 | return_VOID; |
596 | ACPI_FREE(local_gpe_event_info); | ||
597 | } | 606 | } |
607 | |||
608 | error_exit: | ||
609 | acpi_ev_asynch_enable_gpe(gpe_event_info); | ||
598 | return_VOID; | 610 | return_VOID; |
599 | } | 611 | } |
600 | 612 | ||
@@ -622,7 +634,6 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context) | |||
622 | (void)acpi_ev_finish_gpe(gpe_event_info); | 634 | (void)acpi_ev_finish_gpe(gpe_event_info); |
623 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | 635 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); |
624 | 636 | ||
625 | ACPI_FREE(gpe_event_info); | ||
626 | return; | 637 | return; |
627 | } | 638 | } |
628 | 639 | ||
@@ -692,15 +703,6 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device, | |||
692 | 703 | ||
693 | ACPI_FUNCTION_TRACE(ev_gpe_dispatch); | 704 | ACPI_FUNCTION_TRACE(ev_gpe_dispatch); |
694 | 705 | ||
695 | /* Invoke global event handler if present */ | ||
696 | |||
697 | acpi_gpe_count++; | ||
698 | if (acpi_gbl_global_event_handler) { | ||
699 | acpi_gbl_global_event_handler(ACPI_EVENT_TYPE_GPE, gpe_device, | ||
700 | gpe_number, | ||
701 | acpi_gbl_global_event_handler_context); | ||
702 | } | ||
703 | |||
704 | /* | 706 | /* |
705 | * Always disable the GPE so that it does not keep firing before | 707 | * Always disable the GPE so that it does not keep firing before |
706 | * any asynchronous activity completes (either from the execution | 708 | * any asynchronous activity completes (either from the execution |
@@ -741,7 +743,7 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device, | |||
741 | * If there is neither a handler nor a method, leave the GPE | 743 | * If there is neither a handler nor a method, leave the GPE |
742 | * disabled. | 744 | * disabled. |
743 | */ | 745 | */ |
744 | switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) { | 746 | switch (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)) { |
745 | case ACPI_GPE_DISPATCH_HANDLER: | 747 | case ACPI_GPE_DISPATCH_HANDLER: |
746 | 748 | ||
747 | /* Invoke the installed handler (at interrupt level) */ | 749 | /* Invoke the installed handler (at interrupt level) */ |
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c index d86699eea33c..e0f24c504513 100644 --- a/drivers/acpi/acpica/evgpeblk.c +++ b/drivers/acpi/acpica/evgpeblk.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
@@ -474,10 +474,12 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | |||
474 | * Ignore GPEs that have no corresponding _Lxx/_Exx method | 474 | * Ignore GPEs that have no corresponding _Lxx/_Exx method |
475 | * and GPEs that are used to wake the system | 475 | * and GPEs that are used to wake the system |
476 | */ | 476 | */ |
477 | if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == | 477 | if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) == |
478 | ACPI_GPE_DISPATCH_NONE) | 478 | ACPI_GPE_DISPATCH_NONE) |
479 | || ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) | 479 | || (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) == |
480 | == ACPI_GPE_DISPATCH_HANDLER) | 480 | ACPI_GPE_DISPATCH_HANDLER) |
481 | || (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) == | ||
482 | ACPI_GPE_DISPATCH_RAW_HANDLER) | ||
481 | || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) { | 483 | || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) { |
482 | continue; | 484 | continue; |
483 | } | 485 | } |
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c index 7be928379879..8840296d5b20 100644 --- a/drivers/acpi/acpica/evgpeinit.c +++ b/drivers/acpi/acpica/evgpeinit.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
@@ -401,15 +401,17 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle, | |||
401 | return_ACPI_STATUS(AE_OK); | 401 | return_ACPI_STATUS(AE_OK); |
402 | } | 402 | } |
403 | 403 | ||
404 | if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == | 404 | if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) == |
405 | ACPI_GPE_DISPATCH_HANDLER) { | 405 | ACPI_GPE_DISPATCH_HANDLER) || |
406 | (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) == | ||
407 | ACPI_GPE_DISPATCH_RAW_HANDLER)) { | ||
406 | 408 | ||
407 | /* If there is already a handler, ignore this GPE method */ | 409 | /* If there is already a handler, ignore this GPE method */ |
408 | 410 | ||
409 | return_ACPI_STATUS(AE_OK); | 411 | return_ACPI_STATUS(AE_OK); |
410 | } | 412 | } |
411 | 413 | ||
412 | if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == | 414 | if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) == |
413 | ACPI_GPE_DISPATCH_METHOD) { | 415 | ACPI_GPE_DISPATCH_METHOD) { |
414 | /* | 416 | /* |
415 | * If there is already a method, ignore this method. But check | 417 | * If there is already a method, ignore this method. But check |
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c index 17e4bbfdb096..3a958f3612fe 100644 --- a/drivers/acpi/acpica/evgpeutil.c +++ b/drivers/acpi/acpica/evgpeutil.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
@@ -108,53 +108,6 @@ unlock_and_exit: | |||
108 | 108 | ||
109 | /******************************************************************************* | 109 | /******************************************************************************* |
110 | * | 110 | * |
111 | * FUNCTION: acpi_ev_valid_gpe_event | ||
112 | * | ||
113 | * PARAMETERS: gpe_event_info - Info for this GPE | ||
114 | * | ||
115 | * RETURN: TRUE if the gpe_event is valid | ||
116 | * | ||
117 | * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL. | ||
118 | * Should be called only when the GPE lists are semaphore locked | ||
119 | * and not subject to change. | ||
120 | * | ||
121 | ******************************************************************************/ | ||
122 | |||
123 | u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info) | ||
124 | { | ||
125 | struct acpi_gpe_xrupt_info *gpe_xrupt_block; | ||
126 | struct acpi_gpe_block_info *gpe_block; | ||
127 | |||
128 | ACPI_FUNCTION_ENTRY(); | ||
129 | |||
130 | /* No need for spin lock since we are not changing any list elements */ | ||
131 | |||
132 | /* Walk the GPE interrupt levels */ | ||
133 | |||
134 | gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head; | ||
135 | while (gpe_xrupt_block) { | ||
136 | gpe_block = gpe_xrupt_block->gpe_block_list_head; | ||
137 | |||
138 | /* Walk the GPE blocks on this interrupt level */ | ||
139 | |||
140 | while (gpe_block) { | ||
141 | if ((&gpe_block->event_info[0] <= gpe_event_info) && | ||
142 | (&gpe_block->event_info[gpe_block->gpe_count] > | ||
143 | gpe_event_info)) { | ||
144 | return (TRUE); | ||
145 | } | ||
146 | |||
147 | gpe_block = gpe_block->next; | ||
148 | } | ||
149 | |||
150 | gpe_xrupt_block = gpe_xrupt_block->next; | ||
151 | } | ||
152 | |||
153 | return (FALSE); | ||
154 | } | ||
155 | |||
156 | /******************************************************************************* | ||
157 | * | ||
158 | * FUNCTION: acpi_ev_get_gpe_device | 111 | * FUNCTION: acpi_ev_get_gpe_device |
159 | * | 112 | * |
160 | * PARAMETERS: GPE_WALK_CALLBACK | 113 | * PARAMETERS: GPE_WALK_CALLBACK |
@@ -371,8 +324,10 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | |||
371 | ACPI_GPE_REGISTER_WIDTH) | 324 | ACPI_GPE_REGISTER_WIDTH) |
372 | + j]; | 325 | + j]; |
373 | 326 | ||
374 | if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == | 327 | if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) == |
375 | ACPI_GPE_DISPATCH_HANDLER) { | 328 | ACPI_GPE_DISPATCH_HANDLER) || |
329 | (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) == | ||
330 | ACPI_GPE_DISPATCH_RAW_HANDLER)) { | ||
376 | 331 | ||
377 | /* Delete an installed handler block */ | 332 | /* Delete an installed handler block */ |
378 | 333 | ||
@@ -380,10 +335,8 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | |||
380 | gpe_event_info->dispatch.handler = NULL; | 335 | gpe_event_info->dispatch.handler = NULL; |
381 | gpe_event_info->flags &= | 336 | gpe_event_info->flags &= |
382 | ~ACPI_GPE_DISPATCH_MASK; | 337 | ~ACPI_GPE_DISPATCH_MASK; |
383 | } else | 338 | } else if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) |
384 | if ((gpe_event_info-> | 339 | == ACPI_GPE_DISPATCH_NOTIFY) { |
385 | flags & ACPI_GPE_DISPATCH_MASK) == | ||
386 | ACPI_GPE_DISPATCH_NOTIFY) { | ||
387 | 340 | ||
388 | /* Delete the implicit notification device list */ | 341 | /* Delete the implicit notification device list */ |
389 | 342 | ||
diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c index 78ac29351c9e..74e8595f5a2b 100644 --- a/drivers/acpi/acpica/evhandler.c +++ b/drivers/acpi/acpica/evhandler.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c index 24ea3424981b..f7c9dfe7b990 100644 --- a/drivers/acpi/acpica/evmisc.c +++ b/drivers/acpi/acpica/evmisc.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c index 8eb8575e8c16..9abace3401f9 100644 --- a/drivers/acpi/acpica/evregion.c +++ b/drivers/acpi/acpica/evregion.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c index 1b148a440d67..da323390bb70 100644 --- a/drivers/acpi/acpica/evrgnini.c +++ b/drivers/acpi/acpica/evrgnini.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c index 29630e303829..0366703d2970 100644 --- a/drivers/acpi/acpica/evsci.c +++ b/drivers/acpi/acpica/evsci.c | |||
@@ -6,7 +6,7 @@ | |||
6 | ******************************************************************************/ | 6 | ******************************************************************************/ |
7 | 7 | ||
8 | /* | 8 | /* |
9 | * Copyright (C) 2000 - 2014, Intel Corp. | 9 | * Copyright (C) 2000 - 2015, Intel Corp. |
10 | * All rights reserved. | 10 | * All rights reserved. |
11 | * | 11 | * |
12 | * Redistribution and use in source and binary forms, with or without | 12 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c index 55a58f3ec8df..81f2d9e87fad 100644 --- a/drivers/acpi/acpica/evxface.c +++ b/drivers/acpi/acpica/evxface.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
@@ -51,6 +51,16 @@ | |||
51 | 51 | ||
52 | #define _COMPONENT ACPI_EVENTS | 52 | #define _COMPONENT ACPI_EVENTS |
53 | ACPI_MODULE_NAME("evxface") | 53 | ACPI_MODULE_NAME("evxface") |
54 | #if (!ACPI_REDUCED_HARDWARE) | ||
55 | /* Local prototypes */ | ||
56 | static acpi_status | ||
57 | acpi_ev_install_gpe_handler(acpi_handle gpe_device, | ||
58 | u32 gpe_number, | ||
59 | u32 type, | ||
60 | u8 is_raw_handler, | ||
61 | acpi_gpe_handler address, void *context); | ||
62 | |||
63 | #endif | ||
54 | 64 | ||
55 | 65 | ||
56 | /******************************************************************************* | 66 | /******************************************************************************* |
@@ -76,6 +86,7 @@ ACPI_MODULE_NAME("evxface") | |||
76 | * handlers. | 86 | * handlers. |
77 | * | 87 | * |
78 | ******************************************************************************/ | 88 | ******************************************************************************/ |
89 | |||
79 | acpi_status | 90 | acpi_status |
80 | acpi_install_notify_handler(acpi_handle device, | 91 | acpi_install_notify_handler(acpi_handle device, |
81 | u32 handler_type, | 92 | u32 handler_type, |
@@ -717,32 +728,37 @@ ACPI_EXPORT_SYMBOL(acpi_remove_fixed_event_handler) | |||
717 | 728 | ||
718 | /******************************************************************************* | 729 | /******************************************************************************* |
719 | * | 730 | * |
720 | * FUNCTION: acpi_install_gpe_handler | 731 | * FUNCTION: acpi_ev_install_gpe_handler |
721 | * | 732 | * |
722 | * PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT | 733 | * PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT |
723 | * defined GPEs) | 734 | * defined GPEs) |
724 | * gpe_number - The GPE number within the GPE block | 735 | * gpe_number - The GPE number within the GPE block |
725 | * type - Whether this GPE should be treated as an | 736 | * type - Whether this GPE should be treated as an |
726 | * edge- or level-triggered interrupt. | 737 | * edge- or level-triggered interrupt. |
738 | * is_raw_handler - Whether this GPE should be handled using | ||
739 | * the special GPE handler mode. | ||
727 | * address - Address of the handler | 740 | * address - Address of the handler |
728 | * context - Value passed to the handler on each GPE | 741 | * context - Value passed to the handler on each GPE |
729 | * | 742 | * |
730 | * RETURN: Status | 743 | * RETURN: Status |
731 | * | 744 | * |
732 | * DESCRIPTION: Install a handler for a General Purpose Event. | 745 | * DESCRIPTION: Internal function to install a handler for a General Purpose |
746 | * Event. | ||
733 | * | 747 | * |
734 | ******************************************************************************/ | 748 | ******************************************************************************/ |
735 | acpi_status | 749 | static acpi_status |
736 | acpi_install_gpe_handler(acpi_handle gpe_device, | 750 | acpi_ev_install_gpe_handler(acpi_handle gpe_device, |
737 | u32 gpe_number, | 751 | u32 gpe_number, |
738 | u32 type, acpi_gpe_handler address, void *context) | 752 | u32 type, |
753 | u8 is_raw_handler, | ||
754 | acpi_gpe_handler address, void *context) | ||
739 | { | 755 | { |
740 | struct acpi_gpe_event_info *gpe_event_info; | 756 | struct acpi_gpe_event_info *gpe_event_info; |
741 | struct acpi_gpe_handler_info *handler; | 757 | struct acpi_gpe_handler_info *handler; |
742 | acpi_status status; | 758 | acpi_status status; |
743 | acpi_cpu_flags flags; | 759 | acpi_cpu_flags flags; |
744 | 760 | ||
745 | ACPI_FUNCTION_TRACE(acpi_install_gpe_handler); | 761 | ACPI_FUNCTION_TRACE(ev_install_gpe_handler); |
746 | 762 | ||
747 | /* Parameter validation */ | 763 | /* Parameter validation */ |
748 | 764 | ||
@@ -775,8 +791,10 @@ acpi_install_gpe_handler(acpi_handle gpe_device, | |||
775 | 791 | ||
776 | /* Make sure that there isn't a handler there already */ | 792 | /* Make sure that there isn't a handler there already */ |
777 | 793 | ||
778 | if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == | 794 | if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) == |
779 | ACPI_GPE_DISPATCH_HANDLER) { | 795 | ACPI_GPE_DISPATCH_HANDLER) || |
796 | (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) == | ||
797 | ACPI_GPE_DISPATCH_RAW_HANDLER)) { | ||
780 | status = AE_ALREADY_EXISTS; | 798 | status = AE_ALREADY_EXISTS; |
781 | goto free_and_exit; | 799 | goto free_and_exit; |
782 | } | 800 | } |
@@ -793,9 +811,10 @@ acpi_install_gpe_handler(acpi_handle gpe_device, | |||
793 | * automatically during initialization, in which case it has to be | 811 | * automatically during initialization, in which case it has to be |
794 | * disabled now to avoid spurious execution of the handler. | 812 | * disabled now to avoid spurious execution of the handler. |
795 | */ | 813 | */ |
796 | if (((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) || | 814 | if (((ACPI_GPE_DISPATCH_TYPE(handler->original_flags) == |
797 | (handler->original_flags & ACPI_GPE_DISPATCH_NOTIFY)) && | 815 | ACPI_GPE_DISPATCH_METHOD) || |
798 | gpe_event_info->runtime_count) { | 816 | (ACPI_GPE_DISPATCH_TYPE(handler->original_flags) == |
817 | ACPI_GPE_DISPATCH_NOTIFY)) && gpe_event_info->runtime_count) { | ||
799 | handler->originally_enabled = TRUE; | 818 | handler->originally_enabled = TRUE; |
800 | (void)acpi_ev_remove_gpe_reference(gpe_event_info); | 819 | (void)acpi_ev_remove_gpe_reference(gpe_event_info); |
801 | 820 | ||
@@ -816,7 +835,10 @@ acpi_install_gpe_handler(acpi_handle gpe_device, | |||
816 | 835 | ||
817 | gpe_event_info->flags &= | 836 | gpe_event_info->flags &= |
818 | ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); | 837 | ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); |
819 | gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_HANDLER); | 838 | gpe_event_info->flags |= |
839 | (u8)(type | | ||
840 | (is_raw_handler ? ACPI_GPE_DISPATCH_RAW_HANDLER : | ||
841 | ACPI_GPE_DISPATCH_HANDLER)); | ||
820 | 842 | ||
821 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | 843 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); |
822 | 844 | ||
@@ -830,10 +852,78 @@ free_and_exit: | |||
830 | goto unlock_and_exit; | 852 | goto unlock_and_exit; |
831 | } | 853 | } |
832 | 854 | ||
855 | /******************************************************************************* | ||
856 | * | ||
857 | * FUNCTION: acpi_install_gpe_handler | ||
858 | * | ||
859 | * PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT | ||
860 | * defined GPEs) | ||
861 | * gpe_number - The GPE number within the GPE block | ||
862 | * type - Whether this GPE should be treated as an | ||
863 | * edge- or level-triggered interrupt. | ||
864 | * address - Address of the handler | ||
865 | * context - Value passed to the handler on each GPE | ||
866 | * | ||
867 | * RETURN: Status | ||
868 | * | ||
869 | * DESCRIPTION: Install a handler for a General Purpose Event. | ||
870 | * | ||
871 | ******************************************************************************/ | ||
872 | |||
873 | acpi_status | ||
874 | acpi_install_gpe_handler(acpi_handle gpe_device, | ||
875 | u32 gpe_number, | ||
876 | u32 type, acpi_gpe_handler address, void *context) | ||
877 | { | ||
878 | acpi_status status; | ||
879 | |||
880 | ACPI_FUNCTION_TRACE(acpi_install_gpe_handler); | ||
881 | |||
882 | status = | ||
883 | acpi_ev_install_gpe_handler(gpe_device, gpe_number, type, FALSE, | ||
884 | address, context); | ||
885 | |||
886 | return_ACPI_STATUS(status); | ||
887 | } | ||
888 | |||
833 | ACPI_EXPORT_SYMBOL(acpi_install_gpe_handler) | 889 | ACPI_EXPORT_SYMBOL(acpi_install_gpe_handler) |
834 | 890 | ||
835 | /******************************************************************************* | 891 | /******************************************************************************* |
836 | * | 892 | * |
893 | * FUNCTION: acpi_install_gpe_raw_handler | ||
894 | * | ||
895 | * PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT | ||
896 | * defined GPEs) | ||
897 | * gpe_number - The GPE number within the GPE block | ||
898 | * type - Whether this GPE should be treated as an | ||
899 | * edge- or level-triggered interrupt. | ||
900 | * address - Address of the handler | ||
901 | * context - Value passed to the handler on each GPE | ||
902 | * | ||
903 | * RETURN: Status | ||
904 | * | ||
905 | * DESCRIPTION: Install a handler for a General Purpose Event. | ||
906 | * | ||
907 | ******************************************************************************/ | ||
908 | acpi_status | ||
909 | acpi_install_gpe_raw_handler(acpi_handle gpe_device, | ||
910 | u32 gpe_number, | ||
911 | u32 type, acpi_gpe_handler address, void *context) | ||
912 | { | ||
913 | acpi_status status; | ||
914 | |||
915 | ACPI_FUNCTION_TRACE(acpi_install_gpe_raw_handler); | ||
916 | |||
917 | status = acpi_ev_install_gpe_handler(gpe_device, gpe_number, type, TRUE, | ||
918 | address, context); | ||
919 | |||
920 | return_ACPI_STATUS(status); | ||
921 | } | ||
922 | |||
923 | ACPI_EXPORT_SYMBOL(acpi_install_gpe_raw_handler) | ||
924 | |||
925 | /******************************************************************************* | ||
926 | * | ||
837 | * FUNCTION: acpi_remove_gpe_handler | 927 | * FUNCTION: acpi_remove_gpe_handler |
838 | * | 928 | * |
839 | * PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT | 929 | * PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT |
@@ -880,8 +970,10 @@ acpi_remove_gpe_handler(acpi_handle gpe_device, | |||
880 | 970 | ||
881 | /* Make sure that a handler is indeed installed */ | 971 | /* Make sure that a handler is indeed installed */ |
882 | 972 | ||
883 | if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) != | 973 | if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) != |
884 | ACPI_GPE_DISPATCH_HANDLER) { | 974 | ACPI_GPE_DISPATCH_HANDLER) && |
975 | (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) != | ||
976 | ACPI_GPE_DISPATCH_RAW_HANDLER)) { | ||
885 | status = AE_NOT_EXIST; | 977 | status = AE_NOT_EXIST; |
886 | goto unlock_and_exit; | 978 | goto unlock_and_exit; |
887 | } | 979 | } |
@@ -896,6 +988,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device, | |||
896 | /* Remove the handler */ | 988 | /* Remove the handler */ |
897 | 989 | ||
898 | handler = gpe_event_info->dispatch.handler; | 990 | handler = gpe_event_info->dispatch.handler; |
991 | gpe_event_info->dispatch.handler = NULL; | ||
899 | 992 | ||
900 | /* Restore Method node (if any), set dispatch flags */ | 993 | /* Restore Method node (if any), set dispatch flags */ |
901 | 994 | ||
@@ -909,9 +1002,10 @@ acpi_remove_gpe_handler(acpi_handle gpe_device, | |||
909 | * enabled, it should be enabled at this point to restore the | 1002 | * enabled, it should be enabled at this point to restore the |
910 | * post-initialization configuration. | 1003 | * post-initialization configuration. |
911 | */ | 1004 | */ |
912 | if (((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) || | 1005 | if (((ACPI_GPE_DISPATCH_TYPE(handler->original_flags) == |
913 | (handler->original_flags & ACPI_GPE_DISPATCH_NOTIFY)) && | 1006 | ACPI_GPE_DISPATCH_METHOD) || |
914 | handler->originally_enabled) { | 1007 | (ACPI_GPE_DISPATCH_TYPE(handler->original_flags) == |
1008 | ACPI_GPE_DISPATCH_NOTIFY)) && handler->originally_enabled) { | ||
915 | (void)acpi_ev_add_gpe_reference(gpe_event_info); | 1009 | (void)acpi_ev_add_gpe_reference(gpe_event_info); |
916 | } | 1010 | } |
917 | 1011 | ||
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c index bb8cbf5961bf..df06a23c4197 100644 --- a/drivers/acpi/acpica/evxfevnt.c +++ b/drivers/acpi/acpica/evxfevnt.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c index e889a5304abd..70eb47e3d724 100644 --- a/drivers/acpi/acpica/evxfgpe.c +++ b/drivers/acpi/acpica/evxfgpe.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
@@ -132,7 +132,7 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number) | |||
132 | */ | 132 | */ |
133 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | 133 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); |
134 | if (gpe_event_info) { | 134 | if (gpe_event_info) { |
135 | if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) != | 135 | if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) != |
136 | ACPI_GPE_DISPATCH_NONE) { | 136 | ACPI_GPE_DISPATCH_NONE) { |
137 | status = acpi_ev_add_gpe_reference(gpe_event_info); | 137 | status = acpi_ev_add_gpe_reference(gpe_event_info); |
138 | } else { | 138 | } else { |
@@ -183,6 +183,77 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number) | |||
183 | 183 | ||
184 | ACPI_EXPORT_SYMBOL(acpi_disable_gpe) | 184 | ACPI_EXPORT_SYMBOL(acpi_disable_gpe) |
185 | 185 | ||
186 | /******************************************************************************* | ||
187 | * | ||
188 | * FUNCTION: acpi_set_gpe | ||
189 | * | ||
190 | * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 | ||
191 | * gpe_number - GPE level within the GPE block | ||
192 | * action - ACPI_GPE_ENABLE or ACPI_GPE_DISABLE | ||
193 | * | ||
194 | * RETURN: Status | ||
195 | * | ||
196 | * DESCRIPTION: Enable or disable an individual GPE. This function bypasses | ||
197 | * the reference count mechanism used in the acpi_enable_gpe(), | ||
198 | * acpi_disable_gpe() interfaces. | ||
199 | * This API is typically used by the GPE raw handler mode driver | ||
200 | * to switch between the polling mode and the interrupt mode after | ||
201 | * the driver has enabled the GPE. | ||
202 | * The APIs should be invoked in this order: | ||
203 | * acpi_enable_gpe() <- Ensure the reference count > 0 | ||
204 | * acpi_set_gpe(ACPI_GPE_DISABLE) <- Enter polling mode | ||
205 | * acpi_set_gpe(ACPI_GPE_ENABLE) <- Leave polling mode | ||
206 | * acpi_disable_gpe() <- Decrease the reference count | ||
207 | * | ||
208 | * Note: If a GPE is shared by 2 silicon components, then both the drivers | ||
209 | * should support GPE polling mode or disabling the GPE for long period | ||
210 | * for one driver may break the other. So use it with care since all | ||
211 | * firmware _Lxx/_Exx handlers currently rely on the GPE interrupt mode. | ||
212 | * | ||
213 | ******************************************************************************/ | ||
214 | acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action) | ||
215 | { | ||
216 | struct acpi_gpe_event_info *gpe_event_info; | ||
217 | acpi_status status; | ||
218 | acpi_cpu_flags flags; | ||
219 | |||
220 | ACPI_FUNCTION_TRACE(acpi_set_gpe); | ||
221 | |||
222 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
223 | |||
224 | /* Ensure that we have a valid GPE number */ | ||
225 | |||
226 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | ||
227 | if (!gpe_event_info) { | ||
228 | status = AE_BAD_PARAMETER; | ||
229 | goto unlock_and_exit; | ||
230 | } | ||
231 | |||
232 | /* Perform the action */ | ||
233 | |||
234 | switch (action) { | ||
235 | case ACPI_GPE_ENABLE: | ||
236 | |||
237 | status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE); | ||
238 | break; | ||
239 | |||
240 | case ACPI_GPE_DISABLE: | ||
241 | |||
242 | status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); | ||
243 | break; | ||
244 | |||
245 | default: | ||
246 | |||
247 | status = AE_BAD_PARAMETER; | ||
248 | break; | ||
249 | } | ||
250 | |||
251 | unlock_and_exit: | ||
252 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
253 | return_ACPI_STATUS(status); | ||
254 | } | ||
255 | |||
256 | ACPI_EXPORT_SYMBOL(acpi_set_gpe) | ||
186 | 257 | ||
187 | /******************************************************************************* | 258 | /******************************************************************************* |
188 | * | 259 | * |
@@ -313,7 +384,7 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device, | |||
313 | * known as an "implicit notify". Note: The GPE is assumed to be | 384 | * known as an "implicit notify". Note: The GPE is assumed to be |
314 | * level-triggered (for windows compatibility). | 385 | * level-triggered (for windows compatibility). |
315 | */ | 386 | */ |
316 | if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == | 387 | if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) == |
317 | ACPI_GPE_DISPATCH_NONE) { | 388 | ACPI_GPE_DISPATCH_NONE) { |
318 | /* | 389 | /* |
319 | * This is the first device for implicit notify on this GPE. | 390 | * This is the first device for implicit notify on this GPE. |
@@ -327,7 +398,7 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device, | |||
327 | * If we already have an implicit notify on this GPE, add | 398 | * If we already have an implicit notify on this GPE, add |
328 | * this device to the notify list. | 399 | * this device to the notify list. |
329 | */ | 400 | */ |
330 | if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == | 401 | if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) == |
331 | ACPI_GPE_DISPATCH_NOTIFY) { | 402 | ACPI_GPE_DISPATCH_NOTIFY) { |
332 | 403 | ||
333 | /* Ensure that the device is not already in the list */ | 404 | /* Ensure that the device is not already in the list */ |
@@ -530,6 +601,49 @@ unlock_and_exit: | |||
530 | 601 | ||
531 | ACPI_EXPORT_SYMBOL(acpi_get_gpe_status) | 602 | ACPI_EXPORT_SYMBOL(acpi_get_gpe_status) |
532 | 603 | ||
604 | /******************************************************************************* | ||
605 | * | ||
606 | * FUNCTION: acpi_finish_gpe | ||
607 | * | ||
608 | * PARAMETERS: gpe_device - Namespace node for the GPE Block | ||
609 | * (NULL for FADT defined GPEs) | ||
610 | * gpe_number - GPE level within the GPE block | ||
611 | * | ||
612 | * RETURN: Status | ||
613 | * | ||
614 | * DESCRIPTION: Clear and conditionally reenable a GPE. This completes the GPE | ||
615 | * processing. Intended for use by asynchronous host-installed | ||
616 | * GPE handlers. The GPE is only reenabled if the enable_for_run bit | ||
617 | * is set in the GPE info. | ||
618 | * | ||
619 | ******************************************************************************/ | ||
620 | acpi_status acpi_finish_gpe(acpi_handle gpe_device, u32 gpe_number) | ||
621 | { | ||
622 | struct acpi_gpe_event_info *gpe_event_info; | ||
623 | acpi_status status; | ||
624 | acpi_cpu_flags flags; | ||
625 | |||
626 | ACPI_FUNCTION_TRACE(acpi_finish_gpe); | ||
627 | |||
628 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | ||
629 | |||
630 | /* Ensure that we have a valid GPE number */ | ||
631 | |||
632 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | ||
633 | if (!gpe_event_info) { | ||
634 | status = AE_BAD_PARAMETER; | ||
635 | goto unlock_and_exit; | ||
636 | } | ||
637 | |||
638 | status = acpi_ev_finish_gpe(gpe_event_info); | ||
639 | |||
640 | unlock_and_exit: | ||
641 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | ||
642 | return_ACPI_STATUS(status); | ||
643 | } | ||
644 | |||
645 | ACPI_EXPORT_SYMBOL(acpi_finish_gpe) | ||
646 | |||
533 | /****************************************************************************** | 647 | /****************************************************************************** |
534 | * | 648 | * |
535 | * FUNCTION: acpi_disable_all_gpes | 649 | * FUNCTION: acpi_disable_all_gpes |
@@ -604,7 +718,6 @@ ACPI_EXPORT_SYMBOL(acpi_enable_all_runtime_gpes) | |||
604 | * all GPE blocks. | 718 | * all GPE blocks. |
605 | * | 719 | * |
606 | ******************************************************************************/ | 720 | ******************************************************************************/ |
607 | |||
608 | acpi_status acpi_enable_all_wakeup_gpes(void) | 721 | acpi_status acpi_enable_all_wakeup_gpes(void) |
609 | { | 722 | { |
610 | acpi_status status; | 723 | acpi_status status; |
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c index 2d6f187939c7..f21afbab03f7 100644 --- a/drivers/acpi/acpica/evxfregn.c +++ b/drivers/acpi/acpica/evxfregn.c | |||
@@ -6,7 +6,7 @@ | |||
6 | *****************************************************************************/ | 6 | *****************************************************************************/ |
7 | 7 | ||
8 | /* | 8 | /* |
9 | * Copyright (C) 2000 - 2014, Intel Corp. | 9 | * Copyright (C) 2000 - 2015, Intel Corp. |
10 | * All rights reserved. | 10 | * All rights reserved. |
11 | * | 11 | * |
12 | * Redistribution and use in source and binary forms, with or without | 12 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c index 7d2949420db7..6e0df2b9d5a4 100644 --- a/drivers/acpi/acpica/exconfig.c +++ b/drivers/acpi/acpica/exconfig.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c index c545386fee96..89a976b4ccf2 100644 --- a/drivers/acpi/acpica/exconvrt.c +++ b/drivers/acpi/acpica/exconvrt.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c index 95d23dabcfbb..aaeea4840aaa 100644 --- a/drivers/acpi/acpica/excreate.c +++ b/drivers/acpi/acpica/excreate.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c index 6fbfad47518c..e67d0aca3fe6 100644 --- a/drivers/acpi/acpica/exdebug.c +++ b/drivers/acpi/acpica/exdebug.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c index 0f23c3f2678e..7c213b6b6472 100644 --- a/drivers/acpi/acpica/exdump.c +++ b/drivers/acpi/acpica/exdump.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c index b994845ed359..c161dd974f74 100644 --- a/drivers/acpi/acpica/exfield.c +++ b/drivers/acpi/acpica/exfield.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c index 1d1b27a96c5b..49479927e7f7 100644 --- a/drivers/acpi/acpica/exfldio.c +++ b/drivers/acpi/acpica/exfldio.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c index 2207e624f538..b56fc9d6f48e 100644 --- a/drivers/acpi/acpica/exmisc.c +++ b/drivers/acpi/acpica/exmisc.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c index b49ea2a95f4f..472030f2b5bb 100644 --- a/drivers/acpi/acpica/exmutex.c +++ b/drivers/acpi/acpica/exmutex.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c index dbb03b544e8c..453b00c30177 100644 --- a/drivers/acpi/acpica/exnames.c +++ b/drivers/acpi/acpica/exnames.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c index 1b8e94104407..77930683ab7d 100644 --- a/drivers/acpi/acpica/exoparg1.c +++ b/drivers/acpi/acpica/exoparg1.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c index 2ede656ee26a..fcc618aa2061 100644 --- a/drivers/acpi/acpica/exoparg2.c +++ b/drivers/acpi/acpica/exoparg2.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c index 363767cf01e5..b813fed95e56 100644 --- a/drivers/acpi/acpica/exoparg3.c +++ b/drivers/acpi/acpica/exoparg3.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c index 29e9e99f7fe3..c930edda3f65 100644 --- a/drivers/acpi/acpica/exoparg6.c +++ b/drivers/acpi/acpica/exoparg6.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c index 118e942005e5..4c2836dc825b 100644 --- a/drivers/acpi/acpica/exprep.c +++ b/drivers/acpi/acpica/exprep.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c index cd5288a257a9..0fe188e238ef 100644 --- a/drivers/acpi/acpica/exregion.c +++ b/drivers/acpi/acpica/exregion.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c index ab060261b43e..c7e3b929aa85 100644 --- a/drivers/acpi/acpica/exresnte.c +++ b/drivers/acpi/acpica/exresnte.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c index 3cde553bcbe1..b6b7f3af29e4 100644 --- a/drivers/acpi/acpica/exresolv.c +++ b/drivers/acpi/acpica/exresolv.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c index 3af8de3fcea4..d2964af9ad4d 100644 --- a/drivers/acpi/acpica/exresop.c +++ b/drivers/acpi/acpica/exresop.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c index daf49f7ea311..a7eee2400ce0 100644 --- a/drivers/acpi/acpica/exstore.c +++ b/drivers/acpi/acpica/exstore.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c index 04bd16c08f9e..3101607b4efe 100644 --- a/drivers/acpi/acpica/exstoren.c +++ b/drivers/acpi/acpica/exstoren.c | |||
@@ -6,7 +6,7 @@ | |||
6 | *****************************************************************************/ | 6 | *****************************************************************************/ |
7 | 7 | ||
8 | /* | 8 | /* |
9 | * Copyright (C) 2000 - 2014, Intel Corp. | 9 | * Copyright (C) 2000 - 2015, Intel Corp. |
10 | * All rights reserved. | 10 | * All rights reserved. |
11 | * | 11 | * |
12 | * Redistribution and use in source and binary forms, with or without | 12 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c index fd11018b0168..6fa3c8d8fc5f 100644 --- a/drivers/acpi/acpica/exstorob.c +++ b/drivers/acpi/acpica/exstorob.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c index f7da64123ed5..05450656fe3d 100644 --- a/drivers/acpi/acpica/exsystem.c +++ b/drivers/acpi/acpica/exsystem.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c index d9d72dff2a76..3f4225e95d93 100644 --- a/drivers/acpi/acpica/exutils.c +++ b/drivers/acpi/acpica/exutils.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c index 1e66d960fc11..e5c5949f9081 100644 --- a/drivers/acpi/acpica/hwacpi.c +++ b/drivers/acpi/acpica/hwacpi.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c index 858fdd6be598..e5599f610808 100644 --- a/drivers/acpi/acpica/hwesleep.c +++ b/drivers/acpi/acpica/hwesleep.c | |||
@@ -6,7 +6,7 @@ | |||
6 | *****************************************************************************/ | 6 | *****************************************************************************/ |
7 | 7 | ||
8 | /* | 8 | /* |
9 | * Copyright (C) 2000 - 2014, Intel Corp. | 9 | * Copyright (C) 2000 - 2015, Intel Corp. |
10 | * All rights reserved. | 10 | * All rights reserved. |
11 | * | 11 | * |
12 | * Redistribution and use in source and binary forms, with or without | 12 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c index 494027f5c067..84bc550f4f1d 100644 --- a/drivers/acpi/acpica/hwgpe.c +++ b/drivers/acpi/acpica/hwgpe.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
@@ -54,6 +54,10 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, | |||
54 | struct acpi_gpe_block_info *gpe_block, | 54 | struct acpi_gpe_block_info *gpe_block, |
55 | void *context); | 55 | void *context); |
56 | 56 | ||
57 | static acpi_status | ||
58 | acpi_hw_gpe_enable_write(u8 enable_mask, | ||
59 | struct acpi_gpe_register_info *gpe_register_info); | ||
60 | |||
57 | /****************************************************************************** | 61 | /****************************************************************************** |
58 | * | 62 | * |
59 | * FUNCTION: acpi_hw_get_gpe_register_bit | 63 | * FUNCTION: acpi_hw_get_gpe_register_bit |
@@ -146,7 +150,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action) | |||
146 | 150 | ||
147 | status = acpi_hw_write(enable_mask, &gpe_register_info->enable_address); | 151 | status = acpi_hw_write(enable_mask, &gpe_register_info->enable_address); |
148 | if (ACPI_SUCCESS(status) && (action & ACPI_GPE_SAVE_MASK)) { | 152 | if (ACPI_SUCCESS(status) && (action & ACPI_GPE_SAVE_MASK)) { |
149 | gpe_register_info->enable_mask = enable_mask; | 153 | gpe_register_info->enable_mask = (u8)enable_mask; |
150 | } | 154 | } |
151 | return (status); | 155 | return (status); |
152 | } | 156 | } |
@@ -221,7 +225,7 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info, | |||
221 | 225 | ||
222 | /* GPE currently handled? */ | 226 | /* GPE currently handled? */ |
223 | 227 | ||
224 | if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) != | 228 | if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) != |
225 | ACPI_GPE_DISPATCH_NONE) { | 229 | ACPI_GPE_DISPATCH_NONE) { |
226 | local_event_status |= ACPI_EVENT_FLAG_HAS_HANDLER; | 230 | local_event_status |= ACPI_EVENT_FLAG_HAS_HANDLER; |
227 | } | 231 | } |
diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c index 6aade8e1d2a1..c5214dec4988 100644 --- a/drivers/acpi/acpica/hwpci.c +++ b/drivers/acpi/acpica/hwpci.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c index a4c34d2c556b..3cf77afd142c 100644 --- a/drivers/acpi/acpica/hwregs.c +++ b/drivers/acpi/acpica/hwregs.c | |||
@@ -6,7 +6,7 @@ | |||
6 | ******************************************************************************/ | 6 | ******************************************************************************/ |
7 | 7 | ||
8 | /* | 8 | /* |
9 | * Copyright (C) 2000 - 2014, Intel Corp. | 9 | * Copyright (C) 2000 - 2015, Intel Corp. |
10 | * All rights reserved. | 10 | * All rights reserved. |
11 | * | 11 | * |
12 | * Redistribution and use in source and binary forms, with or without | 12 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c index d590693eb54e..7d21cae6d602 100644 --- a/drivers/acpi/acpica/hwsleep.c +++ b/drivers/acpi/acpica/hwsleep.c | |||
@@ -6,7 +6,7 @@ | |||
6 | *****************************************************************************/ | 6 | *****************************************************************************/ |
7 | 7 | ||
8 | /* | 8 | /* |
9 | * Copyright (C) 2000 - 2014, Intel Corp. | 9 | * Copyright (C) 2000 - 2015, Intel Corp. |
10 | * All rights reserved. | 10 | * All rights reserved. |
11 | * | 11 | * |
12 | * Redistribution and use in source and binary forms, with or without | 12 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c index 76ab5c1a814e..675c709a300b 100644 --- a/drivers/acpi/acpica/hwtimer.c +++ b/drivers/acpi/acpica/hwtimer.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c index 6b919127cd9d..2bd33fe56cb3 100644 --- a/drivers/acpi/acpica/hwvalid.c +++ b/drivers/acpi/acpica/hwvalid.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c index 96d007df65ec..5f97468df8ff 100644 --- a/drivers/acpi/acpica/hwxface.c +++ b/drivers/acpi/acpica/hwxface.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c index 6921c7f3d208..3b3767698827 100644 --- a/drivers/acpi/acpica/hwxfsleep.c +++ b/drivers/acpi/acpica/hwxfsleep.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c index f1249e3463be..24fa19a76d70 100644 --- a/drivers/acpi/acpica/nsaccess.c +++ b/drivers/acpi/acpica/nsaccess.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c index 607eb9e5150d..e107f929d9cf 100644 --- a/drivers/acpi/acpica/nsalloc.c +++ b/drivers/acpi/acpica/nsalloc.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/nsarguments.c b/drivers/acpi/acpica/nsarguments.c index 80fcfc8c9c1b..5d347a71bd0b 100644 --- a/drivers/acpi/acpica/nsarguments.c +++ b/drivers/acpi/acpica/nsarguments.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c index b55642c4ee58..1a8b39c8d969 100644 --- a/drivers/acpi/acpica/nsconvert.c +++ b/drivers/acpi/acpica/nsconvert.c | |||
@@ -6,7 +6,7 @@ | |||
6 | *****************************************************************************/ | 6 | *****************************************************************************/ |
7 | 7 | ||
8 | /* | 8 | /* |
9 | * Copyright (C) 2000 - 2014, Intel Corp. | 9 | * Copyright (C) 2000 - 2015, Intel Corp. |
10 | * All rights reserved. | 10 | * All rights reserved. |
11 | * | 11 | * |
12 | * Redistribution and use in source and binary forms, with or without | 12 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c index 3d88ef4a3e0d..80f097eb7381 100644 --- a/drivers/acpi/acpica/nsdump.c +++ b/drivers/acpi/acpica/nsdump.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c index 42d37109aa5d..7dc367e6fe09 100644 --- a/drivers/acpi/acpica/nsdumpdv.c +++ b/drivers/acpi/acpica/nsdumpdv.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c index e634a05974db..7bcc68f57afa 100644 --- a/drivers/acpi/acpica/nseval.c +++ b/drivers/acpi/acpica/nseval.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c index a3fb7e4c0809..4a85c4517988 100644 --- a/drivers/acpi/acpica/nsinit.c +++ b/drivers/acpi/acpica/nsinit.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c index 7c9d0181f341..bd6cd4a81316 100644 --- a/drivers/acpi/acpica/nsload.c +++ b/drivers/acpi/acpica/nsload.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c index 7eee0a6f02f6..d293d9748036 100644 --- a/drivers/acpi/acpica/nsnames.c +++ b/drivers/acpi/acpica/nsnames.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c index a42ee9d6970d..677bc9330e64 100644 --- a/drivers/acpi/acpica/nsobject.c +++ b/drivers/acpi/acpica/nsobject.c | |||
@@ -6,7 +6,7 @@ | |||
6 | ******************************************************************************/ | 6 | ******************************************************************************/ |
7 | 7 | ||
8 | /* | 8 | /* |
9 | * Copyright (C) 2000 - 2014, Intel Corp. | 9 | * Copyright (C) 2000 - 2015, Intel Corp. |
10 | * All rights reserved. | 10 | * All rights reserved. |
11 | * | 11 | * |
12 | * Redistribution and use in source and binary forms, with or without | 12 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c index e83cff31754b..c95a119767b5 100644 --- a/drivers/acpi/acpica/nsparse.c +++ b/drivers/acpi/acpica/nsparse.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c index 392910ffbed9..0eb54315b4be 100644 --- a/drivers/acpi/acpica/nspredef.c +++ b/drivers/acpi/acpica/nspredef.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c index 1b13b921dda9..8b79958b7aca 100644 --- a/drivers/acpi/acpica/nsprepkg.c +++ b/drivers/acpi/acpica/nsprepkg.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c index 7e417aa5c91e..151fcd95ba84 100644 --- a/drivers/acpi/acpica/nsrepair.c +++ b/drivers/acpi/acpica/nsrepair.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c index b09e6bef72b8..c30672d23878 100644 --- a/drivers/acpi/acpica/nsrepair2.c +++ b/drivers/acpi/acpica/nsrepair2.c | |||
@@ -6,7 +6,7 @@ | |||
6 | *****************************************************************************/ | 6 | *****************************************************************************/ |
7 | 7 | ||
8 | /* | 8 | /* |
9 | * Copyright (C) 2000 - 2014, Intel Corp. | 9 | * Copyright (C) 2000 - 2015, Intel Corp. |
10 | * All rights reserved. | 10 | * All rights reserved. |
11 | * | 11 | * |
12 | * Redistribution and use in source and binary forms, with or without | 12 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c index af1cc42a8aa1..4a9d4a66016e 100644 --- a/drivers/acpi/acpica/nssearch.c +++ b/drivers/acpi/acpica/nssearch.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c index 4a5e3f5c0ff7..6ad02008c0c2 100644 --- a/drivers/acpi/acpica/nsutils.c +++ b/drivers/acpi/acpica/nsutils.c | |||
@@ -6,7 +6,7 @@ | |||
6 | *****************************************************************************/ | 6 | *****************************************************************************/ |
7 | 7 | ||
8 | /* | 8 | /* |
9 | * Copyright (C) 2000 - 2014, Intel Corp. | 9 | * Copyright (C) 2000 - 2015, Intel Corp. |
10 | * All rights reserved. | 10 | * All rights reserved. |
11 | * | 11 | * |
12 | * Redistribution and use in source and binary forms, with or without | 12 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c index 4758a1f2ce22..c68609a2bc1b 100644 --- a/drivers/acpi/acpica/nswalk.c +++ b/drivers/acpi/acpica/nswalk.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c index 4bd558bf10d2..b6030a2deee1 100644 --- a/drivers/acpi/acpica/nsxfeval.c +++ b/drivers/acpi/acpica/nsxfeval.c | |||
@@ -6,7 +6,7 @@ | |||
6 | ******************************************************************************/ | 6 | ******************************************************************************/ |
7 | 7 | ||
8 | /* | 8 | /* |
9 | * Copyright (C) 2000 - 2014, Intel Corp. | 9 | * Copyright (C) 2000 - 2015, Intel Corp. |
10 | * All rights reserved. | 10 | * All rights reserved. |
11 | * | 11 | * |
12 | * Redistribution and use in source and binary forms, with or without | 12 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c index 8c6c11ce9760..d66c326485d8 100644 --- a/drivers/acpi/acpica/nsxfname.c +++ b/drivers/acpi/acpica/nsxfname.c | |||
@@ -6,7 +6,7 @@ | |||
6 | *****************************************************************************/ | 6 | *****************************************************************************/ |
7 | 7 | ||
8 | /* | 8 | /* |
9 | * Copyright (C) 2000 - 2014, Intel Corp. | 9 | * Copyright (C) 2000 - 2015, Intel Corp. |
10 | * All rights reserved. | 10 | * All rights reserved. |
11 | * | 11 | * |
12 | * Redistribution and use in source and binary forms, with or without | 12 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c index dae9401be7a2..793383501f81 100644 --- a/drivers/acpi/acpica/nsxfobj.c +++ b/drivers/acpi/acpica/nsxfobj.c | |||
@@ -6,7 +6,7 @@ | |||
6 | ******************************************************************************/ | 6 | ******************************************************************************/ |
7 | 7 | ||
8 | /* | 8 | /* |
9 | * Copyright (C) 2000 - 2014, Intel Corp. | 9 | * Copyright (C) 2000 - 2015, Intel Corp. |
10 | * All rights reserved. | 10 | * All rights reserved. |
11 | * | 11 | * |
12 | * Redistribution and use in source and binary forms, with or without | 12 | * Redistribution and use in source and binary forms, with or without |
@@ -53,50 +53,6 @@ ACPI_MODULE_NAME("nsxfobj") | |||
53 | 53 | ||
54 | /******************************************************************************* | 54 | /******************************************************************************* |
55 | * | 55 | * |
56 | * FUNCTION: acpi_get_id | ||
57 | * | ||
58 | * PARAMETERS: Handle - Handle of object whose id is desired | ||
59 | * ret_id - Where the id will be placed | ||
60 | * | ||
61 | * RETURN: Status | ||
62 | * | ||
63 | * DESCRIPTION: This routine returns the owner id associated with a handle | ||
64 | * | ||
65 | ******************************************************************************/ | ||
66 | acpi_status acpi_get_id(acpi_handle handle, acpi_owner_id * ret_id) | ||
67 | { | ||
68 | struct acpi_namespace_node *node; | ||
69 | acpi_status status; | ||
70 | |||
71 | /* Parameter Validation */ | ||
72 | |||
73 | if (!ret_id) { | ||
74 | return (AE_BAD_PARAMETER); | ||
75 | } | ||
76 | |||
77 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | ||
78 | if (ACPI_FAILURE(status)) { | ||
79 | return (status); | ||
80 | } | ||
81 | |||
82 | /* Convert and validate the handle */ | ||
83 | |||
84 | node = acpi_ns_validate_handle(handle); | ||
85 | if (!node) { | ||
86 | (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | ||
87 | return (AE_BAD_PARAMETER); | ||
88 | } | ||
89 | |||
90 | *ret_id = node->owner_id; | ||
91 | |||
92 | status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); | ||
93 | return (status); | ||
94 | } | ||
95 | |||
96 | ACPI_EXPORT_SYMBOL(acpi_get_id) | ||
97 | |||
98 | /******************************************************************************* | ||
99 | * | ||
100 | * FUNCTION: acpi_get_type | 56 | * FUNCTION: acpi_get_type |
101 | * | 57 | * |
102 | * PARAMETERS: handle - Handle of object whose type is desired | 58 | * PARAMETERS: handle - Handle of object whose type is desired |
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c index 314d314340ae..6d038770577b 100644 --- a/drivers/acpi/acpica/psargs.c +++ b/drivers/acpi/acpica/psargs.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c index b058e2390fdd..90437227d790 100644 --- a/drivers/acpi/acpica/psloop.c +++ b/drivers/acpi/acpica/psloop.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c index a6885077d59e..2f5ddd806c58 100644 --- a/drivers/acpi/acpica/psobject.c +++ b/drivers/acpi/acpica/psobject.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c index 1755d2ac5656..1af4a405e351 100644 --- a/drivers/acpi/acpica/psopcode.c +++ b/drivers/acpi/acpica/psopcode.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c index 0d8d37ffd04d..e18e7c47f482 100644 --- a/drivers/acpi/acpica/psopinfo.c +++ b/drivers/acpi/acpica/psopinfo.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c index 6d27b597394e..a555f7f7b9a2 100644 --- a/drivers/acpi/acpica/psparse.c +++ b/drivers/acpi/acpica/psparse.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c index 32d250feea21..9d669cc6cb62 100644 --- a/drivers/acpi/acpica/psscope.c +++ b/drivers/acpi/acpica/psscope.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c index 0b64181e7720..89984f30addc 100644 --- a/drivers/acpi/acpica/pstree.c +++ b/drivers/acpi/acpica/pstree.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c index 3cd48802eede..960505ab409a 100644 --- a/drivers/acpi/acpica/psutils.c +++ b/drivers/acpi/acpica/psutils.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c index 9cb07e1e76d9..ba5f69171288 100644 --- a/drivers/acpi/acpica/pswalk.c +++ b/drivers/acpi/acpica/pswalk.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c index e135acaa5e1c..841a5ea06094 100644 --- a/drivers/acpi/acpica/psxface.c +++ b/drivers/acpi/acpica/psxface.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c index 916fd095ff34..66d406e8fe36 100644 --- a/drivers/acpi/acpica/rsaddr.c +++ b/drivers/acpi/acpica/rsaddr.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
@@ -74,7 +74,7 @@ struct acpi_rsconvert_info acpi_rs_convert_address16[5] = { | |||
74 | * Address Translation Offset | 74 | * Address Translation Offset |
75 | * Address Length | 75 | * Address Length |
76 | */ | 76 | */ |
77 | {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.address16.granularity), | 77 | {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.address16.address.granularity), |
78 | AML_OFFSET(address16.granularity), | 78 | AML_OFFSET(address16.granularity), |
79 | 5}, | 79 | 5}, |
80 | 80 | ||
@@ -112,7 +112,7 @@ struct acpi_rsconvert_info acpi_rs_convert_address32[5] = { | |||
112 | * Address Translation Offset | 112 | * Address Translation Offset |
113 | * Address Length | 113 | * Address Length |
114 | */ | 114 | */ |
115 | {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.address32.granularity), | 115 | {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.address32.address.granularity), |
116 | AML_OFFSET(address32.granularity), | 116 | AML_OFFSET(address32.granularity), |
117 | 5}, | 117 | 5}, |
118 | 118 | ||
@@ -150,7 +150,7 @@ struct acpi_rsconvert_info acpi_rs_convert_address64[5] = { | |||
150 | * Address Translation Offset | 150 | * Address Translation Offset |
151 | * Address Length | 151 | * Address Length |
152 | */ | 152 | */ |
153 | {ACPI_RSC_MOVE64, ACPI_RS_OFFSET(data.address64.granularity), | 153 | {ACPI_RSC_MOVE64, ACPI_RS_OFFSET(data.address64.address.granularity), |
154 | AML_OFFSET(address64.granularity), | 154 | AML_OFFSET(address64.granularity), |
155 | 5}, | 155 | 5}, |
156 | 156 | ||
@@ -194,7 +194,8 @@ struct acpi_rsconvert_info acpi_rs_convert_ext_address64[5] = { | |||
194 | * Address Length | 194 | * Address Length |
195 | * Type-Specific Attribute | 195 | * Type-Specific Attribute |
196 | */ | 196 | */ |
197 | {ACPI_RSC_MOVE64, ACPI_RS_OFFSET(data.ext_address64.granularity), | 197 | {ACPI_RSC_MOVE64, |
198 | ACPI_RS_OFFSET(data.ext_address64.address.granularity), | ||
198 | AML_OFFSET(ext_address64.granularity), | 199 | AML_OFFSET(ext_address64.granularity), |
199 | 6} | 200 | 6} |
200 | }; | 201 | }; |
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c index 689556744b03..cb739a694931 100644 --- a/drivers/acpi/acpica/rscalc.c +++ b/drivers/acpi/acpica/rscalc.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c index 049d9c22a0f9..15434e4c9b34 100644 --- a/drivers/acpi/acpica/rscreate.c +++ b/drivers/acpi/acpica/rscreate.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c index c3c56b5a9788..1539394c8c52 100644 --- a/drivers/acpi/acpica/rsdump.c +++ b/drivers/acpi/acpica/rsdump.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/rsdumpinfo.c b/drivers/acpi/acpica/rsdumpinfo.c index 2f9332d5c973..b29d9ec63d1b 100644 --- a/drivers/acpi/acpica/rsdumpinfo.c +++ b/drivers/acpi/acpica/rsdumpinfo.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
@@ -183,15 +183,15 @@ struct acpi_rsdump_info acpi_rs_dump_address16[8] = { | |||
183 | {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address16), | 183 | {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address16), |
184 | "16-Bit WORD Address Space", NULL}, | 184 | "16-Bit WORD Address Space", NULL}, |
185 | {ACPI_RSD_ADDRESS, 0, NULL, NULL}, | 185 | {ACPI_RSD_ADDRESS, 0, NULL, NULL}, |
186 | {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.granularity), "Granularity", | 186 | {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address.granularity), |
187 | NULL}, | 187 | "Granularity", NULL}, |
188 | {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.minimum), "Address Minimum", | 188 | {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address.minimum), |
189 | NULL}, | 189 | "Address Minimum", NULL}, |
190 | {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.maximum), "Address Maximum", | 190 | {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address.maximum), |
191 | NULL}, | 191 | "Address Maximum", NULL}, |
192 | {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.translation_offset), | 192 | {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address.translation_offset), |
193 | "Translation Offset", NULL}, | 193 | "Translation Offset", NULL}, |
194 | {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address_length), | 194 | {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address.address_length), |
195 | "Address Length", NULL}, | 195 | "Address Length", NULL}, |
196 | {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address16.resource_source), NULL, NULL} | 196 | {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address16.resource_source), NULL, NULL} |
197 | }; | 197 | }; |
@@ -200,15 +200,15 @@ struct acpi_rsdump_info acpi_rs_dump_address32[8] = { | |||
200 | {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address32), | 200 | {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address32), |
201 | "32-Bit DWORD Address Space", NULL}, | 201 | "32-Bit DWORD Address Space", NULL}, |
202 | {ACPI_RSD_ADDRESS, 0, NULL, NULL}, | 202 | {ACPI_RSD_ADDRESS, 0, NULL, NULL}, |
203 | {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.granularity), "Granularity", | 203 | {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address.granularity), |
204 | NULL}, | 204 | "Granularity", NULL}, |
205 | {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.minimum), "Address Minimum", | 205 | {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address.minimum), |
206 | NULL}, | 206 | "Address Minimum", NULL}, |
207 | {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.maximum), "Address Maximum", | 207 | {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address.maximum), |
208 | NULL}, | 208 | "Address Maximum", NULL}, |
209 | {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.translation_offset), | 209 | {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address.translation_offset), |
210 | "Translation Offset", NULL}, | 210 | "Translation Offset", NULL}, |
211 | {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address_length), | 211 | {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address.address_length), |
212 | "Address Length", NULL}, | 212 | "Address Length", NULL}, |
213 | {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address32.resource_source), NULL, NULL} | 213 | {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address32.resource_source), NULL, NULL} |
214 | }; | 214 | }; |
@@ -217,15 +217,15 @@ struct acpi_rsdump_info acpi_rs_dump_address64[8] = { | |||
217 | {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address64), | 217 | {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address64), |
218 | "64-Bit QWORD Address Space", NULL}, | 218 | "64-Bit QWORD Address Space", NULL}, |
219 | {ACPI_RSD_ADDRESS, 0, NULL, NULL}, | 219 | {ACPI_RSD_ADDRESS, 0, NULL, NULL}, |
220 | {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.granularity), "Granularity", | 220 | {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address.granularity), |
221 | NULL}, | 221 | "Granularity", NULL}, |
222 | {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.minimum), "Address Minimum", | 222 | {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address.minimum), |
223 | NULL}, | 223 | "Address Minimum", NULL}, |
224 | {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.maximum), "Address Maximum", | 224 | {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address.maximum), |
225 | NULL}, | 225 | "Address Maximum", NULL}, |
226 | {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.translation_offset), | 226 | {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address.translation_offset), |
227 | "Translation Offset", NULL}, | 227 | "Translation Offset", NULL}, |
228 | {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address_length), | 228 | {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address.address_length), |
229 | "Address Length", NULL}, | 229 | "Address Length", NULL}, |
230 | {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address64.resource_source), NULL, NULL} | 230 | {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address64.resource_source), NULL, NULL} |
231 | }; | 231 | }; |
@@ -234,15 +234,16 @@ struct acpi_rsdump_info acpi_rs_dump_ext_address64[8] = { | |||
234 | {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_ext_address64), | 234 | {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_ext_address64), |
235 | "64-Bit Extended Address Space", NULL}, | 235 | "64-Bit Extended Address Space", NULL}, |
236 | {ACPI_RSD_ADDRESS, 0, NULL, NULL}, | 236 | {ACPI_RSD_ADDRESS, 0, NULL, NULL}, |
237 | {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.granularity), | 237 | {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address.granularity), |
238 | "Granularity", NULL}, | 238 | "Granularity", NULL}, |
239 | {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.minimum), | 239 | {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address.minimum), |
240 | "Address Minimum", NULL}, | 240 | "Address Minimum", NULL}, |
241 | {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.maximum), | 241 | {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address.maximum), |
242 | "Address Maximum", NULL}, | 242 | "Address Maximum", NULL}, |
243 | {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.translation_offset), | 243 | {ACPI_RSD_UINT64, |
244 | ACPI_RSD_OFFSET(ext_address64.address.translation_offset), | ||
244 | "Translation Offset", NULL}, | 245 | "Translation Offset", NULL}, |
245 | {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address_length), | 246 | {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address.address_length), |
246 | "Address Length", NULL}, | 247 | "Address Length", NULL}, |
247 | {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.type_specific), | 248 | {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.type_specific), |
248 | "Type-Specific Attribute", NULL} | 249 | "Type-Specific Attribute", NULL} |
diff --git a/drivers/acpi/acpica/rsinfo.c b/drivers/acpi/acpica/rsinfo.c index 9d3f8a9a24bd..edecfc675979 100644 --- a/drivers/acpi/acpica/rsinfo.c +++ b/drivers/acpi/acpica/rsinfo.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/rsio.c b/drivers/acpi/acpica/rsio.c index 19d64873290a..5adba018bab0 100644 --- a/drivers/acpi/acpica/rsio.c +++ b/drivers/acpi/acpica/rsio.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/rsirq.c b/drivers/acpi/acpica/rsirq.c index 3461f7db26df..07cfa70a475b 100644 --- a/drivers/acpi/acpica/rsirq.c +++ b/drivers/acpi/acpica/rsirq.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c index 77291293af64..50d5be2ee062 100644 --- a/drivers/acpi/acpica/rslist.c +++ b/drivers/acpi/acpica/rslist.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/rsmemory.c b/drivers/acpi/acpica/rsmemory.c index eab4483ff5f8..c6b80862030e 100644 --- a/drivers/acpi/acpica/rsmemory.c +++ b/drivers/acpi/acpica/rsmemory.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c index 41eea4bc089c..1fe49d223663 100644 --- a/drivers/acpi/acpica/rsmisc.c +++ b/drivers/acpi/acpica/rsmisc.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/rsserial.c b/drivers/acpi/acpica/rsserial.c index 9e8407223d95..4c8c6fe6ea74 100644 --- a/drivers/acpi/acpica/rsserial.c +++ b/drivers/acpi/acpica/rsserial.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c index 897a5ceb0420..ece3cd60cc6a 100644 --- a/drivers/acpi/acpica/rsutils.c +++ b/drivers/acpi/acpica/rsutils.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c index 877ab9202133..8e6276df0226 100644 --- a/drivers/acpi/acpica/rsxface.c +++ b/drivers/acpi/acpica/rsxface.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
@@ -60,11 +60,11 @@ ACPI_MODULE_NAME("rsxface") | |||
60 | ACPI_COPY_FIELD(out, in, min_address_fixed); \ | 60 | ACPI_COPY_FIELD(out, in, min_address_fixed); \ |
61 | ACPI_COPY_FIELD(out, in, max_address_fixed); \ | 61 | ACPI_COPY_FIELD(out, in, max_address_fixed); \ |
62 | ACPI_COPY_FIELD(out, in, info); \ | 62 | ACPI_COPY_FIELD(out, in, info); \ |
63 | ACPI_COPY_FIELD(out, in, granularity); \ | 63 | ACPI_COPY_FIELD(out, in, address.granularity); \ |
64 | ACPI_COPY_FIELD(out, in, minimum); \ | 64 | ACPI_COPY_FIELD(out, in, address.minimum); \ |
65 | ACPI_COPY_FIELD(out, in, maximum); \ | 65 | ACPI_COPY_FIELD(out, in, address.maximum); \ |
66 | ACPI_COPY_FIELD(out, in, translation_offset); \ | 66 | ACPI_COPY_FIELD(out, in, address.translation_offset); \ |
67 | ACPI_COPY_FIELD(out, in, address_length); \ | 67 | ACPI_COPY_FIELD(out, in, address.address_length); \ |
68 | ACPI_COPY_FIELD(out, in, resource_source); | 68 | ACPI_COPY_FIELD(out, in, resource_source); |
69 | /* Local prototypes */ | 69 | /* Local prototypes */ |
70 | static acpi_status | 70 | static acpi_status |
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c index f499c10ceb4a..6a144957aadd 100644 --- a/drivers/acpi/acpica/tbdata.c +++ b/drivers/acpi/acpica/tbdata.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c index 41519a958083..7d2486005e3f 100644 --- a/drivers/acpi/acpica/tbfadt.c +++ b/drivers/acpi/acpica/tbfadt.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c index cb947700206c..0b879fcfef67 100644 --- a/drivers/acpi/acpica/tbfind.c +++ b/drivers/acpi/acpica/tbfind.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c index 755b90c40ddf..9bad45e63a45 100644 --- a/drivers/acpi/acpica/tbinstal.c +++ b/drivers/acpi/acpica/tbinstal.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c index df3bb20ea325..ef16c06e5091 100644 --- a/drivers/acpi/acpica/tbprint.c +++ b/drivers/acpi/acpica/tbprint.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c index 6b1ca9991b90..6559a58439c5 100644 --- a/drivers/acpi/acpica/tbutils.c +++ b/drivers/acpi/acpica/tbutils.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c index 6482b0ded652..60e94f87f27a 100644 --- a/drivers/acpi/acpica/tbxface.c +++ b/drivers/acpi/acpica/tbxface.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
@@ -265,45 +265,6 @@ ACPI_EXPORT_SYMBOL(acpi_get_table_header) | |||
265 | 265 | ||
266 | /******************************************************************************* | 266 | /******************************************************************************* |
267 | * | 267 | * |
268 | * FUNCTION: acpi_unload_table_id | ||
269 | * | ||
270 | * PARAMETERS: id - Owner ID of the table to be removed. | ||
271 | * | ||
272 | * RETURN: Status | ||
273 | * | ||
274 | * DESCRIPTION: This routine is used to force the unload of a table (by id) | ||
275 | * | ||
276 | ******************************************************************************/ | ||
277 | acpi_status acpi_unload_table_id(acpi_owner_id id) | ||
278 | { | ||
279 | int i; | ||
280 | acpi_status status = AE_NOT_EXIST; | ||
281 | |||
282 | ACPI_FUNCTION_TRACE(acpi_unload_table_id); | ||
283 | |||
284 | /* Find table in the global table list */ | ||
285 | for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) { | ||
286 | if (id != acpi_gbl_root_table_list.tables[i].owner_id) { | ||
287 | continue; | ||
288 | } | ||
289 | /* | ||
290 | * Delete all namespace objects owned by this table. Note that these | ||
291 | * objects can appear anywhere in the namespace by virtue of the AML | ||
292 | * "Scope" operator. Thus, we need to track ownership by an ID, not | ||
293 | * simply a position within the hierarchy | ||
294 | */ | ||
295 | acpi_tb_delete_namespace_by_owner(i); | ||
296 | status = acpi_tb_release_owner_id(i); | ||
297 | acpi_tb_set_table_loaded_flag(i, FALSE); | ||
298 | break; | ||
299 | } | ||
300 | return_ACPI_STATUS(status); | ||
301 | } | ||
302 | |||
303 | ACPI_EXPORT_SYMBOL(acpi_unload_table_id) | ||
304 | |||
305 | /******************************************************************************* | ||
306 | * | ||
307 | * FUNCTION: acpi_get_table_with_size | 268 | * FUNCTION: acpi_get_table_with_size |
308 | * | 269 | * |
309 | * PARAMETERS: signature - ACPI signature of needed table | 270 | * PARAMETERS: signature - ACPI signature of needed table |
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c index ab5308b81aa8..aadb3002a2dd 100644 --- a/drivers/acpi/acpica/tbxfload.c +++ b/drivers/acpi/acpica/tbxfload.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c index 43a54af2b548..eac52cf14f1a 100644 --- a/drivers/acpi/acpica/tbxfroot.c +++ b/drivers/acpi/acpica/tbxfroot.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c index a1acec9d2ef3..1279f50da757 100644 --- a/drivers/acpi/acpica/utaddress.c +++ b/drivers/acpi/acpica/utaddress.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c index efac83c606dc..61d8f6d186d1 100644 --- a/drivers/acpi/acpica/utalloc.c +++ b/drivers/acpi/acpica/utalloc.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c index 038ea887f562..242bd071f007 100644 --- a/drivers/acpi/acpica/utbuffer.c +++ b/drivers/acpi/acpica/utbuffer.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c index 78fde0aac487..eacc5eee362e 100644 --- a/drivers/acpi/acpica/utcache.c +++ b/drivers/acpi/acpica/utcache.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c index ff601c0f7c7a..c37ec5035f4c 100644 --- a/drivers/acpi/acpica/utcopy.c +++ b/drivers/acpi/acpica/utcopy.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c index e516254c63b2..57078e3ea9b7 100644 --- a/drivers/acpi/acpica/utdebug.c +++ b/drivers/acpi/acpica/utdebug.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c index 40e923e675fc..988e23b7795c 100644 --- a/drivers/acpi/acpica/utdecode.c +++ b/drivers/acpi/acpica/utdecode.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c index a3516de213fa..71fce389fd48 100644 --- a/drivers/acpi/acpica/utdelete.c +++ b/drivers/acpi/acpica/utdelete.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c index 8e544d4688cd..9ef80f2828e3 100644 --- a/drivers/acpi/acpica/uterror.c +++ b/drivers/acpi/acpica/uterror.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c index 8fed1482d228..6c738fa0cd42 100644 --- a/drivers/acpi/acpica/uteval.c +++ b/drivers/acpi/acpica/uteval.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/utexcep.c b/drivers/acpi/acpica/utexcep.c index 0403dcaabaf2..743a0ae9fb17 100644 --- a/drivers/acpi/acpica/utexcep.c +++ b/drivers/acpi/acpica/utexcep.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/utfileio.c b/drivers/acpi/acpica/utfileio.c index 4e263a8cc6f0..7e1168be39fa 100644 --- a/drivers/acpi/acpica/utfileio.c +++ b/drivers/acpi/acpica/utfileio.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c index 77ceac715f28..5e8df9177da4 100644 --- a/drivers/acpi/acpica/utglobal.c +++ b/drivers/acpi/acpica/utglobal.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c index 9afa9441b183..aa448278ba28 100644 --- a/drivers/acpi/acpica/uthex.c +++ b/drivers/acpi/acpica/uthex.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c index 4b12880e5b11..27431cfc1c44 100644 --- a/drivers/acpi/acpica/utids.c +++ b/drivers/acpi/acpica/utids.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c index 77120ec9ea86..e402e07b4846 100644 --- a/drivers/acpi/acpica/utinit.c +++ b/drivers/acpi/acpica/utinit.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c index dc6e96547f18..089f78bbd59b 100644 --- a/drivers/acpi/acpica/utlock.c +++ b/drivers/acpi/acpica/utlock.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c index d44dee6ee10a..f9ff100f0159 100644 --- a/drivers/acpi/acpica/utmath.c +++ b/drivers/acpi/acpica/utmath.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c index 2e2bb14e1099..56bbacd576f2 100644 --- a/drivers/acpi/acpica/utmisc.c +++ b/drivers/acpi/acpica/utmisc.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c index 82717fff9ffc..37b8b58fcd56 100644 --- a/drivers/acpi/acpica/utmutex.c +++ b/drivers/acpi/acpica/utmutex.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c index dfa9009bfc87..7d83efe1ea29 100644 --- a/drivers/acpi/acpica/utobject.c +++ b/drivers/acpi/acpica/utobject.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c index 685766fc6ca8..574cd3118313 100644 --- a/drivers/acpi/acpica/utosi.c +++ b/drivers/acpi/acpica/utosi.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/utownerid.c b/drivers/acpi/acpica/utownerid.c index 36bec57ebd23..2959217067cb 100644 --- a/drivers/acpi/acpica/utownerid.c +++ b/drivers/acpi/acpica/utownerid.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c index db30caff130a..29e449935a82 100644 --- a/drivers/acpi/acpica/utpredef.c +++ b/drivers/acpi/acpica/utpredef.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c index 0ce3f5a0dd67..82ca9142e10d 100644 --- a/drivers/acpi/acpica/utprint.c +++ b/drivers/acpi/acpica/utprint.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c index bc1ff820c7dd..b3505dbc715e 100644 --- a/drivers/acpi/acpica/utresrc.c +++ b/drivers/acpi/acpica/utresrc.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c index 1cc97a752c15..8274cc16edc3 100644 --- a/drivers/acpi/acpica/utstate.c +++ b/drivers/acpi/acpica/utstate.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c index 6dc54b3c28b0..83b6c52490dc 100644 --- a/drivers/acpi/acpica/utstring.c +++ b/drivers/acpi/acpica/utstring.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c index 7d0ee969d781..130dd9f96f0f 100644 --- a/drivers/acpi/acpica/uttrack.c +++ b/drivers/acpi/acpica/uttrack.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/utuuid.c b/drivers/acpi/acpica/utuuid.c index 4dc33130f134..c6149a212149 100644 --- a/drivers/acpi/acpica/utuuid.c +++ b/drivers/acpi/acpica/utuuid.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c index 49c873c68756..0929187bdce0 100644 --- a/drivers/acpi/acpica/utxface.c +++ b/drivers/acpi/acpica/utxface.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c index 88ef77f3cf88..306e785f9418 100644 --- a/drivers/acpi/acpica/utxferror.c +++ b/drivers/acpi/acpica/utxferror.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c index b1fd6886e439..083a76891889 100644 --- a/drivers/acpi/acpica/utxfinit.c +++ b/drivers/acpi/acpica/utxfinit.c | |||
@@ -5,7 +5,7 @@ | |||
5 | *****************************************************************************/ | 5 | *****************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/acpica/utxfmutex.c b/drivers/acpi/acpica/utxfmutex.c index 2a0f9e04d3a4..f2606af3364c 100644 --- a/drivers/acpi/acpica/utxfmutex.c +++ b/drivers/acpi/acpica/utxfmutex.c | |||
@@ -5,7 +5,7 @@ | |||
5 | ******************************************************************************/ | 5 | ******************************************************************************/ |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * Copyright (C) 2000 - 2014, Intel Corp. | 8 | * Copyright (C) 2000 - 2015, Intel Corp. |
9 | * All rights reserved. | 9 | * All rights reserved. |
10 | * | 10 | * |
11 | * Redistribution and use in source and binary forms, with or without | 11 | * Redistribution and use in source and binary forms, with or without |
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c index 2cd7bdd6c8b3..a85ac07f3da3 100644 --- a/drivers/acpi/apei/apei-base.c +++ b/drivers/acpi/apei/apei-base.c | |||
@@ -449,7 +449,7 @@ int apei_resources_sub(struct apei_resources *resources1, | |||
449 | } | 449 | } |
450 | EXPORT_SYMBOL_GPL(apei_resources_sub); | 450 | EXPORT_SYMBOL_GPL(apei_resources_sub); |
451 | 451 | ||
452 | static int apei_get_nvs_callback(__u64 start, __u64 size, void *data) | 452 | static int apei_get_res_callback(__u64 start, __u64 size, void *data) |
453 | { | 453 | { |
454 | struct apei_resources *resources = data; | 454 | struct apei_resources *resources = data; |
455 | return apei_res_add(&resources->iomem, start, size); | 455 | return apei_res_add(&resources->iomem, start, size); |
@@ -457,7 +457,15 @@ static int apei_get_nvs_callback(__u64 start, __u64 size, void *data) | |||
457 | 457 | ||
458 | static int apei_get_nvs_resources(struct apei_resources *resources) | 458 | static int apei_get_nvs_resources(struct apei_resources *resources) |
459 | { | 459 | { |
460 | return acpi_nvs_for_each_region(apei_get_nvs_callback, resources); | 460 | return acpi_nvs_for_each_region(apei_get_res_callback, resources); |
461 | } | ||
462 | |||
463 | int (*arch_apei_filter_addr)(int (*func)(__u64 start, __u64 size, | ||
464 | void *data), void *data); | ||
465 | static int apei_get_arch_resources(struct apei_resources *resources) | ||
466 | |||
467 | { | ||
468 | return arch_apei_filter_addr(apei_get_res_callback, resources); | ||
461 | } | 469 | } |
462 | 470 | ||
463 | /* | 471 | /* |
@@ -470,7 +478,7 @@ int apei_resources_request(struct apei_resources *resources, | |||
470 | { | 478 | { |
471 | struct apei_res *res, *res_bak = NULL; | 479 | struct apei_res *res, *res_bak = NULL; |
472 | struct resource *r; | 480 | struct resource *r; |
473 | struct apei_resources nvs_resources; | 481 | struct apei_resources nvs_resources, arch_res; |
474 | int rc; | 482 | int rc; |
475 | 483 | ||
476 | rc = apei_resources_sub(resources, &apei_resources_all); | 484 | rc = apei_resources_sub(resources, &apei_resources_all); |
@@ -485,10 +493,20 @@ int apei_resources_request(struct apei_resources *resources, | |||
485 | apei_resources_init(&nvs_resources); | 493 | apei_resources_init(&nvs_resources); |
486 | rc = apei_get_nvs_resources(&nvs_resources); | 494 | rc = apei_get_nvs_resources(&nvs_resources); |
487 | if (rc) | 495 | if (rc) |
488 | goto res_fini; | 496 | goto nvs_res_fini; |
489 | rc = apei_resources_sub(resources, &nvs_resources); | 497 | rc = apei_resources_sub(resources, &nvs_resources); |
490 | if (rc) | 498 | if (rc) |
491 | goto res_fini; | 499 | goto nvs_res_fini; |
500 | |||
501 | if (arch_apei_filter_addr) { | ||
502 | apei_resources_init(&arch_res); | ||
503 | rc = apei_get_arch_resources(&arch_res); | ||
504 | if (rc) | ||
505 | goto arch_res_fini; | ||
506 | rc = apei_resources_sub(resources, &arch_res); | ||
507 | if (rc) | ||
508 | goto arch_res_fini; | ||
509 | } | ||
492 | 510 | ||
493 | rc = -EINVAL; | 511 | rc = -EINVAL; |
494 | list_for_each_entry(res, &resources->iomem, list) { | 512 | list_for_each_entry(res, &resources->iomem, list) { |
@@ -536,7 +554,9 @@ err_unmap_iomem: | |||
536 | break; | 554 | break; |
537 | release_mem_region(res->start, res->end - res->start); | 555 | release_mem_region(res->start, res->end - res->start); |
538 | } | 556 | } |
539 | res_fini: | 557 | arch_res_fini: |
558 | apei_resources_fini(&arch_res); | ||
559 | nvs_res_fini: | ||
540 | apei_resources_fini(&nvs_resources); | 560 | apei_resources_fini(&nvs_resources); |
541 | return rc; | 561 | return rc; |
542 | } | 562 | } |
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index c0d44d394ca3..735db11a9b00 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c | |||
@@ -1027,7 +1027,6 @@ EXPORT_SYMBOL_GPL(acpi_subsys_freeze); | |||
1027 | 1027 | ||
1028 | static struct dev_pm_domain acpi_general_pm_domain = { | 1028 | static struct dev_pm_domain acpi_general_pm_domain = { |
1029 | .ops = { | 1029 | .ops = { |
1030 | #ifdef CONFIG_PM | ||
1031 | .runtime_suspend = acpi_subsys_runtime_suspend, | 1030 | .runtime_suspend = acpi_subsys_runtime_suspend, |
1032 | .runtime_resume = acpi_subsys_runtime_resume, | 1031 | .runtime_resume = acpi_subsys_runtime_resume, |
1033 | #ifdef CONFIG_PM_SLEEP | 1032 | #ifdef CONFIG_PM_SLEEP |
@@ -1041,7 +1040,6 @@ static struct dev_pm_domain acpi_general_pm_domain = { | |||
1041 | .poweroff_late = acpi_subsys_suspend_late, | 1040 | .poweroff_late = acpi_subsys_suspend_late, |
1042 | .restore_early = acpi_subsys_resume_early, | 1041 | .restore_early = acpi_subsys_resume_early, |
1043 | #endif | 1042 | #endif |
1044 | #endif | ||
1045 | }, | 1043 | }, |
1046 | }; | 1044 | }; |
1047 | 1045 | ||
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 1b5853f384e2..982b67faaaf3 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c | |||
@@ -1,8 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * ec.c - ACPI Embedded Controller Driver (v2.2) | 2 | * ec.c - ACPI Embedded Controller Driver (v3) |
3 | * | 3 | * |
4 | * Copyright (C) 2001-2014 Intel Corporation | 4 | * Copyright (C) 2001-2015 Intel Corporation |
5 | * Author: 2014 Lv Zheng <lv.zheng@intel.com> | 5 | * Author: 2014, 2015 Lv Zheng <lv.zheng@intel.com> |
6 | * 2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com> | 6 | * 2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com> |
7 | * 2006 Denis Sadykov <denis.m.sadykov@intel.com> | 7 | * 2006 Denis Sadykov <denis.m.sadykov@intel.com> |
8 | * 2004 Luming Yu <luming.yu@intel.com> | 8 | * 2004 Luming Yu <luming.yu@intel.com> |
@@ -71,15 +71,18 @@ enum ec_command { | |||
71 | #define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */ | 71 | #define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */ |
72 | #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */ | 72 | #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */ |
73 | #define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */ | 73 | #define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */ |
74 | #define ACPI_EC_UDELAY_POLL 1000 /* Wait 1ms for EC transaction polling */ | ||
74 | #define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query | 75 | #define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query |
75 | * when trying to clear the EC */ | 76 | * when trying to clear the EC */ |
76 | 77 | ||
77 | enum { | 78 | enum { |
78 | EC_FLAGS_QUERY_PENDING, /* Query is pending */ | 79 | EC_FLAGS_QUERY_PENDING, /* Query is pending */ |
79 | EC_FLAGS_GPE_STORM, /* GPE storm detected */ | ||
80 | EC_FLAGS_HANDLERS_INSTALLED, /* Handlers for GPE and | 80 | EC_FLAGS_HANDLERS_INSTALLED, /* Handlers for GPE and |
81 | * OpReg are installed */ | 81 | * OpReg are installed */ |
82 | EC_FLAGS_BLOCKED, /* Transactions are blocked */ | 82 | EC_FLAGS_STARTED, /* Driver is started */ |
83 | EC_FLAGS_STOPPED, /* Driver is stopped */ | ||
84 | EC_FLAGS_COMMAND_STORM, /* GPE storms occurred to the | ||
85 | * current command processing */ | ||
83 | }; | 86 | }; |
84 | 87 | ||
85 | #define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */ | 88 | #define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */ |
@@ -105,6 +108,7 @@ struct acpi_ec_query_handler { | |||
105 | acpi_handle handle; | 108 | acpi_handle handle; |
106 | void *data; | 109 | void *data; |
107 | u8 query_bit; | 110 | u8 query_bit; |
111 | struct kref kref; | ||
108 | }; | 112 | }; |
109 | 113 | ||
110 | struct transaction { | 114 | struct transaction { |
@@ -117,8 +121,12 @@ struct transaction { | |||
117 | u8 wlen; | 121 | u8 wlen; |
118 | u8 rlen; | 122 | u8 rlen; |
119 | u8 flags; | 123 | u8 flags; |
124 | unsigned long timestamp; | ||
120 | }; | 125 | }; |
121 | 126 | ||
127 | static int acpi_ec_query(struct acpi_ec *ec, u8 *data); | ||
128 | static void advance_transaction(struct acpi_ec *ec); | ||
129 | |||
122 | struct acpi_ec *boot_ec, *first_ec; | 130 | struct acpi_ec *boot_ec, *first_ec; |
123 | EXPORT_SYMBOL(first_ec); | 131 | EXPORT_SYMBOL(first_ec); |
124 | 132 | ||
@@ -129,7 +137,22 @@ static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */ | |||
129 | static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */ | 137 | static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */ |
130 | 138 | ||
131 | /* -------------------------------------------------------------------------- | 139 | /* -------------------------------------------------------------------------- |
132 | * Transaction Management | 140 | * Device Flags |
141 | * -------------------------------------------------------------------------- */ | ||
142 | |||
143 | static bool acpi_ec_started(struct acpi_ec *ec) | ||
144 | { | ||
145 | return test_bit(EC_FLAGS_STARTED, &ec->flags) && | ||
146 | !test_bit(EC_FLAGS_STOPPED, &ec->flags); | ||
147 | } | ||
148 | |||
149 | static bool acpi_ec_flushed(struct acpi_ec *ec) | ||
150 | { | ||
151 | return ec->reference_count == 1; | ||
152 | } | ||
153 | |||
154 | /* -------------------------------------------------------------------------- | ||
155 | * EC Registers | ||
133 | * -------------------------------------------------------------------------- */ | 156 | * -------------------------------------------------------------------------- */ |
134 | 157 | ||
135 | static inline u8 acpi_ec_read_status(struct acpi_ec *ec) | 158 | static inline u8 acpi_ec_read_status(struct acpi_ec *ec) |
@@ -151,6 +174,7 @@ static inline u8 acpi_ec_read_data(struct acpi_ec *ec) | |||
151 | { | 174 | { |
152 | u8 x = inb(ec->data_addr); | 175 | u8 x = inb(ec->data_addr); |
153 | 176 | ||
177 | ec->curr->timestamp = jiffies; | ||
154 | pr_debug("EC_DATA(R) = 0x%2.2x\n", x); | 178 | pr_debug("EC_DATA(R) = 0x%2.2x\n", x); |
155 | return x; | 179 | return x; |
156 | } | 180 | } |
@@ -159,12 +183,14 @@ static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command) | |||
159 | { | 183 | { |
160 | pr_debug("EC_SC(W) = 0x%2.2x\n", command); | 184 | pr_debug("EC_SC(W) = 0x%2.2x\n", command); |
161 | outb(command, ec->command_addr); | 185 | outb(command, ec->command_addr); |
186 | ec->curr->timestamp = jiffies; | ||
162 | } | 187 | } |
163 | 188 | ||
164 | static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data) | 189 | static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data) |
165 | { | 190 | { |
166 | pr_debug("EC_DATA(W) = 0x%2.2x\n", data); | 191 | pr_debug("EC_DATA(W) = 0x%2.2x\n", data); |
167 | outb(data, ec->data_addr); | 192 | outb(data, ec->data_addr); |
193 | ec->curr->timestamp = jiffies; | ||
168 | } | 194 | } |
169 | 195 | ||
170 | #ifdef DEBUG | 196 | #ifdef DEBUG |
@@ -188,6 +214,140 @@ static const char *acpi_ec_cmd_string(u8 cmd) | |||
188 | #define acpi_ec_cmd_string(cmd) "UNDEF" | 214 | #define acpi_ec_cmd_string(cmd) "UNDEF" |
189 | #endif | 215 | #endif |
190 | 216 | ||
217 | /* -------------------------------------------------------------------------- | ||
218 | * GPE Registers | ||
219 | * -------------------------------------------------------------------------- */ | ||
220 | |||
221 | static inline bool acpi_ec_is_gpe_raised(struct acpi_ec *ec) | ||
222 | { | ||
223 | acpi_event_status gpe_status = 0; | ||
224 | |||
225 | (void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status); | ||
226 | return (gpe_status & ACPI_EVENT_FLAG_SET) ? true : false; | ||
227 | } | ||
228 | |||
229 | static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open) | ||
230 | { | ||
231 | if (open) | ||
232 | acpi_enable_gpe(NULL, ec->gpe); | ||
233 | else { | ||
234 | BUG_ON(ec->reference_count < 1); | ||
235 | acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE); | ||
236 | } | ||
237 | if (acpi_ec_is_gpe_raised(ec)) { | ||
238 | /* | ||
239 | * On some platforms, EN=1 writes cannot trigger GPE. So | ||
240 | * software need to manually trigger a pseudo GPE event on | ||
241 | * EN=1 writes. | ||
242 | */ | ||
243 | pr_debug("***** Polling quirk *****\n"); | ||
244 | advance_transaction(ec); | ||
245 | } | ||
246 | } | ||
247 | |||
248 | static inline void acpi_ec_disable_gpe(struct acpi_ec *ec, bool close) | ||
249 | { | ||
250 | if (close) | ||
251 | acpi_disable_gpe(NULL, ec->gpe); | ||
252 | else { | ||
253 | BUG_ON(ec->reference_count < 1); | ||
254 | acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE); | ||
255 | } | ||
256 | } | ||
257 | |||
258 | static inline void acpi_ec_clear_gpe(struct acpi_ec *ec) | ||
259 | { | ||
260 | /* | ||
261 | * GPE STS is a W1C register, which means: | ||
262 | * 1. Software can clear it without worrying about clearing other | ||
263 | * GPEs' STS bits when the hardware sets them in parallel. | ||
264 | * 2. As long as software can ensure only clearing it when it is | ||
265 | * set, hardware won't set it in parallel. | ||
266 | * So software can clear GPE in any contexts. | ||
267 | * Warning: do not move the check into advance_transaction() as the | ||
268 | * EC commands will be sent without GPE raised. | ||
269 | */ | ||
270 | if (!acpi_ec_is_gpe_raised(ec)) | ||
271 | return; | ||
272 | acpi_clear_gpe(NULL, ec->gpe); | ||
273 | } | ||
274 | |||
275 | /* -------------------------------------------------------------------------- | ||
276 | * Transaction Management | ||
277 | * -------------------------------------------------------------------------- */ | ||
278 | |||
279 | static void acpi_ec_submit_request(struct acpi_ec *ec) | ||
280 | { | ||
281 | ec->reference_count++; | ||
282 | if (ec->reference_count == 1) | ||
283 | acpi_ec_enable_gpe(ec, true); | ||
284 | } | ||
285 | |||
286 | static void acpi_ec_complete_request(struct acpi_ec *ec) | ||
287 | { | ||
288 | bool flushed = false; | ||
289 | |||
290 | ec->reference_count--; | ||
291 | if (ec->reference_count == 0) | ||
292 | acpi_ec_disable_gpe(ec, true); | ||
293 | flushed = acpi_ec_flushed(ec); | ||
294 | if (flushed) | ||
295 | wake_up(&ec->wait); | ||
296 | } | ||
297 | |||
298 | static void acpi_ec_set_storm(struct acpi_ec *ec, u8 flag) | ||
299 | { | ||
300 | if (!test_bit(flag, &ec->flags)) { | ||
301 | acpi_ec_disable_gpe(ec, false); | ||
302 | pr_debug("+++++ Polling enabled +++++\n"); | ||
303 | set_bit(flag, &ec->flags); | ||
304 | } | ||
305 | } | ||
306 | |||
307 | static void acpi_ec_clear_storm(struct acpi_ec *ec, u8 flag) | ||
308 | { | ||
309 | if (test_bit(flag, &ec->flags)) { | ||
310 | clear_bit(flag, &ec->flags); | ||
311 | acpi_ec_enable_gpe(ec, false); | ||
312 | pr_debug("+++++ Polling disabled +++++\n"); | ||
313 | } | ||
314 | } | ||
315 | |||
316 | /* | ||
317 | * acpi_ec_submit_flushable_request() - Increase the reference count unless | ||
318 | * the flush operation is not in | ||
319 | * progress | ||
320 | * @ec: the EC device | ||
321 | * | ||
322 | * This function must be used before taking a new action that should hold | ||
323 | * the reference count. If this function returns false, then the action | ||
324 | * must be discarded or it will prevent the flush operation from being | ||
325 | * completed. | ||
326 | */ | ||
327 | static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec) | ||
328 | { | ||
329 | if (!acpi_ec_started(ec)) | ||
330 | return false; | ||
331 | acpi_ec_submit_request(ec); | ||
332 | return true; | ||
333 | } | ||
334 | |||
335 | static void acpi_ec_submit_query(struct acpi_ec *ec) | ||
336 | { | ||
337 | if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) { | ||
338 | pr_debug("***** Event started *****\n"); | ||
339 | schedule_work(&ec->work); | ||
340 | } | ||
341 | } | ||
342 | |||
343 | static void acpi_ec_complete_query(struct acpi_ec *ec) | ||
344 | { | ||
345 | if (ec->curr->command == ACPI_EC_COMMAND_QUERY) { | ||
346 | clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); | ||
347 | pr_debug("***** Event stopped *****\n"); | ||
348 | } | ||
349 | } | ||
350 | |||
191 | static int ec_transaction_completed(struct acpi_ec *ec) | 351 | static int ec_transaction_completed(struct acpi_ec *ec) |
192 | { | 352 | { |
193 | unsigned long flags; | 353 | unsigned long flags; |
@@ -200,7 +360,7 @@ static int ec_transaction_completed(struct acpi_ec *ec) | |||
200 | return ret; | 360 | return ret; |
201 | } | 361 | } |
202 | 362 | ||
203 | static bool advance_transaction(struct acpi_ec *ec) | 363 | static void advance_transaction(struct acpi_ec *ec) |
204 | { | 364 | { |
205 | struct transaction *t; | 365 | struct transaction *t; |
206 | u8 status; | 366 | u8 status; |
@@ -208,6 +368,12 @@ static bool advance_transaction(struct acpi_ec *ec) | |||
208 | 368 | ||
209 | pr_debug("===== %s (%d) =====\n", | 369 | pr_debug("===== %s (%d) =====\n", |
210 | in_interrupt() ? "IRQ" : "TASK", smp_processor_id()); | 370 | in_interrupt() ? "IRQ" : "TASK", smp_processor_id()); |
371 | /* | ||
372 | * By always clearing STS before handling all indications, we can | ||
373 | * ensure a hardware STS 0->1 change after this clearing can always | ||
374 | * trigger a GPE interrupt. | ||
375 | */ | ||
376 | acpi_ec_clear_gpe(ec); | ||
211 | status = acpi_ec_read_status(ec); | 377 | status = acpi_ec_read_status(ec); |
212 | t = ec->curr; | 378 | t = ec->curr; |
213 | if (!t) | 379 | if (!t) |
@@ -235,12 +401,13 @@ static bool advance_transaction(struct acpi_ec *ec) | |||
235 | t->flags |= ACPI_EC_COMMAND_COMPLETE; | 401 | t->flags |= ACPI_EC_COMMAND_COMPLETE; |
236 | wakeup = true; | 402 | wakeup = true; |
237 | } | 403 | } |
238 | return wakeup; | 404 | goto out; |
239 | } else { | 405 | } else { |
240 | if (EC_FLAGS_QUERY_HANDSHAKE && | 406 | if (EC_FLAGS_QUERY_HANDSHAKE && |
241 | !(status & ACPI_EC_FLAG_SCI) && | 407 | !(status & ACPI_EC_FLAG_SCI) && |
242 | (t->command == ACPI_EC_COMMAND_QUERY)) { | 408 | (t->command == ACPI_EC_COMMAND_QUERY)) { |
243 | t->flags |= ACPI_EC_COMMAND_POLL; | 409 | t->flags |= ACPI_EC_COMMAND_POLL; |
410 | acpi_ec_complete_query(ec); | ||
244 | t->rdata[t->ri++] = 0x00; | 411 | t->rdata[t->ri++] = 0x00; |
245 | t->flags |= ACPI_EC_COMMAND_COMPLETE; | 412 | t->flags |= ACPI_EC_COMMAND_COMPLETE; |
246 | pr_debug("***** Command(%s) software completion *****\n", | 413 | pr_debug("***** Command(%s) software completion *****\n", |
@@ -249,9 +416,10 @@ static bool advance_transaction(struct acpi_ec *ec) | |||
249 | } else if ((status & ACPI_EC_FLAG_IBF) == 0) { | 416 | } else if ((status & ACPI_EC_FLAG_IBF) == 0) { |
250 | acpi_ec_write_cmd(ec, t->command); | 417 | acpi_ec_write_cmd(ec, t->command); |
251 | t->flags |= ACPI_EC_COMMAND_POLL; | 418 | t->flags |= ACPI_EC_COMMAND_POLL; |
419 | acpi_ec_complete_query(ec); | ||
252 | } else | 420 | } else |
253 | goto err; | 421 | goto err; |
254 | return wakeup; | 422 | goto out; |
255 | } | 423 | } |
256 | err: | 424 | err: |
257 | /* | 425 | /* |
@@ -259,28 +427,27 @@ err: | |||
259 | * otherwise will take a not handled IRQ as a false one. | 427 | * otherwise will take a not handled IRQ as a false one. |
260 | */ | 428 | */ |
261 | if (!(status & ACPI_EC_FLAG_SCI)) { | 429 | if (!(status & ACPI_EC_FLAG_SCI)) { |
262 | if (in_interrupt() && t) | 430 | if (in_interrupt() && t) { |
263 | ++t->irq_count; | 431 | if (t->irq_count < ec_storm_threshold) |
432 | ++t->irq_count; | ||
433 | /* Allow triggering on 0 threshold */ | ||
434 | if (t->irq_count == ec_storm_threshold) | ||
435 | acpi_ec_set_storm(ec, EC_FLAGS_COMMAND_STORM); | ||
436 | } | ||
264 | } | 437 | } |
265 | return wakeup; | 438 | out: |
439 | if (status & ACPI_EC_FLAG_SCI) | ||
440 | acpi_ec_submit_query(ec); | ||
441 | if (wakeup && in_interrupt()) | ||
442 | wake_up(&ec->wait); | ||
266 | } | 443 | } |
267 | 444 | ||
268 | static void start_transaction(struct acpi_ec *ec) | 445 | static void start_transaction(struct acpi_ec *ec) |
269 | { | 446 | { |
270 | ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0; | 447 | ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0; |
271 | ec->curr->flags = 0; | 448 | ec->curr->flags = 0; |
272 | (void)advance_transaction(ec); | 449 | ec->curr->timestamp = jiffies; |
273 | } | 450 | advance_transaction(ec); |
274 | |||
275 | static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data); | ||
276 | |||
277 | static int ec_check_sci_sync(struct acpi_ec *ec, u8 state) | ||
278 | { | ||
279 | if (state & ACPI_EC_FLAG_SCI) { | ||
280 | if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) | ||
281 | return acpi_ec_sync_query(ec, NULL); | ||
282 | } | ||
283 | return 0; | ||
284 | } | 451 | } |
285 | 452 | ||
286 | static int ec_poll(struct acpi_ec *ec) | 453 | static int ec_poll(struct acpi_ec *ec) |
@@ -291,20 +458,25 @@ static int ec_poll(struct acpi_ec *ec) | |||
291 | while (repeat--) { | 458 | while (repeat--) { |
292 | unsigned long delay = jiffies + | 459 | unsigned long delay = jiffies + |
293 | msecs_to_jiffies(ec_delay); | 460 | msecs_to_jiffies(ec_delay); |
461 | unsigned long usecs = ACPI_EC_UDELAY_POLL; | ||
294 | do { | 462 | do { |
295 | /* don't sleep with disabled interrupts */ | 463 | /* don't sleep with disabled interrupts */ |
296 | if (EC_FLAGS_MSI || irqs_disabled()) { | 464 | if (EC_FLAGS_MSI || irqs_disabled()) { |
297 | udelay(ACPI_EC_MSI_UDELAY); | 465 | usecs = ACPI_EC_MSI_UDELAY; |
466 | udelay(usecs); | ||
298 | if (ec_transaction_completed(ec)) | 467 | if (ec_transaction_completed(ec)) |
299 | return 0; | 468 | return 0; |
300 | } else { | 469 | } else { |
301 | if (wait_event_timeout(ec->wait, | 470 | if (wait_event_timeout(ec->wait, |
302 | ec_transaction_completed(ec), | 471 | ec_transaction_completed(ec), |
303 | msecs_to_jiffies(1))) | 472 | usecs_to_jiffies(usecs))) |
304 | return 0; | 473 | return 0; |
305 | } | 474 | } |
306 | spin_lock_irqsave(&ec->lock, flags); | 475 | spin_lock_irqsave(&ec->lock, flags); |
307 | (void)advance_transaction(ec); | 476 | if (time_after(jiffies, |
477 | ec->curr->timestamp + | ||
478 | usecs_to_jiffies(usecs))) | ||
479 | advance_transaction(ec); | ||
308 | spin_unlock_irqrestore(&ec->lock, flags); | 480 | spin_unlock_irqrestore(&ec->lock, flags); |
309 | } while (time_before(jiffies, delay)); | 481 | } while (time_before(jiffies, delay)); |
310 | pr_debug("controller reset, restart transaction\n"); | 482 | pr_debug("controller reset, restart transaction\n"); |
@@ -325,21 +497,27 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, | |||
325 | udelay(ACPI_EC_MSI_UDELAY); | 497 | udelay(ACPI_EC_MSI_UDELAY); |
326 | /* start transaction */ | 498 | /* start transaction */ |
327 | spin_lock_irqsave(&ec->lock, tmp); | 499 | spin_lock_irqsave(&ec->lock, tmp); |
500 | /* Enable GPE for command processing (IBF=0/OBF=1) */ | ||
501 | if (!acpi_ec_submit_flushable_request(ec)) { | ||
502 | ret = -EINVAL; | ||
503 | goto unlock; | ||
504 | } | ||
328 | /* following two actions should be kept atomic */ | 505 | /* following two actions should be kept atomic */ |
329 | ec->curr = t; | 506 | ec->curr = t; |
330 | pr_debug("***** Command(%s) started *****\n", | 507 | pr_debug("***** Command(%s) started *****\n", |
331 | acpi_ec_cmd_string(t->command)); | 508 | acpi_ec_cmd_string(t->command)); |
332 | start_transaction(ec); | 509 | start_transaction(ec); |
333 | if (ec->curr->command == ACPI_EC_COMMAND_QUERY) { | ||
334 | clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); | ||
335 | pr_debug("***** Event stopped *****\n"); | ||
336 | } | ||
337 | spin_unlock_irqrestore(&ec->lock, tmp); | 510 | spin_unlock_irqrestore(&ec->lock, tmp); |
338 | ret = ec_poll(ec); | 511 | ret = ec_poll(ec); |
339 | spin_lock_irqsave(&ec->lock, tmp); | 512 | spin_lock_irqsave(&ec->lock, tmp); |
513 | if (t->irq_count == ec_storm_threshold) | ||
514 | acpi_ec_clear_storm(ec, EC_FLAGS_COMMAND_STORM); | ||
340 | pr_debug("***** Command(%s) stopped *****\n", | 515 | pr_debug("***** Command(%s) stopped *****\n", |
341 | acpi_ec_cmd_string(t->command)); | 516 | acpi_ec_cmd_string(t->command)); |
342 | ec->curr = NULL; | 517 | ec->curr = NULL; |
518 | /* Disable GPE for command processing (IBF=0/OBF=1) */ | ||
519 | acpi_ec_complete_request(ec); | ||
520 | unlock: | ||
343 | spin_unlock_irqrestore(&ec->lock, tmp); | 521 | spin_unlock_irqrestore(&ec->lock, tmp); |
344 | return ret; | 522 | return ret; |
345 | } | 523 | } |
@@ -354,10 +532,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) | |||
354 | if (t->rdata) | 532 | if (t->rdata) |
355 | memset(t->rdata, 0, t->rlen); | 533 | memset(t->rdata, 0, t->rlen); |
356 | mutex_lock(&ec->mutex); | 534 | mutex_lock(&ec->mutex); |
357 | if (test_bit(EC_FLAGS_BLOCKED, &ec->flags)) { | ||
358 | status = -EINVAL; | ||
359 | goto unlock; | ||
360 | } | ||
361 | if (ec->global_lock) { | 535 | if (ec->global_lock) { |
362 | status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); | 536 | status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); |
363 | if (ACPI_FAILURE(status)) { | 537 | if (ACPI_FAILURE(status)) { |
@@ -365,26 +539,11 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) | |||
365 | goto unlock; | 539 | goto unlock; |
366 | } | 540 | } |
367 | } | 541 | } |
368 | /* disable GPE during transaction if storm is detected */ | ||
369 | if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { | ||
370 | /* It has to be disabled, so that it doesn't trigger. */ | ||
371 | acpi_disable_gpe(NULL, ec->gpe); | ||
372 | } | ||
373 | 542 | ||
374 | status = acpi_ec_transaction_unlocked(ec, t); | 543 | status = acpi_ec_transaction_unlocked(ec, t); |
375 | 544 | ||
376 | /* check if we received SCI during transaction */ | 545 | if (test_bit(EC_FLAGS_COMMAND_STORM, &ec->flags)) |
377 | ec_check_sci_sync(ec, acpi_ec_read_status(ec)); | ||
378 | if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { | ||
379 | msleep(1); | 546 | msleep(1); |
380 | /* It is safe to enable the GPE outside of the transaction. */ | ||
381 | acpi_enable_gpe(NULL, ec->gpe); | ||
382 | } else if (t->irq_count > ec_storm_threshold) { | ||
383 | pr_info("GPE storm detected(%d GPEs), " | ||
384 | "transactions will use polling mode\n", | ||
385 | t->irq_count); | ||
386 | set_bit(EC_FLAGS_GPE_STORM, &ec->flags); | ||
387 | } | ||
388 | if (ec->global_lock) | 547 | if (ec->global_lock) |
389 | acpi_release_global_lock(glk); | 548 | acpi_release_global_lock(glk); |
390 | unlock: | 549 | unlock: |
@@ -500,7 +659,7 @@ static void acpi_ec_clear(struct acpi_ec *ec) | |||
500 | u8 value = 0; | 659 | u8 value = 0; |
501 | 660 | ||
502 | for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) { | 661 | for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) { |
503 | status = acpi_ec_sync_query(ec, &value); | 662 | status = acpi_ec_query(ec, &value); |
504 | if (status || !value) | 663 | if (status || !value) |
505 | break; | 664 | break; |
506 | } | 665 | } |
@@ -511,6 +670,53 @@ static void acpi_ec_clear(struct acpi_ec *ec) | |||
511 | pr_info("%d stale EC events cleared\n", i); | 670 | pr_info("%d stale EC events cleared\n", i); |
512 | } | 671 | } |
513 | 672 | ||
673 | static void acpi_ec_start(struct acpi_ec *ec, bool resuming) | ||
674 | { | ||
675 | unsigned long flags; | ||
676 | |||
677 | spin_lock_irqsave(&ec->lock, flags); | ||
678 | if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) { | ||
679 | pr_debug("+++++ Starting EC +++++\n"); | ||
680 | /* Enable GPE for event processing (SCI_EVT=1) */ | ||
681 | if (!resuming) | ||
682 | acpi_ec_submit_request(ec); | ||
683 | pr_info("+++++ EC started +++++\n"); | ||
684 | } | ||
685 | spin_unlock_irqrestore(&ec->lock, flags); | ||
686 | } | ||
687 | |||
688 | static bool acpi_ec_stopped(struct acpi_ec *ec) | ||
689 | { | ||
690 | unsigned long flags; | ||
691 | bool flushed; | ||
692 | |||
693 | spin_lock_irqsave(&ec->lock, flags); | ||
694 | flushed = acpi_ec_flushed(ec); | ||
695 | spin_unlock_irqrestore(&ec->lock, flags); | ||
696 | return flushed; | ||
697 | } | ||
698 | |||
699 | static void acpi_ec_stop(struct acpi_ec *ec, bool suspending) | ||
700 | { | ||
701 | unsigned long flags; | ||
702 | |||
703 | spin_lock_irqsave(&ec->lock, flags); | ||
704 | if (acpi_ec_started(ec)) { | ||
705 | pr_debug("+++++ Stopping EC +++++\n"); | ||
706 | set_bit(EC_FLAGS_STOPPED, &ec->flags); | ||
707 | spin_unlock_irqrestore(&ec->lock, flags); | ||
708 | wait_event(ec->wait, acpi_ec_stopped(ec)); | ||
709 | spin_lock_irqsave(&ec->lock, flags); | ||
710 | /* Disable GPE for event processing (SCI_EVT=1) */ | ||
711 | if (!suspending) | ||
712 | acpi_ec_complete_request(ec); | ||
713 | clear_bit(EC_FLAGS_STARTED, &ec->flags); | ||
714 | clear_bit(EC_FLAGS_STOPPED, &ec->flags); | ||
715 | pr_info("+++++ EC stopped +++++\n"); | ||
716 | } | ||
717 | spin_unlock_irqrestore(&ec->lock, flags); | ||
718 | } | ||
719 | |||
514 | void acpi_ec_block_transactions(void) | 720 | void acpi_ec_block_transactions(void) |
515 | { | 721 | { |
516 | struct acpi_ec *ec = first_ec; | 722 | struct acpi_ec *ec = first_ec; |
@@ -520,7 +726,7 @@ void acpi_ec_block_transactions(void) | |||
520 | 726 | ||
521 | mutex_lock(&ec->mutex); | 727 | mutex_lock(&ec->mutex); |
522 | /* Prevent transactions from being carried out */ | 728 | /* Prevent transactions from being carried out */ |
523 | set_bit(EC_FLAGS_BLOCKED, &ec->flags); | 729 | acpi_ec_stop(ec, true); |
524 | mutex_unlock(&ec->mutex); | 730 | mutex_unlock(&ec->mutex); |
525 | } | 731 | } |
526 | 732 | ||
@@ -531,14 +737,11 @@ void acpi_ec_unblock_transactions(void) | |||
531 | if (!ec) | 737 | if (!ec) |
532 | return; | 738 | return; |
533 | 739 | ||
534 | mutex_lock(&ec->mutex); | ||
535 | /* Allow transactions to be carried out again */ | 740 | /* Allow transactions to be carried out again */ |
536 | clear_bit(EC_FLAGS_BLOCKED, &ec->flags); | 741 | acpi_ec_start(ec, true); |
537 | 742 | ||
538 | if (EC_FLAGS_CLEAR_ON_RESUME) | 743 | if (EC_FLAGS_CLEAR_ON_RESUME) |
539 | acpi_ec_clear(ec); | 744 | acpi_ec_clear(ec); |
540 | |||
541 | mutex_unlock(&ec->mutex); | ||
542 | } | 745 | } |
543 | 746 | ||
544 | void acpi_ec_unblock_transactions_early(void) | 747 | void acpi_ec_unblock_transactions_early(void) |
@@ -548,36 +751,33 @@ void acpi_ec_unblock_transactions_early(void) | |||
548 | * atomic context during wakeup, so we don't need to acquire the mutex). | 751 | * atomic context during wakeup, so we don't need to acquire the mutex). |
549 | */ | 752 | */ |
550 | if (first_ec) | 753 | if (first_ec) |
551 | clear_bit(EC_FLAGS_BLOCKED, &first_ec->flags); | 754 | acpi_ec_start(first_ec, true); |
552 | } | 755 | } |
553 | 756 | ||
554 | static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data) | 757 | /* -------------------------------------------------------------------------- |
758 | Event Management | ||
759 | -------------------------------------------------------------------------- */ | ||
760 | static struct acpi_ec_query_handler * | ||
761 | acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler) | ||
555 | { | 762 | { |
556 | int result; | 763 | if (handler) |
557 | u8 d; | 764 | kref_get(&handler->kref); |
558 | struct transaction t = {.command = ACPI_EC_COMMAND_QUERY, | 765 | return handler; |
559 | .wdata = NULL, .rdata = &d, | 766 | } |
560 | .wlen = 0, .rlen = 1}; | ||
561 | 767 | ||
562 | if (!ec || !data) | 768 | static void acpi_ec_query_handler_release(struct kref *kref) |
563 | return -EINVAL; | 769 | { |
564 | /* | 770 | struct acpi_ec_query_handler *handler = |
565 | * Query the EC to find out which _Qxx method we need to evaluate. | 771 | container_of(kref, struct acpi_ec_query_handler, kref); |
566 | * Note that successful completion of the query causes the ACPI_EC_SCI | 772 | |
567 | * bit to be cleared (and thus clearing the interrupt source). | 773 | kfree(handler); |
568 | */ | 774 | } |
569 | result = acpi_ec_transaction_unlocked(ec, &t); | 775 | |
570 | if (result) | 776 | static void acpi_ec_put_query_handler(struct acpi_ec_query_handler *handler) |
571 | return result; | 777 | { |
572 | if (!d) | 778 | kref_put(&handler->kref, acpi_ec_query_handler_release); |
573 | return -ENODATA; | ||
574 | *data = d; | ||
575 | return 0; | ||
576 | } | 779 | } |
577 | 780 | ||
578 | /* -------------------------------------------------------------------------- | ||
579 | Event Management | ||
580 | -------------------------------------------------------------------------- */ | ||
581 | int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, | 781 | int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, |
582 | acpi_handle handle, acpi_ec_query_func func, | 782 | acpi_handle handle, acpi_ec_query_func func, |
583 | void *data) | 783 | void *data) |
@@ -593,6 +793,7 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, | |||
593 | handler->func = func; | 793 | handler->func = func; |
594 | handler->data = data; | 794 | handler->data = data; |
595 | mutex_lock(&ec->mutex); | 795 | mutex_lock(&ec->mutex); |
796 | kref_init(&handler->kref); | ||
596 | list_add(&handler->node, &ec->list); | 797 | list_add(&handler->node, &ec->list); |
597 | mutex_unlock(&ec->mutex); | 798 | mutex_unlock(&ec->mutex); |
598 | return 0; | 799 | return 0; |
@@ -602,15 +803,18 @@ EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler); | |||
602 | void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit) | 803 | void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit) |
603 | { | 804 | { |
604 | struct acpi_ec_query_handler *handler, *tmp; | 805 | struct acpi_ec_query_handler *handler, *tmp; |
806 | LIST_HEAD(free_list); | ||
605 | 807 | ||
606 | mutex_lock(&ec->mutex); | 808 | mutex_lock(&ec->mutex); |
607 | list_for_each_entry_safe(handler, tmp, &ec->list, node) { | 809 | list_for_each_entry_safe(handler, tmp, &ec->list, node) { |
608 | if (query_bit == handler->query_bit) { | 810 | if (query_bit == handler->query_bit) { |
609 | list_del(&handler->node); | 811 | list_del_init(&handler->node); |
610 | kfree(handler); | 812 | list_add(&handler->node, &free_list); |
611 | } | 813 | } |
612 | } | 814 | } |
613 | mutex_unlock(&ec->mutex); | 815 | mutex_unlock(&ec->mutex); |
816 | list_for_each_entry(handler, &free_list, node) | ||
817 | acpi_ec_put_query_handler(handler); | ||
614 | } | 818 | } |
615 | EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler); | 819 | EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler); |
616 | 820 | ||
@@ -626,59 +830,56 @@ static void acpi_ec_run(void *cxt) | |||
626 | else if (handler->handle) | 830 | else if (handler->handle) |
627 | acpi_evaluate_object(handler->handle, NULL, NULL, NULL); | 831 | acpi_evaluate_object(handler->handle, NULL, NULL, NULL); |
628 | pr_debug("##### Query(0x%02x) stopped #####\n", handler->query_bit); | 832 | pr_debug("##### Query(0x%02x) stopped #####\n", handler->query_bit); |
629 | kfree(handler); | 833 | acpi_ec_put_query_handler(handler); |
630 | } | 834 | } |
631 | 835 | ||
632 | static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data) | 836 | static int acpi_ec_query(struct acpi_ec *ec, u8 *data) |
633 | { | 837 | { |
634 | u8 value = 0; | 838 | u8 value = 0; |
635 | int status; | 839 | int result; |
636 | struct acpi_ec_query_handler *handler, *copy; | 840 | acpi_status status; |
841 | struct acpi_ec_query_handler *handler; | ||
842 | struct transaction t = {.command = ACPI_EC_COMMAND_QUERY, | ||
843 | .wdata = NULL, .rdata = &value, | ||
844 | .wlen = 0, .rlen = 1}; | ||
637 | 845 | ||
638 | status = acpi_ec_query_unlocked(ec, &value); | 846 | /* |
847 | * Query the EC to find out which _Qxx method we need to evaluate. | ||
848 | * Note that successful completion of the query causes the ACPI_EC_SCI | ||
849 | * bit to be cleared (and thus clearing the interrupt source). | ||
850 | */ | ||
851 | result = acpi_ec_transaction(ec, &t); | ||
852 | if (result) | ||
853 | return result; | ||
639 | if (data) | 854 | if (data) |
640 | *data = value; | 855 | *data = value; |
641 | if (status) | 856 | if (!value) |
642 | return status; | 857 | return -ENODATA; |
643 | 858 | ||
859 | mutex_lock(&ec->mutex); | ||
644 | list_for_each_entry(handler, &ec->list, node) { | 860 | list_for_each_entry(handler, &ec->list, node) { |
645 | if (value == handler->query_bit) { | 861 | if (value == handler->query_bit) { |
646 | /* have custom handler for this bit */ | 862 | /* have custom handler for this bit */ |
647 | copy = kmalloc(sizeof(*handler), GFP_KERNEL); | 863 | handler = acpi_ec_get_query_handler(handler); |
648 | if (!copy) | ||
649 | return -ENOMEM; | ||
650 | memcpy(copy, handler, sizeof(*copy)); | ||
651 | pr_debug("##### Query(0x%02x) scheduled #####\n", | 864 | pr_debug("##### Query(0x%02x) scheduled #####\n", |
652 | handler->query_bit); | 865 | handler->query_bit); |
653 | return acpi_os_execute((copy->func) ? | 866 | status = acpi_os_execute((handler->func) ? |
654 | OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER, | 867 | OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER, |
655 | acpi_ec_run, copy); | 868 | acpi_ec_run, handler); |
869 | if (ACPI_FAILURE(status)) | ||
870 | result = -EBUSY; | ||
871 | break; | ||
656 | } | 872 | } |
657 | } | 873 | } |
658 | return 0; | ||
659 | } | ||
660 | |||
661 | static void acpi_ec_gpe_query(void *ec_cxt) | ||
662 | { | ||
663 | struct acpi_ec *ec = ec_cxt; | ||
664 | |||
665 | if (!ec) | ||
666 | return; | ||
667 | mutex_lock(&ec->mutex); | ||
668 | acpi_ec_sync_query(ec, NULL); | ||
669 | mutex_unlock(&ec->mutex); | 874 | mutex_unlock(&ec->mutex); |
875 | return result; | ||
670 | } | 876 | } |
671 | 877 | ||
672 | static int ec_check_sci(struct acpi_ec *ec, u8 state) | 878 | static void acpi_ec_gpe_poller(struct work_struct *work) |
673 | { | 879 | { |
674 | if (state & ACPI_EC_FLAG_SCI) { | 880 | struct acpi_ec *ec = container_of(work, struct acpi_ec, work); |
675 | if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) { | 881 | |
676 | pr_debug("***** Event started *****\n"); | 882 | acpi_ec_query(ec, NULL); |
677 | return acpi_os_execute(OSL_NOTIFY_HANDLER, | ||
678 | acpi_ec_gpe_query, ec); | ||
679 | } | ||
680 | } | ||
681 | return 0; | ||
682 | } | 883 | } |
683 | 884 | ||
684 | static u32 acpi_ec_gpe_handler(acpi_handle gpe_device, | 885 | static u32 acpi_ec_gpe_handler(acpi_handle gpe_device, |
@@ -688,11 +889,9 @@ static u32 acpi_ec_gpe_handler(acpi_handle gpe_device, | |||
688 | struct acpi_ec *ec = data; | 889 | struct acpi_ec *ec = data; |
689 | 890 | ||
690 | spin_lock_irqsave(&ec->lock, flags); | 891 | spin_lock_irqsave(&ec->lock, flags); |
691 | if (advance_transaction(ec)) | 892 | advance_transaction(ec); |
692 | wake_up(&ec->wait); | ||
693 | spin_unlock_irqrestore(&ec->lock, flags); | 893 | spin_unlock_irqrestore(&ec->lock, flags); |
694 | ec_check_sci(ec, acpi_ec_read_status(ec)); | 894 | return ACPI_INTERRUPT_HANDLED; |
695 | return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE; | ||
696 | } | 895 | } |
697 | 896 | ||
698 | /* -------------------------------------------------------------------------- | 897 | /* -------------------------------------------------------------------------- |
@@ -755,6 +954,7 @@ static struct acpi_ec *make_acpi_ec(void) | |||
755 | init_waitqueue_head(&ec->wait); | 954 | init_waitqueue_head(&ec->wait); |
756 | INIT_LIST_HEAD(&ec->list); | 955 | INIT_LIST_HEAD(&ec->list); |
757 | spin_lock_init(&ec->lock); | 956 | spin_lock_init(&ec->lock); |
957 | INIT_WORK(&ec->work, acpi_ec_gpe_poller); | ||
758 | return ec; | 958 | return ec; |
759 | } | 959 | } |
760 | 960 | ||
@@ -810,13 +1010,13 @@ static int ec_install_handlers(struct acpi_ec *ec) | |||
810 | 1010 | ||
811 | if (test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags)) | 1011 | if (test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags)) |
812 | return 0; | 1012 | return 0; |
813 | status = acpi_install_gpe_handler(NULL, ec->gpe, | 1013 | status = acpi_install_gpe_raw_handler(NULL, ec->gpe, |
814 | ACPI_GPE_EDGE_TRIGGERED, | 1014 | ACPI_GPE_EDGE_TRIGGERED, |
815 | &acpi_ec_gpe_handler, ec); | 1015 | &acpi_ec_gpe_handler, ec); |
816 | if (ACPI_FAILURE(status)) | 1016 | if (ACPI_FAILURE(status)) |
817 | return -ENODEV; | 1017 | return -ENODEV; |
818 | 1018 | ||
819 | acpi_enable_gpe(NULL, ec->gpe); | 1019 | acpi_ec_start(ec, false); |
820 | status = acpi_install_address_space_handler(ec->handle, | 1020 | status = acpi_install_address_space_handler(ec->handle, |
821 | ACPI_ADR_SPACE_EC, | 1021 | ACPI_ADR_SPACE_EC, |
822 | &acpi_ec_space_handler, | 1022 | &acpi_ec_space_handler, |
@@ -831,7 +1031,7 @@ static int ec_install_handlers(struct acpi_ec *ec) | |||
831 | pr_err("Fail in evaluating the _REG object" | 1031 | pr_err("Fail in evaluating the _REG object" |
832 | " of EC device. Broken bios is suspected.\n"); | 1032 | " of EC device. Broken bios is suspected.\n"); |
833 | } else { | 1033 | } else { |
834 | acpi_disable_gpe(NULL, ec->gpe); | 1034 | acpi_ec_stop(ec, false); |
835 | acpi_remove_gpe_handler(NULL, ec->gpe, | 1035 | acpi_remove_gpe_handler(NULL, ec->gpe, |
836 | &acpi_ec_gpe_handler); | 1036 | &acpi_ec_gpe_handler); |
837 | return -ENODEV; | 1037 | return -ENODEV; |
@@ -846,7 +1046,7 @@ static void ec_remove_handlers(struct acpi_ec *ec) | |||
846 | { | 1046 | { |
847 | if (!test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags)) | 1047 | if (!test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags)) |
848 | return; | 1048 | return; |
849 | acpi_disable_gpe(NULL, ec->gpe); | 1049 | acpi_ec_stop(ec, false); |
850 | if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle, | 1050 | if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle, |
851 | ACPI_ADR_SPACE_EC, &acpi_ec_space_handler))) | 1051 | ACPI_ADR_SPACE_EC, &acpi_ec_space_handler))) |
852 | pr_err("failed to remove space handler\n"); | 1052 | pr_err("failed to remove space handler\n"); |
@@ -903,11 +1103,8 @@ static int acpi_ec_add(struct acpi_device *device) | |||
903 | clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); | 1103 | clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); |
904 | 1104 | ||
905 | /* Clear stale _Q events if hardware might require that */ | 1105 | /* Clear stale _Q events if hardware might require that */ |
906 | if (EC_FLAGS_CLEAR_ON_RESUME) { | 1106 | if (EC_FLAGS_CLEAR_ON_RESUME) |
907 | mutex_lock(&ec->mutex); | ||
908 | acpi_ec_clear(ec); | 1107 | acpi_ec_clear(ec); |
909 | mutex_unlock(&ec->mutex); | ||
910 | } | ||
911 | return ret; | 1108 | return ret; |
912 | } | 1109 | } |
913 | 1110 | ||
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 163e82f536fa..56b321aa2b1c 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h | |||
@@ -35,6 +35,13 @@ void acpi_int340x_thermal_init(void); | |||
35 | int acpi_sysfs_init(void); | 35 | int acpi_sysfs_init(void); |
36 | void acpi_container_init(void); | 36 | void acpi_container_init(void); |
37 | void acpi_memory_hotplug_init(void); | 37 | void acpi_memory_hotplug_init(void); |
38 | #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC | ||
39 | int acpi_ioapic_add(struct acpi_pci_root *root); | ||
40 | int acpi_ioapic_remove(struct acpi_pci_root *root); | ||
41 | #else | ||
42 | static inline int acpi_ioapic_add(struct acpi_pci_root *root) { return 0; } | ||
43 | static inline int acpi_ioapic_remove(struct acpi_pci_root *root) { return 0; } | ||
44 | #endif | ||
38 | #ifdef CONFIG_ACPI_DOCK | 45 | #ifdef CONFIG_ACPI_DOCK |
39 | void register_dock_dependent_device(struct acpi_device *adev, | 46 | void register_dock_dependent_device(struct acpi_device *adev, |
40 | acpi_handle dshandle); | 47 | acpi_handle dshandle); |
@@ -68,6 +75,8 @@ static inline void acpi_debugfs_init(void) { return; } | |||
68 | #endif | 75 | #endif |
69 | void acpi_lpss_init(void); | 76 | void acpi_lpss_init(void); |
70 | 77 | ||
78 | void acpi_apd_init(void); | ||
79 | |||
71 | acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src); | 80 | acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src); |
72 | bool acpi_queue_hotplug_work(struct work_struct *work); | 81 | bool acpi_queue_hotplug_work(struct work_struct *work); |
73 | void acpi_device_hotplug(struct acpi_device *adev, u32 src); | 82 | void acpi_device_hotplug(struct acpi_device *adev, u32 src); |
@@ -122,11 +131,13 @@ struct acpi_ec { | |||
122 | unsigned long data_addr; | 131 | unsigned long data_addr; |
123 | unsigned long global_lock; | 132 | unsigned long global_lock; |
124 | unsigned long flags; | 133 | unsigned long flags; |
134 | unsigned long reference_count; | ||
125 | struct mutex mutex; | 135 | struct mutex mutex; |
126 | wait_queue_head_t wait; | 136 | wait_queue_head_t wait; |
127 | struct list_head list; | 137 | struct list_head list; |
128 | struct transaction *curr; | 138 | struct transaction *curr; |
129 | spinlock_t lock; | 139 | spinlock_t lock; |
140 | struct work_struct work; | ||
130 | }; | 141 | }; |
131 | 142 | ||
132 | extern struct acpi_ec *first_ec; | 143 | extern struct acpi_ec *first_ec; |
diff --git a/drivers/acpi/ioapic.c b/drivers/acpi/ioapic.c new file mode 100644 index 000000000000..ccdc8db16bb8 --- /dev/null +++ b/drivers/acpi/ioapic.c | |||
@@ -0,0 +1,229 @@ | |||
1 | /* | ||
2 | * IOAPIC/IOxAPIC/IOSAPIC driver | ||
3 | * | ||
4 | * Copyright (C) 2009 Fujitsu Limited. | ||
5 | * (c) Copyright 2009 Hewlett-Packard Development Company, L.P. | ||
6 | * | ||
7 | * Copyright (C) 2014 Intel Corporation | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | * | ||
13 | * Based on original drivers/pci/ioapic.c | ||
14 | * Yinghai Lu <yinghai@kernel.org> | ||
15 | * Jiang Liu <jiang.liu@intel.com> | ||
16 | */ | ||
17 | |||
18 | /* | ||
19 | * This driver manages I/O APICs added by hotplug after boot. | ||
20 | * We try to claim all I/O APIC devices, but those present at boot were | ||
21 | * registered when we parsed the ACPI MADT. | ||
22 | */ | ||
23 | |||
24 | #define pr_fmt(fmt) "ACPI : IOAPIC: " fmt | ||
25 | |||
26 | #include <linux/slab.h> | ||
27 | #include <linux/acpi.h> | ||
28 | #include <linux/pci.h> | ||
29 | #include <acpi/acpi.h> | ||
30 | |||
31 | struct acpi_pci_ioapic { | ||
32 | acpi_handle root_handle; | ||
33 | acpi_handle handle; | ||
34 | u32 gsi_base; | ||
35 | struct resource res; | ||
36 | struct pci_dev *pdev; | ||
37 | struct list_head list; | ||
38 | }; | ||
39 | |||
40 | static LIST_HEAD(ioapic_list); | ||
41 | static DEFINE_MUTEX(ioapic_list_lock); | ||
42 | |||
43 | static acpi_status setup_res(struct acpi_resource *acpi_res, void *data) | ||
44 | { | ||
45 | struct resource *res = data; | ||
46 | struct resource_win win; | ||
47 | |||
48 | res->flags = 0; | ||
49 | if (acpi_dev_filter_resource_type(acpi_res, IORESOURCE_MEM) == 0) | ||
50 | return AE_OK; | ||
51 | |||
52 | if (!acpi_dev_resource_memory(acpi_res, res)) { | ||
53 | if (acpi_dev_resource_address_space(acpi_res, &win) || | ||
54 | acpi_dev_resource_ext_address_space(acpi_res, &win)) | ||
55 | *res = win.res; | ||
56 | } | ||
57 | if ((res->flags & IORESOURCE_PREFETCH) || | ||
58 | (res->flags & IORESOURCE_DISABLED)) | ||
59 | res->flags = 0; | ||
60 | |||
61 | return AE_CTRL_TERMINATE; | ||
62 | } | ||
63 | |||
64 | static bool acpi_is_ioapic(acpi_handle handle, char **type) | ||
65 | { | ||
66 | acpi_status status; | ||
67 | struct acpi_device_info *info; | ||
68 | char *hid = NULL; | ||
69 | bool match = false; | ||
70 | |||
71 | if (!acpi_has_method(handle, "_GSB")) | ||
72 | return false; | ||
73 | |||
74 | status = acpi_get_object_info(handle, &info); | ||
75 | if (ACPI_SUCCESS(status)) { | ||
76 | if (info->valid & ACPI_VALID_HID) | ||
77 | hid = info->hardware_id.string; | ||
78 | if (hid) { | ||
79 | if (strcmp(hid, "ACPI0009") == 0) { | ||
80 | *type = "IOxAPIC"; | ||
81 | match = true; | ||
82 | } else if (strcmp(hid, "ACPI000A") == 0) { | ||
83 | *type = "IOAPIC"; | ||
84 | match = true; | ||
85 | } | ||
86 | } | ||
87 | kfree(info); | ||
88 | } | ||
89 | |||
90 | return match; | ||
91 | } | ||
92 | |||
93 | static acpi_status handle_ioapic_add(acpi_handle handle, u32 lvl, | ||
94 | void *context, void **rv) | ||
95 | { | ||
96 | acpi_status status; | ||
97 | unsigned long long gsi_base; | ||
98 | struct acpi_pci_ioapic *ioapic; | ||
99 | struct pci_dev *dev = NULL; | ||
100 | struct resource *res = NULL; | ||
101 | char *type = NULL; | ||
102 | |||
103 | if (!acpi_is_ioapic(handle, &type)) | ||
104 | return AE_OK; | ||
105 | |||
106 | mutex_lock(&ioapic_list_lock); | ||
107 | list_for_each_entry(ioapic, &ioapic_list, list) | ||
108 | if (ioapic->handle == handle) { | ||
109 | mutex_unlock(&ioapic_list_lock); | ||
110 | return AE_OK; | ||
111 | } | ||
112 | |||
113 | status = acpi_evaluate_integer(handle, "_GSB", NULL, &gsi_base); | ||
114 | if (ACPI_FAILURE(status)) { | ||
115 | acpi_handle_warn(handle, "failed to evaluate _GSB method\n"); | ||
116 | goto exit; | ||
117 | } | ||
118 | |||
119 | ioapic = kzalloc(sizeof(*ioapic), GFP_KERNEL); | ||
120 | if (!ioapic) { | ||
121 | pr_err("cannot allocate memory for new IOAPIC\n"); | ||
122 | goto exit; | ||
123 | } else { | ||
124 | ioapic->root_handle = (acpi_handle)context; | ||
125 | ioapic->handle = handle; | ||
126 | ioapic->gsi_base = (u32)gsi_base; | ||
127 | INIT_LIST_HEAD(&ioapic->list); | ||
128 | } | ||
129 | |||
130 | if (acpi_ioapic_registered(handle, (u32)gsi_base)) | ||
131 | goto done; | ||
132 | |||
133 | dev = acpi_get_pci_dev(handle); | ||
134 | if (dev && pci_resource_len(dev, 0)) { | ||
135 | if (pci_enable_device(dev) < 0) | ||
136 | goto exit_put; | ||
137 | pci_set_master(dev); | ||
138 | if (pci_request_region(dev, 0, type)) | ||
139 | goto exit_disable; | ||
140 | res = &dev->resource[0]; | ||
141 | ioapic->pdev = dev; | ||
142 | } else { | ||
143 | pci_dev_put(dev); | ||
144 | dev = NULL; | ||
145 | |||
146 | res = &ioapic->res; | ||
147 | acpi_walk_resources(handle, METHOD_NAME__CRS, setup_res, res); | ||
148 | if (res->flags == 0) { | ||
149 | acpi_handle_warn(handle, "failed to get resource\n"); | ||
150 | goto exit_free; | ||
151 | } else if (request_resource(&iomem_resource, res)) { | ||
152 | acpi_handle_warn(handle, "failed to insert resource\n"); | ||
153 | goto exit_free; | ||
154 | } | ||
155 | } | ||
156 | |||
157 | if (acpi_register_ioapic(handle, res->start, (u32)gsi_base)) { | ||
158 | acpi_handle_warn(handle, "failed to register IOAPIC\n"); | ||
159 | goto exit_release; | ||
160 | } | ||
161 | done: | ||
162 | list_add(&ioapic->list, &ioapic_list); | ||
163 | mutex_unlock(&ioapic_list_lock); | ||
164 | |||
165 | if (dev) | ||
166 | dev_info(&dev->dev, "%s at %pR, GSI %u\n", | ||
167 | type, res, (u32)gsi_base); | ||
168 | else | ||
169 | acpi_handle_info(handle, "%s at %pR, GSI %u\n", | ||
170 | type, res, (u32)gsi_base); | ||
171 | |||
172 | return AE_OK; | ||
173 | |||
174 | exit_release: | ||
175 | if (dev) | ||
176 | pci_release_region(dev, 0); | ||
177 | else | ||
178 | release_resource(res); | ||
179 | exit_disable: | ||
180 | if (dev) | ||
181 | pci_disable_device(dev); | ||
182 | exit_put: | ||
183 | pci_dev_put(dev); | ||
184 | exit_free: | ||
185 | kfree(ioapic); | ||
186 | exit: | ||
187 | mutex_unlock(&ioapic_list_lock); | ||
188 | *(acpi_status *)rv = AE_ERROR; | ||
189 | return AE_OK; | ||
190 | } | ||
191 | |||
192 | int acpi_ioapic_add(struct acpi_pci_root *root) | ||
193 | { | ||
194 | acpi_status status, retval = AE_OK; | ||
195 | |||
196 | status = acpi_walk_namespace(ACPI_TYPE_DEVICE, root->device->handle, | ||
197 | UINT_MAX, handle_ioapic_add, NULL, | ||
198 | root->device->handle, (void **)&retval); | ||
199 | |||
200 | return ACPI_SUCCESS(status) && ACPI_SUCCESS(retval) ? 0 : -ENODEV; | ||
201 | } | ||
202 | |||
203 | int acpi_ioapic_remove(struct acpi_pci_root *root) | ||
204 | { | ||
205 | int retval = 0; | ||
206 | struct acpi_pci_ioapic *ioapic, *tmp; | ||
207 | |||
208 | mutex_lock(&ioapic_list_lock); | ||
209 | list_for_each_entry_safe(ioapic, tmp, &ioapic_list, list) { | ||
210 | if (root->device->handle != ioapic->root_handle) | ||
211 | continue; | ||
212 | |||
213 | if (acpi_unregister_ioapic(ioapic->handle, ioapic->gsi_base)) | ||
214 | retval = -EBUSY; | ||
215 | |||
216 | if (ioapic->pdev) { | ||
217 | pci_release_region(ioapic->pdev, 0); | ||
218 | pci_disable_device(ioapic->pdev); | ||
219 | pci_dev_put(ioapic->pdev); | ||
220 | } else if (ioapic->res.flags && ioapic->res.parent) { | ||
221 | release_resource(&ioapic->res); | ||
222 | } | ||
223 | list_del(&ioapic->list); | ||
224 | kfree(ioapic); | ||
225 | } | ||
226 | mutex_unlock(&ioapic_list_lock); | ||
227 | |||
228 | return retval; | ||
229 | } | ||
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c index 24b5476449a1..1333cbdc3ea2 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c | |||
@@ -177,12 +177,7 @@ static int __init slit_valid(struct acpi_table_slit *slit) | |||
177 | 177 | ||
178 | static int __init acpi_parse_slit(struct acpi_table_header *table) | 178 | static int __init acpi_parse_slit(struct acpi_table_header *table) |
179 | { | 179 | { |
180 | struct acpi_table_slit *slit; | 180 | struct acpi_table_slit *slit = (struct acpi_table_slit *)table; |
181 | |||
182 | if (!table) | ||
183 | return -EINVAL; | ||
184 | |||
185 | slit = (struct acpi_table_slit *)table; | ||
186 | 181 | ||
187 | if (!slit_valid(slit)) { | 182 | if (!slit_valid(slit)) { |
188 | printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n"); | 183 | printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n"); |
@@ -260,11 +255,8 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header, | |||
260 | 255 | ||
261 | static int __init acpi_parse_srat(struct acpi_table_header *table) | 256 | static int __init acpi_parse_srat(struct acpi_table_header *table) |
262 | { | 257 | { |
263 | struct acpi_table_srat *srat; | 258 | struct acpi_table_srat *srat = (struct acpi_table_srat *)table; |
264 | if (!table) | ||
265 | return -EINVAL; | ||
266 | 259 | ||
267 | srat = (struct acpi_table_srat *)table; | ||
268 | acpi_srat_revision = srat->header.revision; | 260 | acpi_srat_revision = srat->header.revision; |
269 | 261 | ||
270 | /* Real work done in acpi_table_parse_srat below. */ | 262 | /* Real work done in acpi_table_parse_srat below. */ |
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index b1def411c0b8..e7f718d6918a 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c | |||
@@ -485,14 +485,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev) | |||
485 | if (!pin || !dev->irq_managed || dev->irq <= 0) | 485 | if (!pin || !dev->irq_managed || dev->irq <= 0) |
486 | return; | 486 | return; |
487 | 487 | ||
488 | /* Keep IOAPIC pin configuration when suspending */ | ||
489 | if (dev->dev.power.is_prepared) | ||
490 | return; | ||
491 | #ifdef CONFIG_PM | ||
492 | if (dev->dev.power.runtime_status == RPM_SUSPENDING) | ||
493 | return; | ||
494 | #endif | ||
495 | |||
496 | entry = acpi_pci_irq_lookup(dev, pin); | 488 | entry = acpi_pci_irq_lookup(dev, pin); |
497 | if (!entry) | 489 | if (!entry) |
498 | return; | 490 | return; |
@@ -513,5 +505,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev) | |||
513 | if (gsi >= 0) { | 505 | if (gsi >= 0) { |
514 | acpi_unregister_gsi(gsi); | 506 | acpi_unregister_gsi(gsi); |
515 | dev->irq_managed = 0; | 507 | dev->irq_managed = 0; |
508 | dev->irq = 0; | ||
516 | } | 509 | } |
517 | } | 510 | } |
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index c6bcb8c719d8..68a5f712cd19 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c | |||
@@ -112,10 +112,10 @@ get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data) | |||
112 | if (ACPI_FAILURE(status)) | 112 | if (ACPI_FAILURE(status)) |
113 | return AE_OK; | 113 | return AE_OK; |
114 | 114 | ||
115 | if ((address.address_length > 0) && | 115 | if ((address.address.address_length > 0) && |
116 | (address.resource_type == ACPI_BUS_NUMBER_RANGE)) { | 116 | (address.resource_type == ACPI_BUS_NUMBER_RANGE)) { |
117 | res->start = address.minimum; | 117 | res->start = address.address.minimum; |
118 | res->end = address.minimum + address.address_length - 1; | 118 | res->end = address.address.minimum + address.address.address_length - 1; |
119 | } | 119 | } |
120 | 120 | ||
121 | return AE_OK; | 121 | return AE_OK; |
@@ -621,6 +621,7 @@ static int acpi_pci_root_add(struct acpi_device *device, | |||
621 | if (hotadd) { | 621 | if (hotadd) { |
622 | pcibios_resource_survey_bus(root->bus); | 622 | pcibios_resource_survey_bus(root->bus); |
623 | pci_assign_unassigned_root_bus_resources(root->bus); | 623 | pci_assign_unassigned_root_bus_resources(root->bus); |
624 | acpi_ioapic_add(root); | ||
624 | } | 625 | } |
625 | 626 | ||
626 | pci_lock_rescan_remove(); | 627 | pci_lock_rescan_remove(); |
@@ -644,6 +645,8 @@ static void acpi_pci_root_remove(struct acpi_device *device) | |||
644 | 645 | ||
645 | pci_stop_root_bus(root->bus); | 646 | pci_stop_root_bus(root->bus); |
646 | 647 | ||
648 | WARN_ON(acpi_ioapic_remove(root)); | ||
649 | |||
647 | device_set_run_wake(root->bus->bridge, false); | 650 | device_set_run_wake(root->bus->bridge, false); |
648 | pci_acpi_remove_bus_pm_notifier(device); | 651 | pci_acpi_remove_bus_pm_notifier(device); |
649 | 652 | ||
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index 02e48394276c..7962651cdbd4 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c | |||
@@ -4,6 +4,10 @@ | |||
4 | * | 4 | * |
5 | * Alex Chiang <achiang@hp.com> | 5 | * Alex Chiang <achiang@hp.com> |
6 | * - Unified x86/ia64 implementations | 6 | * - Unified x86/ia64 implementations |
7 | * | ||
8 | * I/O APIC hotplug support | ||
9 | * Yinghai Lu <yinghai@kernel.org> | ||
10 | * Jiang Liu <jiang.liu@intel.com> | ||
7 | */ | 11 | */ |
8 | #include <linux/export.h> | 12 | #include <linux/export.h> |
9 | #include <linux/acpi.h> | 13 | #include <linux/acpi.h> |
@@ -12,6 +16,21 @@ | |||
12 | #define _COMPONENT ACPI_PROCESSOR_COMPONENT | 16 | #define _COMPONENT ACPI_PROCESSOR_COMPONENT |
13 | ACPI_MODULE_NAME("processor_core"); | 17 | ACPI_MODULE_NAME("processor_core"); |
14 | 18 | ||
19 | static struct acpi_table_madt *get_madt_table(void) | ||
20 | { | ||
21 | static struct acpi_table_madt *madt; | ||
22 | static int read_madt; | ||
23 | |||
24 | if (!read_madt) { | ||
25 | if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0, | ||
26 | (struct acpi_table_header **)&madt))) | ||
27 | madt = NULL; | ||
28 | read_madt++; | ||
29 | } | ||
30 | |||
31 | return madt; | ||
32 | } | ||
33 | |||
15 | static int map_lapic_id(struct acpi_subtable_header *entry, | 34 | static int map_lapic_id(struct acpi_subtable_header *entry, |
16 | u32 acpi_id, int *apic_id) | 35 | u32 acpi_id, int *apic_id) |
17 | { | 36 | { |
@@ -67,17 +86,10 @@ static int map_lsapic_id(struct acpi_subtable_header *entry, | |||
67 | static int map_madt_entry(int type, u32 acpi_id) | 86 | static int map_madt_entry(int type, u32 acpi_id) |
68 | { | 87 | { |
69 | unsigned long madt_end, entry; | 88 | unsigned long madt_end, entry; |
70 | static struct acpi_table_madt *madt; | ||
71 | static int read_madt; | ||
72 | int phys_id = -1; /* CPU hardware ID */ | 89 | int phys_id = -1; /* CPU hardware ID */ |
90 | struct acpi_table_madt *madt; | ||
73 | 91 | ||
74 | if (!read_madt) { | 92 | madt = get_madt_table(); |
75 | if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0, | ||
76 | (struct acpi_table_header **)&madt))) | ||
77 | madt = NULL; | ||
78 | read_madt++; | ||
79 | } | ||
80 | |||
81 | if (!madt) | 93 | if (!madt) |
82 | return phys_id; | 94 | return phys_id; |
83 | 95 | ||
@@ -203,3 +215,96 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id) | |||
203 | return acpi_map_cpuid(phys_id, acpi_id); | 215 | return acpi_map_cpuid(phys_id, acpi_id); |
204 | } | 216 | } |
205 | EXPORT_SYMBOL_GPL(acpi_get_cpuid); | 217 | EXPORT_SYMBOL_GPL(acpi_get_cpuid); |
218 | |||
219 | #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC | ||
220 | static int get_ioapic_id(struct acpi_subtable_header *entry, u32 gsi_base, | ||
221 | u64 *phys_addr, int *ioapic_id) | ||
222 | { | ||
223 | struct acpi_madt_io_apic *ioapic = (struct acpi_madt_io_apic *)entry; | ||
224 | |||
225 | if (ioapic->global_irq_base != gsi_base) | ||
226 | return 0; | ||
227 | |||
228 | *phys_addr = ioapic->address; | ||
229 | *ioapic_id = ioapic->id; | ||
230 | return 1; | ||
231 | } | ||
232 | |||
233 | static int parse_madt_ioapic_entry(u32 gsi_base, u64 *phys_addr) | ||
234 | { | ||
235 | struct acpi_subtable_header *hdr; | ||
236 | unsigned long madt_end, entry; | ||
237 | struct acpi_table_madt *madt; | ||
238 | int apic_id = -1; | ||
239 | |||
240 | madt = get_madt_table(); | ||
241 | if (!madt) | ||
242 | return apic_id; | ||
243 | |||
244 | entry = (unsigned long)madt; | ||
245 | madt_end = entry + madt->header.length; | ||
246 | |||
247 | /* Parse all entries looking for a match. */ | ||
248 | entry += sizeof(struct acpi_table_madt); | ||
249 | while (entry + sizeof(struct acpi_subtable_header) < madt_end) { | ||
250 | hdr = (struct acpi_subtable_header *)entry; | ||
251 | if (hdr->type == ACPI_MADT_TYPE_IO_APIC && | ||
252 | get_ioapic_id(hdr, gsi_base, phys_addr, &apic_id)) | ||
253 | break; | ||
254 | else | ||
255 | entry += hdr->length; | ||
256 | } | ||
257 | |||
258 | return apic_id; | ||
259 | } | ||
260 | |||
261 | static int parse_mat_ioapic_entry(acpi_handle handle, u32 gsi_base, | ||
262 | u64 *phys_addr) | ||
263 | { | ||
264 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
265 | struct acpi_subtable_header *header; | ||
266 | union acpi_object *obj; | ||
267 | int apic_id = -1; | ||
268 | |||
269 | if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) | ||
270 | goto exit; | ||
271 | |||
272 | if (!buffer.length || !buffer.pointer) | ||
273 | goto exit; | ||
274 | |||
275 | obj = buffer.pointer; | ||
276 | if (obj->type != ACPI_TYPE_BUFFER || | ||
277 | obj->buffer.length < sizeof(struct acpi_subtable_header)) | ||
278 | goto exit; | ||
279 | |||
280 | header = (struct acpi_subtable_header *)obj->buffer.pointer; | ||
281 | if (header->type == ACPI_MADT_TYPE_IO_APIC) | ||
282 | get_ioapic_id(header, gsi_base, phys_addr, &apic_id); | ||
283 | |||
284 | exit: | ||
285 | kfree(buffer.pointer); | ||
286 | return apic_id; | ||
287 | } | ||
288 | |||
289 | /** | ||
290 | * acpi_get_ioapic_id - Get IOAPIC ID and physical address matching @gsi_base | ||
291 | * @handle: ACPI object for IOAPIC device | ||
292 | * @gsi_base: GSI base to match with | ||
293 | * @phys_addr: Pointer to store physical address of matching IOAPIC record | ||
294 | * | ||
295 | * Walk resources returned by ACPI_MAT method, then ACPI MADT table, to search | ||
296 | * for an ACPI IOAPIC record matching @gsi_base. | ||
297 | * Return IOAPIC id and store physical address in @phys_addr if found a match, | ||
298 | * otherwise return <0. | ||
299 | */ | ||
300 | int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr) | ||
301 | { | ||
302 | int apic_id; | ||
303 | |||
304 | apic_id = parse_mat_ioapic_entry(handle, gsi_base, phys_addr); | ||
305 | if (apic_id == -1) | ||
306 | apic_id = parse_madt_ioapic_entry(gsi_base, phys_addr); | ||
307 | |||
308 | return apic_id; | ||
309 | } | ||
310 | #endif /* CONFIG_ACPI_HOTPLUG_IOAPIC */ | ||
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 87b704e41877..c256bd7fbd78 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c | |||
@@ -681,15 +681,13 @@ static int acpi_idle_bm_check(void) | |||
681 | } | 681 | } |
682 | 682 | ||
683 | /** | 683 | /** |
684 | * acpi_idle_do_entry - a helper function that does C2 and C3 type entry | 684 | * acpi_idle_do_entry - enter idle state using the appropriate method |
685 | * @cx: cstate data | 685 | * @cx: cstate data |
686 | * | 686 | * |
687 | * Caller disables interrupt before call and enables interrupt after return. | 687 | * Caller disables interrupt before call and enables interrupt after return. |
688 | */ | 688 | */ |
689 | static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) | 689 | static void acpi_idle_do_entry(struct acpi_processor_cx *cx) |
690 | { | 690 | { |
691 | /* Don't trace irqs off for idle */ | ||
692 | stop_critical_timings(); | ||
693 | if (cx->entry_method == ACPI_CSTATE_FFH) { | 691 | if (cx->entry_method == ACPI_CSTATE_FFH) { |
694 | /* Call into architectural FFH based C-state */ | 692 | /* Call into architectural FFH based C-state */ |
695 | acpi_processor_ffh_cstate_enter(cx); | 693 | acpi_processor_ffh_cstate_enter(cx); |
@@ -703,38 +701,9 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) | |||
703 | gets asserted in time to freeze execution properly. */ | 701 | gets asserted in time to freeze execution properly. */ |
704 | inl(acpi_gbl_FADT.xpm_timer_block.address); | 702 | inl(acpi_gbl_FADT.xpm_timer_block.address); |
705 | } | 703 | } |
706 | start_critical_timings(); | ||
707 | } | 704 | } |
708 | 705 | ||
709 | /** | 706 | /** |
710 | * acpi_idle_enter_c1 - enters an ACPI C1 state-type | ||
711 | * @dev: the target CPU | ||
712 | * @drv: cpuidle driver containing cpuidle state info | ||
713 | * @index: index of target state | ||
714 | * | ||
715 | * This is equivalent to the HALT instruction. | ||
716 | */ | ||
717 | static int acpi_idle_enter_c1(struct cpuidle_device *dev, | ||
718 | struct cpuidle_driver *drv, int index) | ||
719 | { | ||
720 | struct acpi_processor *pr; | ||
721 | struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); | ||
722 | |||
723 | pr = __this_cpu_read(processors); | ||
724 | |||
725 | if (unlikely(!pr)) | ||
726 | return -EINVAL; | ||
727 | |||
728 | lapic_timer_state_broadcast(pr, cx, 1); | ||
729 | acpi_idle_do_entry(cx); | ||
730 | |||
731 | lapic_timer_state_broadcast(pr, cx, 0); | ||
732 | |||
733 | return index; | ||
734 | } | ||
735 | |||
736 | |||
737 | /** | ||
738 | * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining) | 707 | * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining) |
739 | * @dev: the target CPU | 708 | * @dev: the target CPU |
740 | * @index: the index of suggested state | 709 | * @index: the index of suggested state |
@@ -761,47 +730,11 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index) | |||
761 | return 0; | 730 | return 0; |
762 | } | 731 | } |
763 | 732 | ||
764 | /** | 733 | static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr) |
765 | * acpi_idle_enter_simple - enters an ACPI state without BM handling | ||
766 | * @dev: the target CPU | ||
767 | * @drv: cpuidle driver with cpuidle state information | ||
768 | * @index: the index of suggested state | ||
769 | */ | ||
770 | static int acpi_idle_enter_simple(struct cpuidle_device *dev, | ||
771 | struct cpuidle_driver *drv, int index) | ||
772 | { | 734 | { |
773 | struct acpi_processor *pr; | 735 | return IS_ENABLED(CONFIG_HOTPLUG_CPU) && num_online_cpus() > 1 && |
774 | struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); | 736 | !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED) && |
775 | 737 | !pr->flags.has_cst; | |
776 | pr = __this_cpu_read(processors); | ||
777 | |||
778 | if (unlikely(!pr)) | ||
779 | return -EINVAL; | ||
780 | |||
781 | #ifdef CONFIG_HOTPLUG_CPU | ||
782 | if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) && | ||
783 | !pr->flags.has_cst && | ||
784 | !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) | ||
785 | return acpi_idle_enter_c1(dev, drv, CPUIDLE_DRIVER_STATE_START); | ||
786 | #endif | ||
787 | |||
788 | /* | ||
789 | * Must be done before busmaster disable as we might need to | ||
790 | * access HPET ! | ||
791 | */ | ||
792 | lapic_timer_state_broadcast(pr, cx, 1); | ||
793 | |||
794 | if (cx->type == ACPI_STATE_C3) | ||
795 | ACPI_FLUSH_CPU_CACHE(); | ||
796 | |||
797 | /* Tell the scheduler that we are going deep-idle: */ | ||
798 | sched_clock_idle_sleep_event(); | ||
799 | acpi_idle_do_entry(cx); | ||
800 | |||
801 | sched_clock_idle_wakeup_event(0); | ||
802 | |||
803 | lapic_timer_state_broadcast(pr, cx, 0); | ||
804 | return index; | ||
805 | } | 738 | } |
806 | 739 | ||
807 | static int c3_cpu_count; | 740 | static int c3_cpu_count; |
@@ -809,44 +742,14 @@ static DEFINE_RAW_SPINLOCK(c3_lock); | |||
809 | 742 | ||
810 | /** | 743 | /** |
811 | * acpi_idle_enter_bm - enters C3 with proper BM handling | 744 | * acpi_idle_enter_bm - enters C3 with proper BM handling |
812 | * @dev: the target CPU | 745 | * @pr: Target processor |
813 | * @drv: cpuidle driver containing state data | 746 | * @cx: Target state context |
814 | * @index: the index of suggested state | ||
815 | * | ||
816 | * If BM is detected, the deepest non-C3 idle state is entered instead. | ||
817 | */ | 747 | */ |
818 | static int acpi_idle_enter_bm(struct cpuidle_device *dev, | 748 | static void acpi_idle_enter_bm(struct acpi_processor *pr, |
819 | struct cpuidle_driver *drv, int index) | 749 | struct acpi_processor_cx *cx) |
820 | { | 750 | { |
821 | struct acpi_processor *pr; | ||
822 | struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); | ||
823 | |||
824 | pr = __this_cpu_read(processors); | ||
825 | |||
826 | if (unlikely(!pr)) | ||
827 | return -EINVAL; | ||
828 | |||
829 | #ifdef CONFIG_HOTPLUG_CPU | ||
830 | if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) && | ||
831 | !pr->flags.has_cst && | ||
832 | !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) | ||
833 | return acpi_idle_enter_c1(dev, drv, CPUIDLE_DRIVER_STATE_START); | ||
834 | #endif | ||
835 | |||
836 | if (!cx->bm_sts_skip && acpi_idle_bm_check()) { | ||
837 | if (drv->safe_state_index >= 0) { | ||
838 | return drv->states[drv->safe_state_index].enter(dev, | ||
839 | drv, drv->safe_state_index); | ||
840 | } else { | ||
841 | acpi_safe_halt(); | ||
842 | return -EBUSY; | ||
843 | } | ||
844 | } | ||
845 | |||
846 | acpi_unlazy_tlb(smp_processor_id()); | 751 | acpi_unlazy_tlb(smp_processor_id()); |
847 | 752 | ||
848 | /* Tell the scheduler that we are going deep-idle: */ | ||
849 | sched_clock_idle_sleep_event(); | ||
850 | /* | 753 | /* |
851 | * Must be done before busmaster disable as we might need to | 754 | * Must be done before busmaster disable as we might need to |
852 | * access HPET ! | 755 | * access HPET ! |
@@ -856,37 +759,71 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
856 | /* | 759 | /* |
857 | * disable bus master | 760 | * disable bus master |
858 | * bm_check implies we need ARB_DIS | 761 | * bm_check implies we need ARB_DIS |
859 | * !bm_check implies we need cache flush | ||
860 | * bm_control implies whether we can do ARB_DIS | 762 | * bm_control implies whether we can do ARB_DIS |
861 | * | 763 | * |
862 | * That leaves a case where bm_check is set and bm_control is | 764 | * That leaves a case where bm_check is set and bm_control is |
863 | * not set. In that case we cannot do much, we enter C3 | 765 | * not set. In that case we cannot do much, we enter C3 |
864 | * without doing anything. | 766 | * without doing anything. |
865 | */ | 767 | */ |
866 | if (pr->flags.bm_check && pr->flags.bm_control) { | 768 | if (pr->flags.bm_control) { |
867 | raw_spin_lock(&c3_lock); | 769 | raw_spin_lock(&c3_lock); |
868 | c3_cpu_count++; | 770 | c3_cpu_count++; |
869 | /* Disable bus master arbitration when all CPUs are in C3 */ | 771 | /* Disable bus master arbitration when all CPUs are in C3 */ |
870 | if (c3_cpu_count == num_online_cpus()) | 772 | if (c3_cpu_count == num_online_cpus()) |
871 | acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1); | 773 | acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1); |
872 | raw_spin_unlock(&c3_lock); | 774 | raw_spin_unlock(&c3_lock); |
873 | } else if (!pr->flags.bm_check) { | ||
874 | ACPI_FLUSH_CPU_CACHE(); | ||
875 | } | 775 | } |
876 | 776 | ||
877 | acpi_idle_do_entry(cx); | 777 | acpi_idle_do_entry(cx); |
878 | 778 | ||
879 | /* Re-enable bus master arbitration */ | 779 | /* Re-enable bus master arbitration */ |
880 | if (pr->flags.bm_check && pr->flags.bm_control) { | 780 | if (pr->flags.bm_control) { |
881 | raw_spin_lock(&c3_lock); | 781 | raw_spin_lock(&c3_lock); |
882 | acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0); | 782 | acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0); |
883 | c3_cpu_count--; | 783 | c3_cpu_count--; |
884 | raw_spin_unlock(&c3_lock); | 784 | raw_spin_unlock(&c3_lock); |
885 | } | 785 | } |
886 | 786 | ||
887 | sched_clock_idle_wakeup_event(0); | 787 | lapic_timer_state_broadcast(pr, cx, 0); |
788 | } | ||
789 | |||
790 | static int acpi_idle_enter(struct cpuidle_device *dev, | ||
791 | struct cpuidle_driver *drv, int index) | ||
792 | { | ||
793 | struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); | ||
794 | struct acpi_processor *pr; | ||
795 | |||
796 | pr = __this_cpu_read(processors); | ||
797 | if (unlikely(!pr)) | ||
798 | return -EINVAL; | ||
799 | |||
800 | if (cx->type != ACPI_STATE_C1) { | ||
801 | if (acpi_idle_fallback_to_c1(pr)) { | ||
802 | index = CPUIDLE_DRIVER_STATE_START; | ||
803 | cx = per_cpu(acpi_cstate[index], dev->cpu); | ||
804 | } else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) { | ||
805 | if (cx->bm_sts_skip || !acpi_idle_bm_check()) { | ||
806 | acpi_idle_enter_bm(pr, cx); | ||
807 | return index; | ||
808 | } else if (drv->safe_state_index >= 0) { | ||
809 | index = drv->safe_state_index; | ||
810 | cx = per_cpu(acpi_cstate[index], dev->cpu); | ||
811 | } else { | ||
812 | acpi_safe_halt(); | ||
813 | return -EBUSY; | ||
814 | } | ||
815 | } | ||
816 | } | ||
817 | |||
818 | lapic_timer_state_broadcast(pr, cx, 1); | ||
819 | |||
820 | if (cx->type == ACPI_STATE_C3) | ||
821 | ACPI_FLUSH_CPU_CACHE(); | ||
822 | |||
823 | acpi_idle_do_entry(cx); | ||
888 | 824 | ||
889 | lapic_timer_state_broadcast(pr, cx, 0); | 825 | lapic_timer_state_broadcast(pr, cx, 0); |
826 | |||
890 | return index; | 827 | return index; |
891 | } | 828 | } |
892 | 829 | ||
@@ -981,27 +918,12 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr) | |||
981 | strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); | 918 | strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); |
982 | state->exit_latency = cx->latency; | 919 | state->exit_latency = cx->latency; |
983 | state->target_residency = cx->latency * latency_factor; | 920 | state->target_residency = cx->latency * latency_factor; |
921 | state->enter = acpi_idle_enter; | ||
984 | 922 | ||
985 | state->flags = 0; | 923 | state->flags = 0; |
986 | switch (cx->type) { | 924 | if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) { |
987 | case ACPI_STATE_C1: | ||
988 | |||
989 | state->enter = acpi_idle_enter_c1; | ||
990 | state->enter_dead = acpi_idle_play_dead; | ||
991 | drv->safe_state_index = count; | ||
992 | break; | ||
993 | |||
994 | case ACPI_STATE_C2: | ||
995 | state->enter = acpi_idle_enter_simple; | ||
996 | state->enter_dead = acpi_idle_play_dead; | 925 | state->enter_dead = acpi_idle_play_dead; |
997 | drv->safe_state_index = count; | 926 | drv->safe_state_index = count; |
998 | break; | ||
999 | |||
1000 | case ACPI_STATE_C3: | ||
1001 | state->enter = pr->flags.bm_check ? | ||
1002 | acpi_idle_enter_bm : | ||
1003 | acpi_idle_enter_simple; | ||
1004 | break; | ||
1005 | } | 927 | } |
1006 | 928 | ||
1007 | count++; | 929 | count++; |
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index 782a0d15c25f..4752b9939987 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c | |||
@@ -34,21 +34,34 @@ | |||
34 | #define valid_IRQ(i) (true) | 34 | #define valid_IRQ(i) (true) |
35 | #endif | 35 | #endif |
36 | 36 | ||
37 | static unsigned long acpi_dev_memresource_flags(u64 len, u8 write_protect, | 37 | static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io) |
38 | bool window) | ||
39 | { | 38 | { |
40 | unsigned long flags = IORESOURCE_MEM; | 39 | u64 reslen = end - start + 1; |
41 | 40 | ||
42 | if (len == 0) | 41 | /* |
43 | flags |= IORESOURCE_DISABLED; | 42 | * CHECKME: len might be required to check versus a minimum |
43 | * length as well. 1 for io is fine, but for memory it does | ||
44 | * not make any sense at all. | ||
45 | */ | ||
46 | if (len && reslen && reslen == len && start <= end) | ||
47 | return true; | ||
44 | 48 | ||
45 | if (write_protect == ACPI_READ_WRITE_MEMORY) | 49 | pr_info("ACPI: invalid or unassigned resource %s [%016llx - %016llx] length [%016llx]\n", |
46 | flags |= IORESOURCE_MEM_WRITEABLE; | 50 | io ? "io" : "mem", start, end, len); |
51 | |||
52 | return false; | ||
53 | } | ||
54 | |||
55 | static void acpi_dev_memresource_flags(struct resource *res, u64 len, | ||
56 | u8 write_protect) | ||
57 | { | ||
58 | res->flags = IORESOURCE_MEM; | ||
47 | 59 | ||
48 | if (window) | 60 | if (!acpi_dev_resource_len_valid(res->start, res->end, len, false)) |
49 | flags |= IORESOURCE_WINDOW; | 61 | res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET; |
50 | 62 | ||
51 | return flags; | 63 | if (write_protect == ACPI_READ_WRITE_MEMORY) |
64 | res->flags |= IORESOURCE_MEM_WRITEABLE; | ||
52 | } | 65 | } |
53 | 66 | ||
54 | static void acpi_dev_get_memresource(struct resource *res, u64 start, u64 len, | 67 | static void acpi_dev_get_memresource(struct resource *res, u64 start, u64 len, |
@@ -56,7 +69,7 @@ static void acpi_dev_get_memresource(struct resource *res, u64 start, u64 len, | |||
56 | { | 69 | { |
57 | res->start = start; | 70 | res->start = start; |
58 | res->end = start + len - 1; | 71 | res->end = start + len - 1; |
59 | res->flags = acpi_dev_memresource_flags(len, write_protect, false); | 72 | acpi_dev_memresource_flags(res, len, write_protect); |
60 | } | 73 | } |
61 | 74 | ||
62 | /** | 75 | /** |
@@ -67,6 +80,11 @@ static void acpi_dev_get_memresource(struct resource *res, u64 start, u64 len, | |||
67 | * Check if the given ACPI resource object represents a memory resource and | 80 | * Check if the given ACPI resource object represents a memory resource and |
68 | * if that's the case, use the information in it to populate the generic | 81 | * if that's the case, use the information in it to populate the generic |
69 | * resource object pointed to by @res. | 82 | * resource object pointed to by @res. |
83 | * | ||
84 | * Return: | ||
85 | * 1) false with res->flags setting to zero: not the expected resource type | ||
86 | * 2) false with IORESOURCE_DISABLED in res->flags: valid unassigned resource | ||
87 | * 3) true: valid assigned resource | ||
70 | */ | 88 | */ |
71 | bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res) | 89 | bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res) |
72 | { | 90 | { |
@@ -77,60 +95,52 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res) | |||
77 | switch (ares->type) { | 95 | switch (ares->type) { |
78 | case ACPI_RESOURCE_TYPE_MEMORY24: | 96 | case ACPI_RESOURCE_TYPE_MEMORY24: |
79 | memory24 = &ares->data.memory24; | 97 | memory24 = &ares->data.memory24; |
80 | if (!memory24->minimum && !memory24->address_length) | 98 | acpi_dev_get_memresource(res, memory24->minimum << 8, |
81 | return false; | 99 | memory24->address_length << 8, |
82 | acpi_dev_get_memresource(res, memory24->minimum, | ||
83 | memory24->address_length, | ||
84 | memory24->write_protect); | 100 | memory24->write_protect); |
85 | break; | 101 | break; |
86 | case ACPI_RESOURCE_TYPE_MEMORY32: | 102 | case ACPI_RESOURCE_TYPE_MEMORY32: |
87 | memory32 = &ares->data.memory32; | 103 | memory32 = &ares->data.memory32; |
88 | if (!memory32->minimum && !memory32->address_length) | ||
89 | return false; | ||
90 | acpi_dev_get_memresource(res, memory32->minimum, | 104 | acpi_dev_get_memresource(res, memory32->minimum, |
91 | memory32->address_length, | 105 | memory32->address_length, |
92 | memory32->write_protect); | 106 | memory32->write_protect); |
93 | break; | 107 | break; |
94 | case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: | 108 | case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: |
95 | fixed_memory32 = &ares->data.fixed_memory32; | 109 | fixed_memory32 = &ares->data.fixed_memory32; |
96 | if (!fixed_memory32->address && !fixed_memory32->address_length) | ||
97 | return false; | ||
98 | acpi_dev_get_memresource(res, fixed_memory32->address, | 110 | acpi_dev_get_memresource(res, fixed_memory32->address, |
99 | fixed_memory32->address_length, | 111 | fixed_memory32->address_length, |
100 | fixed_memory32->write_protect); | 112 | fixed_memory32->write_protect); |
101 | break; | 113 | break; |
102 | default: | 114 | default: |
115 | res->flags = 0; | ||
103 | return false; | 116 | return false; |
104 | } | 117 | } |
105 | return true; | 118 | |
119 | return !(res->flags & IORESOURCE_DISABLED); | ||
106 | } | 120 | } |
107 | EXPORT_SYMBOL_GPL(acpi_dev_resource_memory); | 121 | EXPORT_SYMBOL_GPL(acpi_dev_resource_memory); |
108 | 122 | ||
109 | static unsigned int acpi_dev_ioresource_flags(u64 start, u64 end, u8 io_decode, | 123 | static void acpi_dev_ioresource_flags(struct resource *res, u64 len, |
110 | bool window) | 124 | u8 io_decode) |
111 | { | 125 | { |
112 | int flags = IORESOURCE_IO; | 126 | res->flags = IORESOURCE_IO; |
113 | 127 | ||
114 | if (io_decode == ACPI_DECODE_16) | 128 | if (!acpi_dev_resource_len_valid(res->start, res->end, len, true)) |
115 | flags |= IORESOURCE_IO_16BIT_ADDR; | 129 | res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET; |
116 | 130 | ||
117 | if (start > end || end >= 0x10003) | 131 | if (res->end >= 0x10003) |
118 | flags |= IORESOURCE_DISABLED; | 132 | res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET; |
119 | 133 | ||
120 | if (window) | 134 | if (io_decode == ACPI_DECODE_16) |
121 | flags |= IORESOURCE_WINDOW; | 135 | res->flags |= IORESOURCE_IO_16BIT_ADDR; |
122 | |||
123 | return flags; | ||
124 | } | 136 | } |
125 | 137 | ||
126 | static void acpi_dev_get_ioresource(struct resource *res, u64 start, u64 len, | 138 | static void acpi_dev_get_ioresource(struct resource *res, u64 start, u64 len, |
127 | u8 io_decode) | 139 | u8 io_decode) |
128 | { | 140 | { |
129 | u64 end = start + len - 1; | ||
130 | |||
131 | res->start = start; | 141 | res->start = start; |
132 | res->end = end; | 142 | res->end = start + len - 1; |
133 | res->flags = acpi_dev_ioresource_flags(start, end, io_decode, false); | 143 | acpi_dev_ioresource_flags(res, len, io_decode); |
134 | } | 144 | } |
135 | 145 | ||
136 | /** | 146 | /** |
@@ -141,6 +151,11 @@ static void acpi_dev_get_ioresource(struct resource *res, u64 start, u64 len, | |||
141 | * Check if the given ACPI resource object represents an I/O resource and | 151 | * Check if the given ACPI resource object represents an I/O resource and |
142 | * if that's the case, use the information in it to populate the generic | 152 | * if that's the case, use the information in it to populate the generic |
143 | * resource object pointed to by @res. | 153 | * resource object pointed to by @res. |
154 | * | ||
155 | * Return: | ||
156 | * 1) false with res->flags setting to zero: not the expected resource type | ||
157 | * 2) false with IORESOURCE_DISABLED in res->flags: valid unassigned resource | ||
158 | * 3) true: valid assigned resource | ||
144 | */ | 159 | */ |
145 | bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res) | 160 | bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res) |
146 | { | 161 | { |
@@ -150,135 +165,143 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res) | |||
150 | switch (ares->type) { | 165 | switch (ares->type) { |
151 | case ACPI_RESOURCE_TYPE_IO: | 166 | case ACPI_RESOURCE_TYPE_IO: |
152 | io = &ares->data.io; | 167 | io = &ares->data.io; |
153 | if (!io->minimum && !io->address_length) | ||
154 | return false; | ||
155 | acpi_dev_get_ioresource(res, io->minimum, | 168 | acpi_dev_get_ioresource(res, io->minimum, |
156 | io->address_length, | 169 | io->address_length, |
157 | io->io_decode); | 170 | io->io_decode); |
158 | break; | 171 | break; |
159 | case ACPI_RESOURCE_TYPE_FIXED_IO: | 172 | case ACPI_RESOURCE_TYPE_FIXED_IO: |
160 | fixed_io = &ares->data.fixed_io; | 173 | fixed_io = &ares->data.fixed_io; |
161 | if (!fixed_io->address && !fixed_io->address_length) | ||
162 | return false; | ||
163 | acpi_dev_get_ioresource(res, fixed_io->address, | 174 | acpi_dev_get_ioresource(res, fixed_io->address, |
164 | fixed_io->address_length, | 175 | fixed_io->address_length, |
165 | ACPI_DECODE_10); | 176 | ACPI_DECODE_10); |
166 | break; | 177 | break; |
167 | default: | 178 | default: |
179 | res->flags = 0; | ||
168 | return false; | 180 | return false; |
169 | } | 181 | } |
170 | return true; | 182 | |
183 | return !(res->flags & IORESOURCE_DISABLED); | ||
171 | } | 184 | } |
172 | EXPORT_SYMBOL_GPL(acpi_dev_resource_io); | 185 | EXPORT_SYMBOL_GPL(acpi_dev_resource_io); |
173 | 186 | ||
174 | /** | 187 | static bool acpi_decode_space(struct resource_win *win, |
175 | * acpi_dev_resource_address_space - Extract ACPI address space information. | 188 | struct acpi_resource_address *addr, |
176 | * @ares: Input ACPI resource object. | 189 | struct acpi_address64_attribute *attr) |
177 | * @res: Output generic resource object. | ||
178 | * | ||
179 | * Check if the given ACPI resource object represents an address space resource | ||
180 | * and if that's the case, use the information in it to populate the generic | ||
181 | * resource object pointed to by @res. | ||
182 | */ | ||
183 | bool acpi_dev_resource_address_space(struct acpi_resource *ares, | ||
184 | struct resource *res) | ||
185 | { | 190 | { |
186 | acpi_status status; | 191 | u8 iodec = attr->granularity == 0xfff ? ACPI_DECODE_10 : ACPI_DECODE_16; |
187 | struct acpi_resource_address64 addr; | 192 | bool wp = addr->info.mem.write_protect; |
188 | bool window; | 193 | u64 len = attr->address_length; |
189 | u64 len; | 194 | struct resource *res = &win->res; |
190 | u8 io_decode; | ||
191 | 195 | ||
192 | switch (ares->type) { | 196 | /* |
193 | case ACPI_RESOURCE_TYPE_ADDRESS16: | 197 | * Filter out invalid descriptor according to ACPI Spec 5.0, section |
194 | case ACPI_RESOURCE_TYPE_ADDRESS32: | 198 | * 6.4.3.5 Address Space Resource Descriptors. |
195 | case ACPI_RESOURCE_TYPE_ADDRESS64: | 199 | */ |
196 | break; | 200 | if ((addr->min_address_fixed != addr->max_address_fixed && len) || |
197 | default: | 201 | (addr->min_address_fixed && addr->max_address_fixed && !len)) |
198 | return false; | 202 | pr_debug("ACPI: Invalid address space min_addr_fix %d, max_addr_fix %d, len %llx\n", |
199 | } | 203 | addr->min_address_fixed, addr->max_address_fixed, len); |
200 | 204 | ||
201 | status = acpi_resource_to_address64(ares, &addr); | 205 | res->start = attr->minimum; |
202 | if (ACPI_FAILURE(status)) | 206 | res->end = attr->maximum; |
203 | return false; | ||
204 | 207 | ||
205 | res->start = addr.minimum; | 208 | /* |
206 | res->end = addr.maximum; | 209 | * For bridges that translate addresses across the bridge, |
207 | window = addr.producer_consumer == ACPI_PRODUCER; | 210 | * translation_offset is the offset that must be added to the |
211 | * address on the secondary side to obtain the address on the | ||
212 | * primary side. Non-bridge devices must list 0 for all Address | ||
213 | * Translation offset bits. | ||
214 | */ | ||
215 | if (addr->producer_consumer == ACPI_PRODUCER) { | ||
216 | res->start += attr->translation_offset; | ||
217 | res->end += attr->translation_offset; | ||
218 | } else if (attr->translation_offset) { | ||
219 | pr_debug("ACPI: translation_offset(%lld) is invalid for non-bridge device.\n", | ||
220 | attr->translation_offset); | ||
221 | } | ||
208 | 222 | ||
209 | switch(addr.resource_type) { | 223 | switch (addr->resource_type) { |
210 | case ACPI_MEMORY_RANGE: | 224 | case ACPI_MEMORY_RANGE: |
211 | len = addr.maximum - addr.minimum + 1; | 225 | acpi_dev_memresource_flags(res, len, wp); |
212 | res->flags = acpi_dev_memresource_flags(len, | ||
213 | addr.info.mem.write_protect, | ||
214 | window); | ||
215 | break; | 226 | break; |
216 | case ACPI_IO_RANGE: | 227 | case ACPI_IO_RANGE: |
217 | io_decode = addr.granularity == 0xfff ? | 228 | acpi_dev_ioresource_flags(res, len, iodec); |
218 | ACPI_DECODE_10 : ACPI_DECODE_16; | ||
219 | res->flags = acpi_dev_ioresource_flags(addr.minimum, | ||
220 | addr.maximum, | ||
221 | io_decode, window); | ||
222 | break; | 229 | break; |
223 | case ACPI_BUS_NUMBER_RANGE: | 230 | case ACPI_BUS_NUMBER_RANGE: |
224 | res->flags = IORESOURCE_BUS; | 231 | res->flags = IORESOURCE_BUS; |
225 | break; | 232 | break; |
226 | default: | 233 | default: |
227 | res->flags = 0; | 234 | return false; |
228 | } | 235 | } |
229 | 236 | ||
230 | return true; | 237 | win->offset = attr->translation_offset; |
238 | |||
239 | if (addr->producer_consumer == ACPI_PRODUCER) | ||
240 | res->flags |= IORESOURCE_WINDOW; | ||
241 | |||
242 | if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY) | ||
243 | res->flags |= IORESOURCE_PREFETCH; | ||
244 | |||
245 | return !(res->flags & IORESOURCE_DISABLED); | ||
246 | } | ||
247 | |||
248 | /** | ||
249 | * acpi_dev_resource_address_space - Extract ACPI address space information. | ||
250 | * @ares: Input ACPI resource object. | ||
251 | * @win: Output generic resource object. | ||
252 | * | ||
253 | * Check if the given ACPI resource object represents an address space resource | ||
254 | * and if that's the case, use the information in it to populate the generic | ||
255 | * resource object pointed to by @win. | ||
256 | * | ||
257 | * Return: | ||
258 | * 1) false with win->res.flags setting to zero: not the expected resource type | ||
259 | * 2) false with IORESOURCE_DISABLED in win->res.flags: valid unassigned | ||
260 | * resource | ||
261 | * 3) true: valid assigned resource | ||
262 | */ | ||
263 | bool acpi_dev_resource_address_space(struct acpi_resource *ares, | ||
264 | struct resource_win *win) | ||
265 | { | ||
266 | struct acpi_resource_address64 addr; | ||
267 | |||
268 | win->res.flags = 0; | ||
269 | if (ACPI_FAILURE(acpi_resource_to_address64(ares, &addr))) | ||
270 | return false; | ||
271 | |||
272 | return acpi_decode_space(win, (struct acpi_resource_address *)&addr, | ||
273 | &addr.address); | ||
231 | } | 274 | } |
232 | EXPORT_SYMBOL_GPL(acpi_dev_resource_address_space); | 275 | EXPORT_SYMBOL_GPL(acpi_dev_resource_address_space); |
233 | 276 | ||
234 | /** | 277 | /** |
235 | * acpi_dev_resource_ext_address_space - Extract ACPI address space information. | 278 | * acpi_dev_resource_ext_address_space - Extract ACPI address space information. |
236 | * @ares: Input ACPI resource object. | 279 | * @ares: Input ACPI resource object. |
237 | * @res: Output generic resource object. | 280 | * @win: Output generic resource object. |
238 | * | 281 | * |
239 | * Check if the given ACPI resource object represents an extended address space | 282 | * Check if the given ACPI resource object represents an extended address space |
240 | * resource and if that's the case, use the information in it to populate the | 283 | * resource and if that's the case, use the information in it to populate the |
241 | * generic resource object pointed to by @res. | 284 | * generic resource object pointed to by @win. |
285 | * | ||
286 | * Return: | ||
287 | * 1) false with win->res.flags setting to zero: not the expected resource type | ||
288 | * 2) false with IORESOURCE_DISABLED in win->res.flags: valid unassigned | ||
289 | * resource | ||
290 | * 3) true: valid assigned resource | ||
242 | */ | 291 | */ |
243 | bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares, | 292 | bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares, |
244 | struct resource *res) | 293 | struct resource_win *win) |
245 | { | 294 | { |
246 | struct acpi_resource_extended_address64 *ext_addr; | 295 | struct acpi_resource_extended_address64 *ext_addr; |
247 | bool window; | ||
248 | u64 len; | ||
249 | u8 io_decode; | ||
250 | 296 | ||
297 | win->res.flags = 0; | ||
251 | if (ares->type != ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64) | 298 | if (ares->type != ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64) |
252 | return false; | 299 | return false; |
253 | 300 | ||
254 | ext_addr = &ares->data.ext_address64; | 301 | ext_addr = &ares->data.ext_address64; |
255 | 302 | ||
256 | res->start = ext_addr->minimum; | 303 | return acpi_decode_space(win, (struct acpi_resource_address *)ext_addr, |
257 | res->end = ext_addr->maximum; | 304 | &ext_addr->address); |
258 | window = ext_addr->producer_consumer == ACPI_PRODUCER; | ||
259 | |||
260 | switch(ext_addr->resource_type) { | ||
261 | case ACPI_MEMORY_RANGE: | ||
262 | len = ext_addr->maximum - ext_addr->minimum + 1; | ||
263 | res->flags = acpi_dev_memresource_flags(len, | ||
264 | ext_addr->info.mem.write_protect, | ||
265 | window); | ||
266 | break; | ||
267 | case ACPI_IO_RANGE: | ||
268 | io_decode = ext_addr->granularity == 0xfff ? | ||
269 | ACPI_DECODE_10 : ACPI_DECODE_16; | ||
270 | res->flags = acpi_dev_ioresource_flags(ext_addr->minimum, | ||
271 | ext_addr->maximum, | ||
272 | io_decode, window); | ||
273 | break; | ||
274 | case ACPI_BUS_NUMBER_RANGE: | ||
275 | res->flags = IORESOURCE_BUS; | ||
276 | break; | ||
277 | default: | ||
278 | res->flags = 0; | ||
279 | } | ||
280 | |||
281 | return true; | ||
282 | } | 305 | } |
283 | EXPORT_SYMBOL_GPL(acpi_dev_resource_ext_address_space); | 306 | EXPORT_SYMBOL_GPL(acpi_dev_resource_ext_address_space); |
284 | 307 | ||
@@ -310,7 +333,7 @@ static void acpi_dev_irqresource_disabled(struct resource *res, u32 gsi) | |||
310 | { | 333 | { |
311 | res->start = gsi; | 334 | res->start = gsi; |
312 | res->end = gsi; | 335 | res->end = gsi; |
313 | res->flags = IORESOURCE_IRQ | IORESOURCE_DISABLED; | 336 | res->flags = IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET; |
314 | } | 337 | } |
315 | 338 | ||
316 | static void acpi_dev_get_irqresource(struct resource *res, u32 gsi, | 339 | static void acpi_dev_get_irqresource(struct resource *res, u32 gsi, |
@@ -369,6 +392,11 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi, | |||
369 | * represented by the resource and populate the generic resource object pointed | 392 | * represented by the resource and populate the generic resource object pointed |
370 | * to by @res accordingly. If the registration of the GSI is not successful, | 393 | * to by @res accordingly. If the registration of the GSI is not successful, |
371 | * IORESOURCE_DISABLED will be set it that object's flags. | 394 | * IORESOURCE_DISABLED will be set it that object's flags. |
395 | * | ||
396 | * Return: | ||
397 | * 1) false with res->flags setting to zero: not the expected resource type | ||
398 | * 2) false with IORESOURCE_DISABLED in res->flags: valid unassigned resource | ||
399 | * 3) true: valid assigned resource | ||
372 | */ | 400 | */ |
373 | bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, | 401 | bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, |
374 | struct resource *res) | 402 | struct resource *res) |
@@ -402,6 +430,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, | |||
402 | ext_irq->sharable, false); | 430 | ext_irq->sharable, false); |
403 | break; | 431 | break; |
404 | default: | 432 | default: |
433 | res->flags = 0; | ||
405 | return false; | 434 | return false; |
406 | } | 435 | } |
407 | 436 | ||
@@ -415,12 +444,7 @@ EXPORT_SYMBOL_GPL(acpi_dev_resource_interrupt); | |||
415 | */ | 444 | */ |
416 | void acpi_dev_free_resource_list(struct list_head *list) | 445 | void acpi_dev_free_resource_list(struct list_head *list) |
417 | { | 446 | { |
418 | struct resource_list_entry *rentry, *re; | 447 | resource_list_free(list); |
419 | |||
420 | list_for_each_entry_safe(rentry, re, list, node) { | ||
421 | list_del(&rentry->node); | ||
422 | kfree(rentry); | ||
423 | } | ||
424 | } | 448 | } |
425 | EXPORT_SYMBOL_GPL(acpi_dev_free_resource_list); | 449 | EXPORT_SYMBOL_GPL(acpi_dev_free_resource_list); |
426 | 450 | ||
@@ -432,18 +456,19 @@ struct res_proc_context { | |||
432 | int error; | 456 | int error; |
433 | }; | 457 | }; |
434 | 458 | ||
435 | static acpi_status acpi_dev_new_resource_entry(struct resource *r, | 459 | static acpi_status acpi_dev_new_resource_entry(struct resource_win *win, |
436 | struct res_proc_context *c) | 460 | struct res_proc_context *c) |
437 | { | 461 | { |
438 | struct resource_list_entry *rentry; | 462 | struct resource_entry *rentry; |
439 | 463 | ||
440 | rentry = kmalloc(sizeof(*rentry), GFP_KERNEL); | 464 | rentry = resource_list_create_entry(NULL, 0); |
441 | if (!rentry) { | 465 | if (!rentry) { |
442 | c->error = -ENOMEM; | 466 | c->error = -ENOMEM; |
443 | return AE_NO_MEMORY; | 467 | return AE_NO_MEMORY; |
444 | } | 468 | } |
445 | rentry->res = *r; | 469 | *rentry->res = win->res; |
446 | list_add_tail(&rentry->node, c->list); | 470 | rentry->offset = win->offset; |
471 | resource_list_add_tail(rentry, c->list); | ||
447 | c->count++; | 472 | c->count++; |
448 | return AE_OK; | 473 | return AE_OK; |
449 | } | 474 | } |
@@ -452,7 +477,8 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares, | |||
452 | void *context) | 477 | void *context) |
453 | { | 478 | { |
454 | struct res_proc_context *c = context; | 479 | struct res_proc_context *c = context; |
455 | struct resource r; | 480 | struct resource_win win; |
481 | struct resource *res = &win.res; | ||
456 | int i; | 482 | int i; |
457 | 483 | ||
458 | if (c->preproc) { | 484 | if (c->preproc) { |
@@ -467,18 +493,18 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares, | |||
467 | } | 493 | } |
468 | } | 494 | } |
469 | 495 | ||
470 | memset(&r, 0, sizeof(r)); | 496 | memset(&win, 0, sizeof(win)); |
471 | 497 | ||
472 | if (acpi_dev_resource_memory(ares, &r) | 498 | if (acpi_dev_resource_memory(ares, res) |
473 | || acpi_dev_resource_io(ares, &r) | 499 | || acpi_dev_resource_io(ares, res) |
474 | || acpi_dev_resource_address_space(ares, &r) | 500 | || acpi_dev_resource_address_space(ares, &win) |
475 | || acpi_dev_resource_ext_address_space(ares, &r)) | 501 | || acpi_dev_resource_ext_address_space(ares, &win)) |
476 | return acpi_dev_new_resource_entry(&r, c); | 502 | return acpi_dev_new_resource_entry(&win, c); |
477 | 503 | ||
478 | for (i = 0; acpi_dev_resource_interrupt(ares, i, &r); i++) { | 504 | for (i = 0; acpi_dev_resource_interrupt(ares, i, res); i++) { |
479 | acpi_status status; | 505 | acpi_status status; |
480 | 506 | ||
481 | status = acpi_dev_new_resource_entry(&r, c); | 507 | status = acpi_dev_new_resource_entry(&win, c); |
482 | if (ACPI_FAILURE(status)) | 508 | if (ACPI_FAILURE(status)) |
483 | return status; | 509 | return status; |
484 | } | 510 | } |
@@ -503,7 +529,7 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares, | |||
503 | * returned as the final error code. | 529 | * returned as the final error code. |
504 | * | 530 | * |
505 | * The resultant struct resource objects are put on the list pointed to by | 531 | * The resultant struct resource objects are put on the list pointed to by |
506 | * @list, that must be empty initially, as members of struct resource_list_entry | 532 | * @list, that must be empty initially, as members of struct resource_entry |
507 | * objects. Callers of this routine should use %acpi_dev_free_resource_list() to | 533 | * objects. Callers of this routine should use %acpi_dev_free_resource_list() to |
508 | * free that list. | 534 | * free that list. |
509 | * | 535 | * |
@@ -538,3 +564,58 @@ int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list, | |||
538 | return c.count; | 564 | return c.count; |
539 | } | 565 | } |
540 | EXPORT_SYMBOL_GPL(acpi_dev_get_resources); | 566 | EXPORT_SYMBOL_GPL(acpi_dev_get_resources); |
567 | |||
568 | /** | ||
569 | * acpi_dev_filter_resource_type - Filter ACPI resource according to resource | ||
570 | * types | ||
571 | * @ares: Input ACPI resource object. | ||
572 | * @types: Valid resource types of IORESOURCE_XXX | ||
573 | * | ||
574 | * This is a hepler function to support acpi_dev_get_resources(), which filters | ||
575 | * ACPI resource objects according to resource types. | ||
576 | */ | ||
577 | int acpi_dev_filter_resource_type(struct acpi_resource *ares, | ||
578 | unsigned long types) | ||
579 | { | ||
580 | unsigned long type = 0; | ||
581 | |||
582 | switch (ares->type) { | ||
583 | case ACPI_RESOURCE_TYPE_MEMORY24: | ||
584 | case ACPI_RESOURCE_TYPE_MEMORY32: | ||
585 | case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: | ||
586 | type = IORESOURCE_MEM; | ||
587 | break; | ||
588 | case ACPI_RESOURCE_TYPE_IO: | ||
589 | case ACPI_RESOURCE_TYPE_FIXED_IO: | ||
590 | type = IORESOURCE_IO; | ||
591 | break; | ||
592 | case ACPI_RESOURCE_TYPE_IRQ: | ||
593 | case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: | ||
594 | type = IORESOURCE_IRQ; | ||
595 | break; | ||
596 | case ACPI_RESOURCE_TYPE_DMA: | ||
597 | case ACPI_RESOURCE_TYPE_FIXED_DMA: | ||
598 | type = IORESOURCE_DMA; | ||
599 | break; | ||
600 | case ACPI_RESOURCE_TYPE_GENERIC_REGISTER: | ||
601 | type = IORESOURCE_REG; | ||
602 | break; | ||
603 | case ACPI_RESOURCE_TYPE_ADDRESS16: | ||
604 | case ACPI_RESOURCE_TYPE_ADDRESS32: | ||
605 | case ACPI_RESOURCE_TYPE_ADDRESS64: | ||
606 | case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64: | ||
607 | if (ares->data.address.resource_type == ACPI_MEMORY_RANGE) | ||
608 | type = IORESOURCE_MEM; | ||
609 | else if (ares->data.address.resource_type == ACPI_IO_RANGE) | ||
610 | type = IORESOURCE_IO; | ||
611 | else if (ares->data.address.resource_type == | ||
612 | ACPI_BUS_NUMBER_RANGE) | ||
613 | type = IORESOURCE_BUS; | ||
614 | break; | ||
615 | default: | ||
616 | break; | ||
617 | } | ||
618 | |||
619 | return (type & types) ? 0 : 1; | ||
620 | } | ||
621 | EXPORT_SYMBOL_GPL(acpi_dev_filter_resource_type); | ||
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index dc4d8960684a..bbca7830e18a 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c | |||
@@ -2544,6 +2544,7 @@ int __init acpi_scan_init(void) | |||
2544 | acpi_pci_link_init(); | 2544 | acpi_pci_link_init(); |
2545 | acpi_processor_init(); | 2545 | acpi_processor_init(); |
2546 | acpi_lpss_init(); | 2546 | acpi_lpss_init(); |
2547 | acpi_apd_init(); | ||
2547 | acpi_cmos_rtc_init(); | 2548 | acpi_cmos_rtc_init(); |
2548 | acpi_container_init(); | 2549 | acpi_container_init(); |
2549 | acpi_memory_hotplug_init(); | 2550 | acpi_memory_hotplug_init(); |
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 8aa9254a387f..7f251dd1a687 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c | |||
@@ -321,7 +321,7 @@ static struct dmi_system_id acpisleep_dmi_table[] __initdata = { | |||
321 | {}, | 321 | {}, |
322 | }; | 322 | }; |
323 | 323 | ||
324 | static void acpi_sleep_dmi_check(void) | 324 | static void __init acpi_sleep_dmi_check(void) |
325 | { | 325 | { |
326 | int year; | 326 | int year; |
327 | 327 | ||
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 032db459370f..88a4f99dd2a7 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c | |||
@@ -522,6 +522,24 @@ static struct dmi_system_id video_dmi_table[] __initdata = { | |||
522 | DMI_MATCH(DMI_PRODUCT_NAME, "370R4E/370R4V/370R5E/3570RE/370R5V"), | 522 | DMI_MATCH(DMI_PRODUCT_NAME, "370R4E/370R4V/370R5E/3570RE/370R5V"), |
523 | }, | 523 | }, |
524 | }, | 524 | }, |
525 | { | ||
526 | /* https://bugzilla.redhat.com/show_bug.cgi?id=1186097 */ | ||
527 | .callback = video_disable_native_backlight, | ||
528 | .ident = "SAMSUNG 3570R/370R/470R/450R/510R/4450RV", | ||
529 | .matches = { | ||
530 | DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), | ||
531 | DMI_MATCH(DMI_PRODUCT_NAME, "3570R/370R/470R/450R/510R/4450RV"), | ||
532 | }, | ||
533 | }, | ||
534 | { | ||
535 | /* https://bugzilla.redhat.com/show_bug.cgi?id=1094948 */ | ||
536 | .callback = video_disable_native_backlight, | ||
537 | .ident = "SAMSUNG 730U3E/740U3E", | ||
538 | .matches = { | ||
539 | DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), | ||
540 | DMI_MATCH(DMI_PRODUCT_NAME, "730U3E/740U3E"), | ||
541 | }, | ||
542 | }, | ||
525 | 543 | ||
526 | { | 544 | { |
527 | /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */ | 545 | /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */ |
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 5f601553b9b0..e7f338a3a3c2 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig | |||
@@ -269,7 +269,7 @@ config ATA_PIIX | |||
269 | 269 | ||
270 | config SATA_DWC | 270 | config SATA_DWC |
271 | tristate "DesignWare Cores SATA support" | 271 | tristate "DesignWare Cores SATA support" |
272 | depends on 460EX | 272 | depends on 460EX || (COMPILE_TEST && !(ARM || ARM64)) |
273 | help | 273 | help |
274 | This option enables support for the on-chip SATA controller of the | 274 | This option enables support for the on-chip SATA controller of the |
275 | AppliedMicro processor 460EX. | 275 | AppliedMicro processor 460EX. |
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index 40f0e34f17af..71262e08648e 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h | |||
@@ -333,7 +333,7 @@ struct ahci_host_priv { | |||
333 | u32 em_msg_type; /* EM message type */ | 333 | u32 em_msg_type; /* EM message type */ |
334 | bool got_runtime_pm; /* Did we do pm_runtime_get? */ | 334 | bool got_runtime_pm; /* Did we do pm_runtime_get? */ |
335 | struct clk *clks[AHCI_MAX_CLKS]; /* Optional */ | 335 | struct clk *clks[AHCI_MAX_CLKS]; /* Optional */ |
336 | struct regulator *target_pwr; /* Optional */ | 336 | struct regulator **target_pwrs; /* Optional */ |
337 | /* | 337 | /* |
338 | * If platform uses PHYs. There is a 1:1 relation between the port number and | 338 | * If platform uses PHYs. There is a 1:1 relation between the port number and |
339 | * the PHY position in this array. | 339 | * the PHY position in this array. |
@@ -354,6 +354,10 @@ extern int ahci_ignore_sss; | |||
354 | extern struct device_attribute *ahci_shost_attrs[]; | 354 | extern struct device_attribute *ahci_shost_attrs[]; |
355 | extern struct device_attribute *ahci_sdev_attrs[]; | 355 | extern struct device_attribute *ahci_sdev_attrs[]; |
356 | 356 | ||
357 | /* | ||
358 | * This must be instantiated by the edge drivers. Read the comments | ||
359 | * for ATA_BASE_SHT | ||
360 | */ | ||
357 | #define AHCI_SHT(drv_name) \ | 361 | #define AHCI_SHT(drv_name) \ |
358 | ATA_NCQ_SHT(drv_name), \ | 362 | ATA_NCQ_SHT(drv_name), \ |
359 | .can_queue = AHCI_MAX_CMDS - 1, \ | 363 | .can_queue = AHCI_MAX_CMDS - 1, \ |
diff --git a/drivers/ata/ahci_da850.c b/drivers/ata/ahci_da850.c index ce8a7a6d6c7f..267a3d3e79f4 100644 --- a/drivers/ata/ahci_da850.c +++ b/drivers/ata/ahci_da850.c | |||
@@ -16,6 +16,8 @@ | |||
16 | #include <linux/ahci_platform.h> | 16 | #include <linux/ahci_platform.h> |
17 | #include "ahci.h" | 17 | #include "ahci.h" |
18 | 18 | ||
19 | #define DRV_NAME "ahci_da850" | ||
20 | |||
19 | /* SATA PHY Control Register offset from AHCI base */ | 21 | /* SATA PHY Control Register offset from AHCI base */ |
20 | #define SATA_P0PHYCR_REG 0x178 | 22 | #define SATA_P0PHYCR_REG 0x178 |
21 | 23 | ||
@@ -59,6 +61,10 @@ static const struct ata_port_info ahci_da850_port_info = { | |||
59 | .port_ops = &ahci_platform_ops, | 61 | .port_ops = &ahci_platform_ops, |
60 | }; | 62 | }; |
61 | 63 | ||
64 | static struct scsi_host_template ahci_platform_sht = { | ||
65 | AHCI_SHT(DRV_NAME), | ||
66 | }; | ||
67 | |||
62 | static int ahci_da850_probe(struct platform_device *pdev) | 68 | static int ahci_da850_probe(struct platform_device *pdev) |
63 | { | 69 | { |
64 | struct device *dev = &pdev->dev; | 70 | struct device *dev = &pdev->dev; |
@@ -85,7 +91,8 @@ static int ahci_da850_probe(struct platform_device *pdev) | |||
85 | 91 | ||
86 | da850_sata_init(dev, pwrdn_reg, hpriv->mmio); | 92 | da850_sata_init(dev, pwrdn_reg, hpriv->mmio); |
87 | 93 | ||
88 | rc = ahci_platform_init_host(pdev, hpriv, &ahci_da850_port_info); | 94 | rc = ahci_platform_init_host(pdev, hpriv, &ahci_da850_port_info, |
95 | &ahci_platform_sht); | ||
89 | if (rc) | 96 | if (rc) |
90 | goto disable_resources; | 97 | goto disable_resources; |
91 | 98 | ||
@@ -102,7 +109,7 @@ static struct platform_driver ahci_da850_driver = { | |||
102 | .probe = ahci_da850_probe, | 109 | .probe = ahci_da850_probe, |
103 | .remove = ata_platform_remove_one, | 110 | .remove = ata_platform_remove_one, |
104 | .driver = { | 111 | .driver = { |
105 | .name = "ahci_da850", | 112 | .name = DRV_NAME, |
106 | .pm = &ahci_da850_pm_ops, | 113 | .pm = &ahci_da850_pm_ops, |
107 | }, | 114 | }, |
108 | }; | 115 | }; |
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c index 35d51c59a370..3f3a7db208ae 100644 --- a/drivers/ata/ahci_imx.c +++ b/drivers/ata/ahci_imx.c | |||
@@ -28,6 +28,8 @@ | |||
28 | #include <linux/libata.h> | 28 | #include <linux/libata.h> |
29 | #include "ahci.h" | 29 | #include "ahci.h" |
30 | 30 | ||
31 | #define DRV_NAME "ahci-imx" | ||
32 | |||
31 | enum { | 33 | enum { |
32 | /* Timer 1-ms Register */ | 34 | /* Timer 1-ms Register */ |
33 | IMX_TIMER1MS = 0x00e0, | 35 | IMX_TIMER1MS = 0x00e0, |
@@ -221,11 +223,9 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv) | |||
221 | if (imxpriv->no_device) | 223 | if (imxpriv->no_device) |
222 | return 0; | 224 | return 0; |
223 | 225 | ||
224 | if (hpriv->target_pwr) { | 226 | ret = ahci_platform_enable_regulators(hpriv); |
225 | ret = regulator_enable(hpriv->target_pwr); | 227 | if (ret) |
226 | if (ret) | 228 | return ret; |
227 | return ret; | ||
228 | } | ||
229 | 229 | ||
230 | ret = clk_prepare_enable(imxpriv->sata_ref_clk); | 230 | ret = clk_prepare_enable(imxpriv->sata_ref_clk); |
231 | if (ret < 0) | 231 | if (ret < 0) |
@@ -270,8 +270,7 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv) | |||
270 | disable_clk: | 270 | disable_clk: |
271 | clk_disable_unprepare(imxpriv->sata_ref_clk); | 271 | clk_disable_unprepare(imxpriv->sata_ref_clk); |
272 | disable_regulator: | 272 | disable_regulator: |
273 | if (hpriv->target_pwr) | 273 | ahci_platform_disable_regulators(hpriv); |
274 | regulator_disable(hpriv->target_pwr); | ||
275 | 274 | ||
276 | return ret; | 275 | return ret; |
277 | } | 276 | } |
@@ -291,8 +290,7 @@ static void imx_sata_disable(struct ahci_host_priv *hpriv) | |||
291 | 290 | ||
292 | clk_disable_unprepare(imxpriv->sata_ref_clk); | 291 | clk_disable_unprepare(imxpriv->sata_ref_clk); |
293 | 292 | ||
294 | if (hpriv->target_pwr) | 293 | ahci_platform_disable_regulators(hpriv); |
295 | regulator_disable(hpriv->target_pwr); | ||
296 | } | 294 | } |
297 | 295 | ||
298 | static void ahci_imx_error_handler(struct ata_port *ap) | 296 | static void ahci_imx_error_handler(struct ata_port *ap) |
@@ -524,6 +522,10 @@ static u32 imx_ahci_parse_props(struct device *dev, | |||
524 | return reg_value; | 522 | return reg_value; |
525 | } | 523 | } |
526 | 524 | ||
525 | static struct scsi_host_template ahci_platform_sht = { | ||
526 | AHCI_SHT(DRV_NAME), | ||
527 | }; | ||
528 | |||
527 | static int imx_ahci_probe(struct platform_device *pdev) | 529 | static int imx_ahci_probe(struct platform_device *pdev) |
528 | { | 530 | { |
529 | struct device *dev = &pdev->dev; | 531 | struct device *dev = &pdev->dev; |
@@ -620,7 +622,8 @@ static int imx_ahci_probe(struct platform_device *pdev) | |||
620 | reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000; | 622 | reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000; |
621 | writel(reg_val, hpriv->mmio + IMX_TIMER1MS); | 623 | writel(reg_val, hpriv->mmio + IMX_TIMER1MS); |
622 | 624 | ||
623 | ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info); | 625 | ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info, |
626 | &ahci_platform_sht); | ||
624 | if (ret) | 627 | if (ret) |
625 | goto disable_sata; | 628 | goto disable_sata; |
626 | 629 | ||
@@ -678,7 +681,7 @@ static struct platform_driver imx_ahci_driver = { | |||
678 | .probe = imx_ahci_probe, | 681 | .probe = imx_ahci_probe, |
679 | .remove = ata_platform_remove_one, | 682 | .remove = ata_platform_remove_one, |
680 | .driver = { | 683 | .driver = { |
681 | .name = "ahci-imx", | 684 | .name = DRV_NAME, |
682 | .of_match_table = imx_ahci_of_match, | 685 | .of_match_table = imx_ahci_of_match, |
683 | .pm = &ahci_imx_pm_ops, | 686 | .pm = &ahci_imx_pm_ops, |
684 | }, | 687 | }, |
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c index 64bb08432b69..23716dd8a7ec 100644 --- a/drivers/ata/ahci_mvebu.c +++ b/drivers/ata/ahci_mvebu.c | |||
@@ -19,6 +19,8 @@ | |||
19 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
20 | #include "ahci.h" | 20 | #include "ahci.h" |
21 | 21 | ||
22 | #define DRV_NAME "ahci-mvebu" | ||
23 | |||
22 | #define AHCI_VENDOR_SPECIFIC_0_ADDR 0xa0 | 24 | #define AHCI_VENDOR_SPECIFIC_0_ADDR 0xa0 |
23 | #define AHCI_VENDOR_SPECIFIC_0_DATA 0xa4 | 25 | #define AHCI_VENDOR_SPECIFIC_0_DATA 0xa4 |
24 | 26 | ||
@@ -67,6 +69,10 @@ static const struct ata_port_info ahci_mvebu_port_info = { | |||
67 | .port_ops = &ahci_platform_ops, | 69 | .port_ops = &ahci_platform_ops, |
68 | }; | 70 | }; |
69 | 71 | ||
72 | static struct scsi_host_template ahci_platform_sht = { | ||
73 | AHCI_SHT(DRV_NAME), | ||
74 | }; | ||
75 | |||
70 | static int ahci_mvebu_probe(struct platform_device *pdev) | 76 | static int ahci_mvebu_probe(struct platform_device *pdev) |
71 | { | 77 | { |
72 | struct ahci_host_priv *hpriv; | 78 | struct ahci_host_priv *hpriv; |
@@ -88,7 +94,8 @@ static int ahci_mvebu_probe(struct platform_device *pdev) | |||
88 | ahci_mvebu_mbus_config(hpriv, dram); | 94 | ahci_mvebu_mbus_config(hpriv, dram); |
89 | ahci_mvebu_regret_option(hpriv); | 95 | ahci_mvebu_regret_option(hpriv); |
90 | 96 | ||
91 | rc = ahci_platform_init_host(pdev, hpriv, &ahci_mvebu_port_info); | 97 | rc = ahci_platform_init_host(pdev, hpriv, &ahci_mvebu_port_info, |
98 | &ahci_platform_sht); | ||
92 | if (rc) | 99 | if (rc) |
93 | goto disable_resources; | 100 | goto disable_resources; |
94 | 101 | ||
@@ -114,7 +121,7 @@ static struct platform_driver ahci_mvebu_driver = { | |||
114 | .probe = ahci_mvebu_probe, | 121 | .probe = ahci_mvebu_probe, |
115 | .remove = ata_platform_remove_one, | 122 | .remove = ata_platform_remove_one, |
116 | .driver = { | 123 | .driver = { |
117 | .name = "ahci-mvebu", | 124 | .name = DRV_NAME, |
118 | .of_match_table = ahci_mvebu_of_match, | 125 | .of_match_table = ahci_mvebu_of_match, |
119 | }, | 126 | }, |
120 | }; | 127 | }; |
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c index 18d539837045..78d6ae0b90c4 100644 --- a/drivers/ata/ahci_platform.c +++ b/drivers/ata/ahci_platform.c | |||
@@ -22,6 +22,8 @@ | |||
22 | #include <linux/ahci_platform.h> | 22 | #include <linux/ahci_platform.h> |
23 | #include "ahci.h" | 23 | #include "ahci.h" |
24 | 24 | ||
25 | #define DRV_NAME "ahci" | ||
26 | |||
25 | static const struct ata_port_info ahci_port_info = { | 27 | static const struct ata_port_info ahci_port_info = { |
26 | .flags = AHCI_FLAG_COMMON, | 28 | .flags = AHCI_FLAG_COMMON, |
27 | .pio_mask = ATA_PIO4, | 29 | .pio_mask = ATA_PIO4, |
@@ -29,6 +31,10 @@ static const struct ata_port_info ahci_port_info = { | |||
29 | .port_ops = &ahci_platform_ops, | 31 | .port_ops = &ahci_platform_ops, |
30 | }; | 32 | }; |
31 | 33 | ||
34 | static struct scsi_host_template ahci_platform_sht = { | ||
35 | AHCI_SHT(DRV_NAME), | ||
36 | }; | ||
37 | |||
32 | static int ahci_probe(struct platform_device *pdev) | 38 | static int ahci_probe(struct platform_device *pdev) |
33 | { | 39 | { |
34 | struct device *dev = &pdev->dev; | 40 | struct device *dev = &pdev->dev; |
@@ -46,7 +52,8 @@ static int ahci_probe(struct platform_device *pdev) | |||
46 | if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci")) | 52 | if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci")) |
47 | hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ; | 53 | hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ; |
48 | 54 | ||
49 | rc = ahci_platform_init_host(pdev, hpriv, &ahci_port_info); | 55 | rc = ahci_platform_init_host(pdev, hpriv, &ahci_port_info, |
56 | &ahci_platform_sht); | ||
50 | if (rc) | 57 | if (rc) |
51 | goto disable_resources; | 58 | goto disable_resources; |
52 | 59 | ||
@@ -75,7 +82,7 @@ static struct platform_driver ahci_driver = { | |||
75 | .probe = ahci_probe, | 82 | .probe = ahci_probe, |
76 | .remove = ata_platform_remove_one, | 83 | .remove = ata_platform_remove_one, |
77 | .driver = { | 84 | .driver = { |
78 | .name = "ahci", | 85 | .name = DRV_NAME, |
79 | .of_match_table = ahci_of_match, | 86 | .of_match_table = ahci_of_match, |
80 | .pm = &ahci_pm_ops, | 87 | .pm = &ahci_pm_ops, |
81 | }, | 88 | }, |
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c index 2f9e8317cc16..bc971af262e7 100644 --- a/drivers/ata/ahci_st.c +++ b/drivers/ata/ahci_st.c | |||
@@ -23,6 +23,8 @@ | |||
23 | 23 | ||
24 | #include "ahci.h" | 24 | #include "ahci.h" |
25 | 25 | ||
26 | #define DRV_NAME "st_ahci" | ||
27 | |||
26 | #define ST_AHCI_OOBR 0xbc | 28 | #define ST_AHCI_OOBR 0xbc |
27 | #define ST_AHCI_OOBR_WE BIT(31) | 29 | #define ST_AHCI_OOBR_WE BIT(31) |
28 | #define ST_AHCI_OOBR_CWMIN_SHIFT 24 | 30 | #define ST_AHCI_OOBR_CWMIN_SHIFT 24 |
@@ -140,6 +142,10 @@ static const struct ata_port_info st_ahci_port_info = { | |||
140 | .port_ops = &st_ahci_port_ops, | 142 | .port_ops = &st_ahci_port_ops, |
141 | }; | 143 | }; |
142 | 144 | ||
145 | static struct scsi_host_template ahci_platform_sht = { | ||
146 | AHCI_SHT(DRV_NAME), | ||
147 | }; | ||
148 | |||
143 | static int st_ahci_probe(struct platform_device *pdev) | 149 | static int st_ahci_probe(struct platform_device *pdev) |
144 | { | 150 | { |
145 | struct st_ahci_drv_data *drv_data; | 151 | struct st_ahci_drv_data *drv_data; |
@@ -166,7 +172,8 @@ static int st_ahci_probe(struct platform_device *pdev) | |||
166 | if (err) | 172 | if (err) |
167 | return err; | 173 | return err; |
168 | 174 | ||
169 | err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info); | 175 | err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info, |
176 | &ahci_platform_sht); | ||
170 | if (err) { | 177 | if (err) { |
171 | ahci_platform_disable_resources(hpriv); | 178 | ahci_platform_disable_resources(hpriv); |
172 | return err; | 179 | return err; |
@@ -229,7 +236,7 @@ MODULE_DEVICE_TABLE(of, st_ahci_match); | |||
229 | 236 | ||
230 | static struct platform_driver st_ahci_driver = { | 237 | static struct platform_driver st_ahci_driver = { |
231 | .driver = { | 238 | .driver = { |
232 | .name = "st_ahci", | 239 | .name = DRV_NAME, |
233 | .pm = &st_ahci_pm_ops, | 240 | .pm = &st_ahci_pm_ops, |
234 | .of_match_table = of_match_ptr(st_ahci_match), | 241 | .of_match_table = of_match_ptr(st_ahci_match), |
235 | }, | 242 | }, |
diff --git a/drivers/ata/ahci_sunxi.c b/drivers/ata/ahci_sunxi.c index e2e0da539a2f..b26437430163 100644 --- a/drivers/ata/ahci_sunxi.c +++ b/drivers/ata/ahci_sunxi.c | |||
@@ -27,6 +27,8 @@ | |||
27 | #include <linux/regulator/consumer.h> | 27 | #include <linux/regulator/consumer.h> |
28 | #include "ahci.h" | 28 | #include "ahci.h" |
29 | 29 | ||
30 | #define DRV_NAME "ahci-sunxi" | ||
31 | |||
30 | /* Insmod parameters */ | 32 | /* Insmod parameters */ |
31 | static bool enable_pmp; | 33 | static bool enable_pmp; |
32 | module_param(enable_pmp, bool, 0); | 34 | module_param(enable_pmp, bool, 0); |
@@ -169,6 +171,10 @@ static const struct ata_port_info ahci_sunxi_port_info = { | |||
169 | .port_ops = &ahci_platform_ops, | 171 | .port_ops = &ahci_platform_ops, |
170 | }; | 172 | }; |
171 | 173 | ||
174 | static struct scsi_host_template ahci_platform_sht = { | ||
175 | AHCI_SHT(DRV_NAME), | ||
176 | }; | ||
177 | |||
172 | static int ahci_sunxi_probe(struct platform_device *pdev) | 178 | static int ahci_sunxi_probe(struct platform_device *pdev) |
173 | { | 179 | { |
174 | struct device *dev = &pdev->dev; | 180 | struct device *dev = &pdev->dev; |
@@ -200,7 +206,8 @@ static int ahci_sunxi_probe(struct platform_device *pdev) | |||
200 | if (!enable_pmp) | 206 | if (!enable_pmp) |
201 | hpriv->flags |= AHCI_HFLAG_NO_PMP; | 207 | hpriv->flags |= AHCI_HFLAG_NO_PMP; |
202 | 208 | ||
203 | rc = ahci_platform_init_host(pdev, hpriv, &ahci_sunxi_port_info); | 209 | rc = ahci_platform_init_host(pdev, hpriv, &ahci_sunxi_port_info, |
210 | &ahci_platform_sht); | ||
204 | if (rc) | 211 | if (rc) |
205 | goto disable_resources; | 212 | goto disable_resources; |
206 | 213 | ||
@@ -251,7 +258,7 @@ static struct platform_driver ahci_sunxi_driver = { | |||
251 | .probe = ahci_sunxi_probe, | 258 | .probe = ahci_sunxi_probe, |
252 | .remove = ata_platform_remove_one, | 259 | .remove = ata_platform_remove_one, |
253 | .driver = { | 260 | .driver = { |
254 | .name = "ahci-sunxi", | 261 | .name = DRV_NAME, |
255 | .of_match_table = ahci_sunxi_of_match, | 262 | .of_match_table = ahci_sunxi_of_match, |
256 | .pm = &ahci_sunxi_pm_ops, | 263 | .pm = &ahci_sunxi_pm_ops, |
257 | }, | 264 | }, |
diff --git a/drivers/ata/ahci_tegra.c b/drivers/ata/ahci_tegra.c index 032904402c95..3a62eb246d80 100644 --- a/drivers/ata/ahci_tegra.c +++ b/drivers/ata/ahci_tegra.c | |||
@@ -31,6 +31,8 @@ | |||
31 | 31 | ||
32 | #include "ahci.h" | 32 | #include "ahci.h" |
33 | 33 | ||
34 | #define DRV_NAME "tegra-ahci" | ||
35 | |||
34 | #define SATA_CONFIGURATION_0 0x180 | 36 | #define SATA_CONFIGURATION_0 0x180 |
35 | #define SATA_CONFIGURATION_EN_FPCI BIT(0) | 37 | #define SATA_CONFIGURATION_EN_FPCI BIT(0) |
36 | 38 | ||
@@ -289,6 +291,10 @@ static const struct of_device_id tegra_ahci_of_match[] = { | |||
289 | }; | 291 | }; |
290 | MODULE_DEVICE_TABLE(of, tegra_ahci_of_match); | 292 | MODULE_DEVICE_TABLE(of, tegra_ahci_of_match); |
291 | 293 | ||
294 | static struct scsi_host_template ahci_platform_sht = { | ||
295 | AHCI_SHT(DRV_NAME), | ||
296 | }; | ||
297 | |||
292 | static int tegra_ahci_probe(struct platform_device *pdev) | 298 | static int tegra_ahci_probe(struct platform_device *pdev) |
293 | { | 299 | { |
294 | struct ahci_host_priv *hpriv; | 300 | struct ahci_host_priv *hpriv; |
@@ -354,7 +360,8 @@ static int tegra_ahci_probe(struct platform_device *pdev) | |||
354 | if (ret) | 360 | if (ret) |
355 | return ret; | 361 | return ret; |
356 | 362 | ||
357 | ret = ahci_platform_init_host(pdev, hpriv, &ahci_tegra_port_info); | 363 | ret = ahci_platform_init_host(pdev, hpriv, &ahci_tegra_port_info, |
364 | &ahci_platform_sht); | ||
358 | if (ret) | 365 | if (ret) |
359 | goto deinit_controller; | 366 | goto deinit_controller; |
360 | 367 | ||
@@ -370,7 +377,7 @@ static struct platform_driver tegra_ahci_driver = { | |||
370 | .probe = tegra_ahci_probe, | 377 | .probe = tegra_ahci_probe, |
371 | .remove = ata_platform_remove_one, | 378 | .remove = ata_platform_remove_one, |
372 | .driver = { | 379 | .driver = { |
373 | .name = "tegra-ahci", | 380 | .name = DRV_NAME, |
374 | .of_match_table = tegra_ahci_of_match, | 381 | .of_match_table = tegra_ahci_of_match, |
375 | }, | 382 | }, |
376 | /* LP0 suspend support not implemented */ | 383 | /* LP0 suspend support not implemented */ |
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c index cbcd20810355..2e8bb603e447 100644 --- a/drivers/ata/ahci_xgene.c +++ b/drivers/ata/ahci_xgene.c | |||
@@ -30,6 +30,8 @@ | |||
30 | #include <linux/phy/phy.h> | 30 | #include <linux/phy/phy.h> |
31 | #include "ahci.h" | 31 | #include "ahci.h" |
32 | 32 | ||
33 | #define DRV_NAME "xgene-ahci" | ||
34 | |||
33 | /* Max # of disk per a controller */ | 35 | /* Max # of disk per a controller */ |
34 | #define MAX_AHCI_CHN_PERCTR 2 | 36 | #define MAX_AHCI_CHN_PERCTR 2 |
35 | 37 | ||
@@ -85,6 +87,7 @@ struct xgene_ahci_context { | |||
85 | struct ahci_host_priv *hpriv; | 87 | struct ahci_host_priv *hpriv; |
86 | struct device *dev; | 88 | struct device *dev; |
87 | u8 last_cmd[MAX_AHCI_CHN_PERCTR]; /* tracking the last command issued*/ | 89 | u8 last_cmd[MAX_AHCI_CHN_PERCTR]; /* tracking the last command issued*/ |
90 | u32 class[MAX_AHCI_CHN_PERCTR]; /* tracking the class of device */ | ||
88 | void __iomem *csr_core; /* Core CSR address of IP */ | 91 | void __iomem *csr_core; /* Core CSR address of IP */ |
89 | void __iomem *csr_diag; /* Diag CSR address of IP */ | 92 | void __iomem *csr_diag; /* Diag CSR address of IP */ |
90 | void __iomem *csr_axi; /* AXI CSR address of IP */ | 93 | void __iomem *csr_axi; /* AXI CSR address of IP */ |
@@ -105,17 +108,69 @@ static int xgene_ahci_init_memram(struct xgene_ahci_context *ctx) | |||
105 | } | 108 | } |
106 | 109 | ||
107 | /** | 110 | /** |
111 | * xgene_ahci_poll_reg_val- Poll a register on a specific value. | ||
112 | * @ap : ATA port of interest. | ||
113 | * @reg : Register of interest. | ||
114 | * @val : Value to be attained. | ||
115 | * @interval : waiting interval for polling. | ||
116 | * @timeout : timeout for achieving the value. | ||
117 | */ | ||
118 | static int xgene_ahci_poll_reg_val(struct ata_port *ap, | ||
119 | void __iomem *reg, unsigned | ||
120 | int val, unsigned long interval, | ||
121 | unsigned long timeout) | ||
122 | { | ||
123 | unsigned long deadline; | ||
124 | unsigned int tmp; | ||
125 | |||
126 | tmp = ioread32(reg); | ||
127 | deadline = ata_deadline(jiffies, timeout); | ||
128 | |||
129 | while (tmp != val && time_before(jiffies, deadline)) { | ||
130 | ata_msleep(ap, interval); | ||
131 | tmp = ioread32(reg); | ||
132 | } | ||
133 | |||
134 | return tmp; | ||
135 | } | ||
136 | |||
137 | /** | ||
108 | * xgene_ahci_restart_engine - Restart the dma engine. | 138 | * xgene_ahci_restart_engine - Restart the dma engine. |
109 | * @ap : ATA port of interest | 139 | * @ap : ATA port of interest |
110 | * | 140 | * |
111 | * Restarts the dma engine inside the controller. | 141 | * Waits for completion of multiple commands and restarts |
142 | * the DMA engine inside the controller. | ||
112 | */ | 143 | */ |
113 | static int xgene_ahci_restart_engine(struct ata_port *ap) | 144 | static int xgene_ahci_restart_engine(struct ata_port *ap) |
114 | { | 145 | { |
115 | struct ahci_host_priv *hpriv = ap->host->private_data; | 146 | struct ahci_host_priv *hpriv = ap->host->private_data; |
147 | struct ahci_port_priv *pp = ap->private_data; | ||
148 | void __iomem *port_mmio = ahci_port_base(ap); | ||
149 | u32 fbs; | ||
150 | |||
151 | /* | ||
152 | * In case of PMP multiple IDENTIFY DEVICE commands can be | ||
153 | * issued inside PxCI. So need to poll PxCI for the | ||
154 | * completion of outstanding IDENTIFY DEVICE commands before | ||
155 | * we restart the DMA engine. | ||
156 | */ | ||
157 | if (xgene_ahci_poll_reg_val(ap, port_mmio + | ||
158 | PORT_CMD_ISSUE, 0x0, 1, 100)) | ||
159 | return -EBUSY; | ||
116 | 160 | ||
117 | ahci_stop_engine(ap); | 161 | ahci_stop_engine(ap); |
118 | ahci_start_fis_rx(ap); | 162 | ahci_start_fis_rx(ap); |
163 | |||
164 | /* | ||
165 | * Enable the PxFBS.FBS_EN bit as it | ||
166 | * gets cleared due to stopping the engine. | ||
167 | */ | ||
168 | if (pp->fbs_supported) { | ||
169 | fbs = readl(port_mmio + PORT_FBS); | ||
170 | writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS); | ||
171 | fbs = readl(port_mmio + PORT_FBS); | ||
172 | } | ||
173 | |||
119 | hpriv->start_engine(ap); | 174 | hpriv->start_engine(ap); |
120 | 175 | ||
121 | return 0; | 176 | return 0; |
@@ -125,11 +180,17 @@ static int xgene_ahci_restart_engine(struct ata_port *ap) | |||
125 | * xgene_ahci_qc_issue - Issue commands to the device | 180 | * xgene_ahci_qc_issue - Issue commands to the device |
126 | * @qc: Command to issue | 181 | * @qc: Command to issue |
127 | * | 182 | * |
128 | * Due to Hardware errata for IDENTIFY DEVICE command and PACKET | 183 | * Due to Hardware errata for IDENTIFY DEVICE command, the controller cannot |
129 | * command of ATAPI protocol set, the controller cannot clear the BSY bit | 184 | * clear the BSY bit after receiving the PIO setup FIS. This results in the dma |
130 | * after receiving the PIO setup FIS. This results in the DMA state machine | 185 | * state machine goes into the CMFatalErrorUpdate state and locks up. By |
131 | * going into the CMFatalErrorUpdate state and locks up. By restarting the | 186 | * restarting the dma engine, it removes the controller out of lock up state. |
132 | * DMA engine, it removes the controller out of lock up state. | 187 | * |
188 | * Due to H/W errata, the controller is unable to save the PMP | ||
189 | * field fetched from command header before sending the H2D FIS. | ||
190 | * When the device returns the PMP port field in the D2H FIS, there is | ||
191 | * a mismatch and results in command completion failure. The | ||
192 | * workaround is to write the pmp value to PxFBS.DEV field before issuing | ||
193 | * any command to PMP. | ||
133 | */ | 194 | */ |
134 | static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc) | 195 | static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc) |
135 | { | 196 | { |
@@ -137,9 +198,23 @@ static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc) | |||
137 | struct ahci_host_priv *hpriv = ap->host->private_data; | 198 | struct ahci_host_priv *hpriv = ap->host->private_data; |
138 | struct xgene_ahci_context *ctx = hpriv->plat_data; | 199 | struct xgene_ahci_context *ctx = hpriv->plat_data; |
139 | int rc = 0; | 200 | int rc = 0; |
201 | u32 port_fbs; | ||
202 | void *port_mmio = ahci_port_base(ap); | ||
203 | |||
204 | /* | ||
205 | * Write the pmp value to PxFBS.DEV | ||
206 | * for case of Port Mulitplier. | ||
207 | */ | ||
208 | if (ctx->class[ap->port_no] == ATA_DEV_PMP) { | ||
209 | port_fbs = readl(port_mmio + PORT_FBS); | ||
210 | port_fbs &= ~PORT_FBS_DEV_MASK; | ||
211 | port_fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET; | ||
212 | writel(port_fbs, port_mmio + PORT_FBS); | ||
213 | } | ||
140 | 214 | ||
141 | if (unlikely((ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA) || | 215 | if (unlikely((ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA) || |
142 | (ctx->last_cmd[ap->port_no] == ATA_CMD_PACKET))) | 216 | (ctx->last_cmd[ap->port_no] == ATA_CMD_PACKET) || |
217 | (ctx->last_cmd[ap->port_no] == ATA_CMD_SMART))) | ||
143 | xgene_ahci_restart_engine(ap); | 218 | xgene_ahci_restart_engine(ap); |
144 | 219 | ||
145 | rc = ahci_qc_issue(qc); | 220 | rc = ahci_qc_issue(qc); |
@@ -365,16 +440,119 @@ static void xgene_ahci_host_stop(struct ata_host *host) | |||
365 | ahci_platform_disable_resources(hpriv); | 440 | ahci_platform_disable_resources(hpriv); |
366 | } | 441 | } |
367 | 442 | ||
443 | /** | ||
444 | * xgene_ahci_pmp_softreset - Issue the softreset to the drives connected | ||
445 | * to Port Multiplier. | ||
446 | * @link: link to reset | ||
447 | * @class: Return value to indicate class of device | ||
448 | * @deadline: deadline jiffies for the operation | ||
449 | * | ||
450 | * Due to H/W errata, the controller is unable to save the PMP | ||
451 | * field fetched from command header before sending the H2D FIS. | ||
452 | * When the device returns the PMP port field in the D2H FIS, there is | ||
453 | * a mismatch and results in command completion failure. The workaround | ||
454 | * is to write the pmp value to PxFBS.DEV field before issuing any command | ||
455 | * to PMP. | ||
456 | */ | ||
457 | static int xgene_ahci_pmp_softreset(struct ata_link *link, unsigned int *class, | ||
458 | unsigned long deadline) | ||
459 | { | ||
460 | int pmp = sata_srst_pmp(link); | ||
461 | struct ata_port *ap = link->ap; | ||
462 | u32 rc; | ||
463 | void *port_mmio = ahci_port_base(ap); | ||
464 | u32 port_fbs; | ||
465 | |||
466 | /* | ||
467 | * Set PxFBS.DEV field with pmp | ||
468 | * value. | ||
469 | */ | ||
470 | port_fbs = readl(port_mmio + PORT_FBS); | ||
471 | port_fbs &= ~PORT_FBS_DEV_MASK; | ||
472 | port_fbs |= pmp << PORT_FBS_DEV_OFFSET; | ||
473 | writel(port_fbs, port_mmio + PORT_FBS); | ||
474 | |||
475 | rc = ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready); | ||
476 | |||
477 | return rc; | ||
478 | } | ||
479 | |||
480 | /** | ||
481 | * xgene_ahci_softreset - Issue the softreset to the drive. | ||
482 | * @link: link to reset | ||
483 | * @class: Return value to indicate class of device | ||
484 | * @deadline: deadline jiffies for the operation | ||
485 | * | ||
486 | * Due to H/W errata, the controller is unable to save the PMP | ||
487 | * field fetched from command header before sending the H2D FIS. | ||
488 | * When the device returns the PMP port field in the D2H FIS, there is | ||
489 | * a mismatch and results in command completion failure. The workaround | ||
490 | * is to write the pmp value to PxFBS.DEV field before issuing any command | ||
491 | * to PMP. Here is the algorithm to detect PMP : | ||
492 | * | ||
493 | * 1. Save the PxFBS value | ||
494 | * 2. Program PxFBS.DEV with pmp value send by framework. Framework sends | ||
495 | * 0xF for both PMP/NON-PMP initially | ||
496 | * 3. Issue softreset | ||
497 | * 4. If signature class is PMP goto 6 | ||
498 | * 5. restore the original PxFBS and goto 3 | ||
499 | * 6. return | ||
500 | */ | ||
501 | static int xgene_ahci_softreset(struct ata_link *link, unsigned int *class, | ||
502 | unsigned long deadline) | ||
503 | { | ||
504 | int pmp = sata_srst_pmp(link); | ||
505 | struct ata_port *ap = link->ap; | ||
506 | struct ahci_host_priv *hpriv = ap->host->private_data; | ||
507 | struct xgene_ahci_context *ctx = hpriv->plat_data; | ||
508 | void *port_mmio = ahci_port_base(ap); | ||
509 | u32 port_fbs; | ||
510 | u32 port_fbs_save; | ||
511 | u32 retry = 1; | ||
512 | u32 rc; | ||
513 | |||
514 | port_fbs_save = readl(port_mmio + PORT_FBS); | ||
515 | |||
516 | /* | ||
517 | * Set PxFBS.DEV field with pmp | ||
518 | * value. | ||
519 | */ | ||
520 | port_fbs = readl(port_mmio + PORT_FBS); | ||
521 | port_fbs &= ~PORT_FBS_DEV_MASK; | ||
522 | port_fbs |= pmp << PORT_FBS_DEV_OFFSET; | ||
523 | writel(port_fbs, port_mmio + PORT_FBS); | ||
524 | |||
525 | softreset_retry: | ||
526 | rc = ahci_do_softreset(link, class, pmp, | ||
527 | deadline, ahci_check_ready); | ||
528 | |||
529 | ctx->class[ap->port_no] = *class; | ||
530 | if (*class != ATA_DEV_PMP) { | ||
531 | /* | ||
532 | * Retry for normal drives without | ||
533 | * setting PxFBS.DEV field with pmp value. | ||
534 | */ | ||
535 | if (retry--) { | ||
536 | writel(port_fbs_save, port_mmio + PORT_FBS); | ||
537 | goto softreset_retry; | ||
538 | } | ||
539 | } | ||
540 | |||
541 | return rc; | ||
542 | } | ||
543 | |||
368 | static struct ata_port_operations xgene_ahci_ops = { | 544 | static struct ata_port_operations xgene_ahci_ops = { |
369 | .inherits = &ahci_ops, | 545 | .inherits = &ahci_ops, |
370 | .host_stop = xgene_ahci_host_stop, | 546 | .host_stop = xgene_ahci_host_stop, |
371 | .hardreset = xgene_ahci_hardreset, | 547 | .hardreset = xgene_ahci_hardreset, |
372 | .read_id = xgene_ahci_read_id, | 548 | .read_id = xgene_ahci_read_id, |
373 | .qc_issue = xgene_ahci_qc_issue, | 549 | .qc_issue = xgene_ahci_qc_issue, |
550 | .softreset = xgene_ahci_softreset, | ||
551 | .pmp_softreset = xgene_ahci_pmp_softreset | ||
374 | }; | 552 | }; |
375 | 553 | ||
376 | static const struct ata_port_info xgene_ahci_port_info = { | 554 | static const struct ata_port_info xgene_ahci_port_info = { |
377 | .flags = AHCI_FLAG_COMMON, | 555 | .flags = AHCI_FLAG_COMMON | ATA_FLAG_PMP, |
378 | .pio_mask = ATA_PIO4, | 556 | .pio_mask = ATA_PIO4, |
379 | .udma_mask = ATA_UDMA6, | 557 | .udma_mask = ATA_UDMA6, |
380 | .port_ops = &xgene_ahci_ops, | 558 | .port_ops = &xgene_ahci_ops, |
@@ -446,6 +624,10 @@ static int xgene_ahci_mux_select(struct xgene_ahci_context *ctx) | |||
446 | return val & CFG_SATA_ENET_SELECT_MASK ? -1 : 0; | 624 | return val & CFG_SATA_ENET_SELECT_MASK ? -1 : 0; |
447 | } | 625 | } |
448 | 626 | ||
627 | static struct scsi_host_template ahci_platform_sht = { | ||
628 | AHCI_SHT(DRV_NAME), | ||
629 | }; | ||
630 | |||
449 | static int xgene_ahci_probe(struct platform_device *pdev) | 631 | static int xgene_ahci_probe(struct platform_device *pdev) |
450 | { | 632 | { |
451 | struct device *dev = &pdev->dev; | 633 | struct device *dev = &pdev->dev; |
@@ -523,7 +705,8 @@ static int xgene_ahci_probe(struct platform_device *pdev) | |||
523 | skip_clk_phy: | 705 | skip_clk_phy: |
524 | hpriv->flags = AHCI_HFLAG_NO_PMP | AHCI_HFLAG_NO_NCQ; | 706 | hpriv->flags = AHCI_HFLAG_NO_PMP | AHCI_HFLAG_NO_NCQ; |
525 | 707 | ||
526 | rc = ahci_platform_init_host(pdev, hpriv, &xgene_ahci_port_info); | 708 | rc = ahci_platform_init_host(pdev, hpriv, &xgene_ahci_port_info, |
709 | &ahci_platform_sht); | ||
527 | if (rc) | 710 | if (rc) |
528 | goto disable_resources; | 711 | goto disable_resources; |
529 | 712 | ||
@@ -545,7 +728,7 @@ static struct platform_driver xgene_ahci_driver = { | |||
545 | .probe = xgene_ahci_probe, | 728 | .probe = xgene_ahci_probe, |
546 | .remove = ata_platform_remove_one, | 729 | .remove = ata_platform_remove_one, |
547 | .driver = { | 730 | .driver = { |
548 | .name = "xgene-ahci", | 731 | .name = DRV_NAME, |
549 | .of_match_table = xgene_ahci_of_match, | 732 | .of_match_table = xgene_ahci_of_match, |
550 | }, | 733 | }, |
551 | }; | 734 | }; |
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c index 0b03f9056692..d89305d289f6 100644 --- a/drivers/ata/libahci_platform.c +++ b/drivers/ata/libahci_platform.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/ahci_platform.h> | 24 | #include <linux/ahci_platform.h> |
25 | #include <linux/phy/phy.h> | 25 | #include <linux/phy/phy.h> |
26 | #include <linux/pm_runtime.h> | 26 | #include <linux/pm_runtime.h> |
27 | #include <linux/of_platform.h> | ||
27 | #include "ahci.h" | 28 | #include "ahci.h" |
28 | 29 | ||
29 | static void ahci_host_stop(struct ata_host *host); | 30 | static void ahci_host_stop(struct ata_host *host); |
@@ -34,10 +35,6 @@ struct ata_port_operations ahci_platform_ops = { | |||
34 | }; | 35 | }; |
35 | EXPORT_SYMBOL_GPL(ahci_platform_ops); | 36 | EXPORT_SYMBOL_GPL(ahci_platform_ops); |
36 | 37 | ||
37 | static struct scsi_host_template ahci_platform_sht = { | ||
38 | AHCI_SHT("ahci_platform"), | ||
39 | }; | ||
40 | |||
41 | /** | 38 | /** |
42 | * ahci_platform_enable_phys - Enable PHYs | 39 | * ahci_platform_enable_phys - Enable PHYs |
43 | * @hpriv: host private area to store config values | 40 | * @hpriv: host private area to store config values |
@@ -54,9 +51,6 @@ static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv) | |||
54 | int rc, i; | 51 | int rc, i; |
55 | 52 | ||
56 | for (i = 0; i < hpriv->nports; i++) { | 53 | for (i = 0; i < hpriv->nports; i++) { |
57 | if (!hpriv->phys[i]) | ||
58 | continue; | ||
59 | |||
60 | rc = phy_init(hpriv->phys[i]); | 54 | rc = phy_init(hpriv->phys[i]); |
61 | if (rc) | 55 | if (rc) |
62 | goto disable_phys; | 56 | goto disable_phys; |
@@ -89,9 +83,6 @@ static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv) | |||
89 | int i; | 83 | int i; |
90 | 84 | ||
91 | for (i = 0; i < hpriv->nports; i++) { | 85 | for (i = 0; i < hpriv->nports; i++) { |
92 | if (!hpriv->phys[i]) | ||
93 | continue; | ||
94 | |||
95 | phy_power_off(hpriv->phys[i]); | 86 | phy_power_off(hpriv->phys[i]); |
96 | phy_exit(hpriv->phys[i]); | 87 | phy_exit(hpriv->phys[i]); |
97 | } | 88 | } |
@@ -144,6 +135,59 @@ void ahci_platform_disable_clks(struct ahci_host_priv *hpriv) | |||
144 | EXPORT_SYMBOL_GPL(ahci_platform_disable_clks); | 135 | EXPORT_SYMBOL_GPL(ahci_platform_disable_clks); |
145 | 136 | ||
146 | /** | 137 | /** |
138 | * ahci_platform_enable_regulators - Enable regulators | ||
139 | * @hpriv: host private area to store config values | ||
140 | * | ||
141 | * This function enables all the regulators found in | ||
142 | * hpriv->target_pwrs, if any. If a regulator fails to be enabled, it | ||
143 | * disables all the regulators already enabled in reverse order and | ||
144 | * returns an error. | ||
145 | * | ||
146 | * RETURNS: | ||
147 | * 0 on success otherwise a negative error code | ||
148 | */ | ||
149 | int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv) | ||
150 | { | ||
151 | int rc, i; | ||
152 | |||
153 | for (i = 0; i < hpriv->nports; i++) { | ||
154 | if (!hpriv->target_pwrs[i]) | ||
155 | continue; | ||
156 | |||
157 | rc = regulator_enable(hpriv->target_pwrs[i]); | ||
158 | if (rc) | ||
159 | goto disable_target_pwrs; | ||
160 | } | ||
161 | |||
162 | return 0; | ||
163 | |||
164 | disable_target_pwrs: | ||
165 | while (--i >= 0) | ||
166 | if (hpriv->target_pwrs[i]) | ||
167 | regulator_disable(hpriv->target_pwrs[i]); | ||
168 | |||
169 | return rc; | ||
170 | } | ||
171 | EXPORT_SYMBOL_GPL(ahci_platform_enable_regulators); | ||
172 | |||
173 | /** | ||
174 | * ahci_platform_disable_regulators - Disable regulators | ||
175 | * @hpriv: host private area to store config values | ||
176 | * | ||
177 | * This function disables all regulators found in hpriv->target_pwrs. | ||
178 | */ | ||
179 | void ahci_platform_disable_regulators(struct ahci_host_priv *hpriv) | ||
180 | { | ||
181 | int i; | ||
182 | |||
183 | for (i = 0; i < hpriv->nports; i++) { | ||
184 | if (!hpriv->target_pwrs[i]) | ||
185 | continue; | ||
186 | regulator_disable(hpriv->target_pwrs[i]); | ||
187 | } | ||
188 | } | ||
189 | EXPORT_SYMBOL_GPL(ahci_platform_disable_regulators); | ||
190 | /** | ||
147 | * ahci_platform_enable_resources - Enable platform resources | 191 | * ahci_platform_enable_resources - Enable platform resources |
148 | * @hpriv: host private area to store config values | 192 | * @hpriv: host private area to store config values |
149 | * | 193 | * |
@@ -163,11 +207,9 @@ int ahci_platform_enable_resources(struct ahci_host_priv *hpriv) | |||
163 | { | 207 | { |
164 | int rc; | 208 | int rc; |
165 | 209 | ||
166 | if (hpriv->target_pwr) { | 210 | rc = ahci_platform_enable_regulators(hpriv); |
167 | rc = regulator_enable(hpriv->target_pwr); | 211 | if (rc) |
168 | if (rc) | 212 | return rc; |
169 | return rc; | ||
170 | } | ||
171 | 213 | ||
172 | rc = ahci_platform_enable_clks(hpriv); | 214 | rc = ahci_platform_enable_clks(hpriv); |
173 | if (rc) | 215 | if (rc) |
@@ -183,8 +225,8 @@ disable_clks: | |||
183 | ahci_platform_disable_clks(hpriv); | 225 | ahci_platform_disable_clks(hpriv); |
184 | 226 | ||
185 | disable_regulator: | 227 | disable_regulator: |
186 | if (hpriv->target_pwr) | 228 | ahci_platform_disable_regulators(hpriv); |
187 | regulator_disable(hpriv->target_pwr); | 229 | |
188 | return rc; | 230 | return rc; |
189 | } | 231 | } |
190 | EXPORT_SYMBOL_GPL(ahci_platform_enable_resources); | 232 | EXPORT_SYMBOL_GPL(ahci_platform_enable_resources); |
@@ -205,8 +247,7 @@ void ahci_platform_disable_resources(struct ahci_host_priv *hpriv) | |||
205 | 247 | ||
206 | ahci_platform_disable_clks(hpriv); | 248 | ahci_platform_disable_clks(hpriv); |
207 | 249 | ||
208 | if (hpriv->target_pwr) | 250 | ahci_platform_disable_regulators(hpriv); |
209 | regulator_disable(hpriv->target_pwr); | ||
210 | } | 251 | } |
211 | EXPORT_SYMBOL_GPL(ahci_platform_disable_resources); | 252 | EXPORT_SYMBOL_GPL(ahci_platform_disable_resources); |
212 | 253 | ||
@@ -222,6 +263,69 @@ static void ahci_platform_put_resources(struct device *dev, void *res) | |||
222 | 263 | ||
223 | for (c = 0; c < AHCI_MAX_CLKS && hpriv->clks[c]; c++) | 264 | for (c = 0; c < AHCI_MAX_CLKS && hpriv->clks[c]; c++) |
224 | clk_put(hpriv->clks[c]); | 265 | clk_put(hpriv->clks[c]); |
266 | /* | ||
267 | * The regulators are tied to child node device and not to the | ||
268 | * SATA device itself. So we can't use devm for automatically | ||
269 | * releasing them. We have to do it manually here. | ||
270 | */ | ||
271 | for (c = 0; c < hpriv->nports; c++) | ||
272 | if (hpriv->target_pwrs && hpriv->target_pwrs[c]) | ||
273 | regulator_put(hpriv->target_pwrs[c]); | ||
274 | |||
275 | kfree(hpriv->target_pwrs); | ||
276 | } | ||
277 | |||
278 | static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port, | ||
279 | struct device *dev, struct device_node *node) | ||
280 | { | ||
281 | int rc; | ||
282 | |||
283 | hpriv->phys[port] = devm_of_phy_get(dev, node, NULL); | ||
284 | |||
285 | if (!IS_ERR(hpriv->phys[port])) | ||
286 | return 0; | ||
287 | |||
288 | rc = PTR_ERR(hpriv->phys[port]); | ||
289 | switch (rc) { | ||
290 | case -ENOSYS: | ||
291 | /* No PHY support. Check if PHY is required. */ | ||
292 | if (of_find_property(node, "phys", NULL)) { | ||
293 | dev_err(dev, | ||
294 | "couldn't get PHY in node %s: ENOSYS\n", | ||
295 | node->name); | ||
296 | break; | ||
297 | } | ||
298 | case -ENODEV: | ||
299 | /* continue normally */ | ||
300 | hpriv->phys[port] = NULL; | ||
301 | rc = 0; | ||
302 | break; | ||
303 | |||
304 | default: | ||
305 | dev_err(dev, | ||
306 | "couldn't get PHY in node %s: %d\n", | ||
307 | node->name, rc); | ||
308 | |||
309 | break; | ||
310 | } | ||
311 | |||
312 | return rc; | ||
313 | } | ||
314 | |||
315 | static int ahci_platform_get_regulator(struct ahci_host_priv *hpriv, u32 port, | ||
316 | struct device *dev) | ||
317 | { | ||
318 | struct regulator *target_pwr; | ||
319 | int rc = 0; | ||
320 | |||
321 | target_pwr = regulator_get_optional(dev, "target"); | ||
322 | |||
323 | if (!IS_ERR(target_pwr)) | ||
324 | hpriv->target_pwrs[port] = target_pwr; | ||
325 | else | ||
326 | rc = PTR_ERR(target_pwr); | ||
327 | |||
328 | return rc; | ||
225 | } | 329 | } |
226 | 330 | ||
227 | /** | 331 | /** |
@@ -246,7 +350,7 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev) | |||
246 | struct ahci_host_priv *hpriv; | 350 | struct ahci_host_priv *hpriv; |
247 | struct clk *clk; | 351 | struct clk *clk; |
248 | struct device_node *child; | 352 | struct device_node *child; |
249 | int i, enabled_ports = 0, rc = -ENOMEM; | 353 | int i, sz, enabled_ports = 0, rc = -ENOMEM, child_nodes; |
250 | u32 mask_port_map = 0; | 354 | u32 mask_port_map = 0; |
251 | 355 | ||
252 | if (!devres_open_group(dev, NULL, GFP_KERNEL)) | 356 | if (!devres_open_group(dev, NULL, GFP_KERNEL)) |
@@ -267,14 +371,6 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev) | |||
267 | goto err_out; | 371 | goto err_out; |
268 | } | 372 | } |
269 | 373 | ||
270 | hpriv->target_pwr = devm_regulator_get_optional(dev, "target"); | ||
271 | if (IS_ERR(hpriv->target_pwr)) { | ||
272 | rc = PTR_ERR(hpriv->target_pwr); | ||
273 | if (rc == -EPROBE_DEFER) | ||
274 | goto err_out; | ||
275 | hpriv->target_pwr = NULL; | ||
276 | } | ||
277 | |||
278 | for (i = 0; i < AHCI_MAX_CLKS; i++) { | 374 | for (i = 0; i < AHCI_MAX_CLKS; i++) { |
279 | /* | 375 | /* |
280 | * For now we must use clk_get(dev, NULL) for the first clock, | 376 | * For now we must use clk_get(dev, NULL) for the first clock, |
@@ -296,19 +392,33 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev) | |||
296 | hpriv->clks[i] = clk; | 392 | hpriv->clks[i] = clk; |
297 | } | 393 | } |
298 | 394 | ||
299 | hpriv->nports = of_get_child_count(dev->of_node); | 395 | hpriv->nports = child_nodes = of_get_child_count(dev->of_node); |
300 | 396 | ||
301 | if (hpriv->nports) { | 397 | /* |
302 | hpriv->phys = devm_kzalloc(dev, | 398 | * If no sub-node was found, we still need to set nports to |
303 | hpriv->nports * sizeof(*hpriv->phys), | 399 | * one in order to be able to use the |
304 | GFP_KERNEL); | 400 | * ahci_platform_[en|dis]able_[phys|regulators] functions. |
305 | if (!hpriv->phys) { | 401 | */ |
306 | rc = -ENOMEM; | 402 | if (!child_nodes) |
307 | goto err_out; | 403 | hpriv->nports = 1; |
308 | } | ||
309 | 404 | ||
405 | sz = hpriv->nports * sizeof(*hpriv->phys); | ||
406 | hpriv->phys = devm_kzalloc(dev, sz, GFP_KERNEL); | ||
407 | if (!hpriv->phys) { | ||
408 | rc = -ENOMEM; | ||
409 | goto err_out; | ||
410 | } | ||
411 | sz = hpriv->nports * sizeof(*hpriv->target_pwrs); | ||
412 | hpriv->target_pwrs = kzalloc(sz, GFP_KERNEL); | ||
413 | if (!hpriv->target_pwrs) { | ||
414 | rc = -ENOMEM; | ||
415 | goto err_out; | ||
416 | } | ||
417 | |||
418 | if (child_nodes) { | ||
310 | for_each_child_of_node(dev->of_node, child) { | 419 | for_each_child_of_node(dev->of_node, child) { |
311 | u32 port; | 420 | u32 port; |
421 | struct platform_device *port_dev __maybe_unused; | ||
312 | 422 | ||
313 | if (!of_device_is_available(child)) | 423 | if (!of_device_is_available(child)) |
314 | continue; | 424 | continue; |
@@ -322,17 +432,24 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev) | |||
322 | dev_warn(dev, "invalid port number %d\n", port); | 432 | dev_warn(dev, "invalid port number %d\n", port); |
323 | continue; | 433 | continue; |
324 | } | 434 | } |
325 | |||
326 | mask_port_map |= BIT(port); | 435 | mask_port_map |= BIT(port); |
327 | 436 | ||
328 | hpriv->phys[port] = devm_of_phy_get(dev, child, NULL); | 437 | #ifdef CONFIG_OF_ADDRESS |
329 | if (IS_ERR(hpriv->phys[port])) { | 438 | of_platform_device_create(child, NULL, NULL); |
330 | rc = PTR_ERR(hpriv->phys[port]); | 439 | |
331 | dev_err(dev, | 440 | port_dev = of_find_device_by_node(child); |
332 | "couldn't get PHY in node %s: %d\n", | 441 | |
333 | child->name, rc); | 442 | if (port_dev) { |
334 | goto err_out; | 443 | rc = ahci_platform_get_regulator(hpriv, port, |
444 | &port_dev->dev); | ||
445 | if (rc == -EPROBE_DEFER) | ||
446 | goto err_out; | ||
335 | } | 447 | } |
448 | #endif | ||
449 | |||
450 | rc = ahci_platform_get_phy(hpriv, port, dev, child); | ||
451 | if (rc) | ||
452 | goto err_out; | ||
336 | 453 | ||
337 | enabled_ports++; | 454 | enabled_ports++; |
338 | } | 455 | } |
@@ -349,38 +466,14 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev) | |||
349 | * If no sub-node was found, keep this for device tree | 466 | * If no sub-node was found, keep this for device tree |
350 | * compatibility | 467 | * compatibility |
351 | */ | 468 | */ |
352 | struct phy *phy = devm_phy_get(dev, "sata-phy"); | 469 | rc = ahci_platform_get_phy(hpriv, 0, dev, dev->of_node); |
353 | if (!IS_ERR(phy)) { | 470 | if (rc) |
354 | hpriv->phys = devm_kzalloc(dev, sizeof(*hpriv->phys), | 471 | goto err_out; |
355 | GFP_KERNEL); | ||
356 | if (!hpriv->phys) { | ||
357 | rc = -ENOMEM; | ||
358 | goto err_out; | ||
359 | } | ||
360 | |||
361 | hpriv->phys[0] = phy; | ||
362 | hpriv->nports = 1; | ||
363 | } else { | ||
364 | rc = PTR_ERR(phy); | ||
365 | switch (rc) { | ||
366 | case -ENOSYS: | ||
367 | /* No PHY support. Check if PHY is required. */ | ||
368 | if (of_find_property(dev->of_node, "phys", NULL)) { | ||
369 | dev_err(dev, "couldn't get sata-phy: ENOSYS\n"); | ||
370 | goto err_out; | ||
371 | } | ||
372 | case -ENODEV: | ||
373 | /* continue normally */ | ||
374 | hpriv->phys = NULL; | ||
375 | break; | ||
376 | |||
377 | default: | ||
378 | goto err_out; | ||
379 | 472 | ||
380 | } | 473 | rc = ahci_platform_get_regulator(hpriv, 0, dev); |
381 | } | 474 | if (rc == -EPROBE_DEFER) |
475 | goto err_out; | ||
382 | } | 476 | } |
383 | |||
384 | pm_runtime_enable(dev); | 477 | pm_runtime_enable(dev); |
385 | pm_runtime_get_sync(dev); | 478 | pm_runtime_get_sync(dev); |
386 | hpriv->got_runtime_pm = true; | 479 | hpriv->got_runtime_pm = true; |
@@ -399,6 +492,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_get_resources); | |||
399 | * @pdev: platform device pointer for the host | 492 | * @pdev: platform device pointer for the host |
400 | * @hpriv: ahci-host private data for the host | 493 | * @hpriv: ahci-host private data for the host |
401 | * @pi_template: template for the ata_port_info to use | 494 | * @pi_template: template for the ata_port_info to use |
495 | * @sht: scsi_host_template to use when registering | ||
402 | * | 496 | * |
403 | * This function does all the usual steps needed to bring up an | 497 | * This function does all the usual steps needed to bring up an |
404 | * ahci-platform host, note any necessary resources (ie clks, phys, etc.) | 498 | * ahci-platform host, note any necessary resources (ie clks, phys, etc.) |
@@ -409,7 +503,8 @@ EXPORT_SYMBOL_GPL(ahci_platform_get_resources); | |||
409 | */ | 503 | */ |
410 | int ahci_platform_init_host(struct platform_device *pdev, | 504 | int ahci_platform_init_host(struct platform_device *pdev, |
411 | struct ahci_host_priv *hpriv, | 505 | struct ahci_host_priv *hpriv, |
412 | const struct ata_port_info *pi_template) | 506 | const struct ata_port_info *pi_template, |
507 | struct scsi_host_template *sht) | ||
413 | { | 508 | { |
414 | struct device *dev = &pdev->dev; | 509 | struct device *dev = &pdev->dev; |
415 | struct ata_port_info pi = *pi_template; | 510 | struct ata_port_info pi = *pi_template; |
@@ -493,7 +588,7 @@ int ahci_platform_init_host(struct platform_device *pdev, | |||
493 | ahci_init_controller(host); | 588 | ahci_init_controller(host); |
494 | ahci_print_info(host, "platform"); | 589 | ahci_print_info(host, "platform"); |
495 | 590 | ||
496 | return ahci_host_activate(host, irq, &ahci_platform_sht); | 591 | return ahci_host_activate(host, irq, sht); |
497 | } | 592 | } |
498 | EXPORT_SYMBOL_GPL(ahci_platform_init_host); | 593 | EXPORT_SYMBOL_GPL(ahci_platform_init_host); |
499 | 594 | ||
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index d1a05f9bb91f..4b0d5e71858e 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -1752,33 +1752,6 @@ unsigned ata_exec_internal(struct ata_device *dev, | |||
1752 | } | 1752 | } |
1753 | 1753 | ||
1754 | /** | 1754 | /** |
1755 | * ata_do_simple_cmd - execute simple internal command | ||
1756 | * @dev: Device to which the command is sent | ||
1757 | * @cmd: Opcode to execute | ||
1758 | * | ||
1759 | * Execute a 'simple' command, that only consists of the opcode | ||
1760 | * 'cmd' itself, without filling any other registers | ||
1761 | * | ||
1762 | * LOCKING: | ||
1763 | * Kernel thread context (may sleep). | ||
1764 | * | ||
1765 | * RETURNS: | ||
1766 | * Zero on success, AC_ERR_* mask on failure | ||
1767 | */ | ||
1768 | unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd) | ||
1769 | { | ||
1770 | struct ata_taskfile tf; | ||
1771 | |||
1772 | ata_tf_init(dev, &tf); | ||
1773 | |||
1774 | tf.command = cmd; | ||
1775 | tf.flags |= ATA_TFLAG_DEVICE; | ||
1776 | tf.protocol = ATA_PROT_NODATA; | ||
1777 | |||
1778 | return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); | ||
1779 | } | ||
1780 | |||
1781 | /** | ||
1782 | * ata_pio_need_iordy - check if iordy needed | 1755 | * ata_pio_need_iordy - check if iordy needed |
1783 | * @adev: ATA device | 1756 | * @adev: ATA device |
1784 | * | 1757 | * |
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 8d00c2638bed..a9f5aed32d39 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c | |||
@@ -1635,7 +1635,6 @@ unsigned int atapi_eh_request_sense(struct ata_device *dev, | |||
1635 | 1635 | ||
1636 | DPRINTK("ATAPI request sense\n"); | 1636 | DPRINTK("ATAPI request sense\n"); |
1637 | 1637 | ||
1638 | /* FIXME: is this needed? */ | ||
1639 | memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); | 1638 | memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); |
1640 | 1639 | ||
1641 | /* initialize sense_buf with the error register, | 1640 | /* initialize sense_buf with the error register, |
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 6abd17a85b13..280729325ebd 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c | |||
@@ -1995,8 +1995,8 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf) | |||
1995 | 1995 | ||
1996 | VPRINTK("ENTER\n"); | 1996 | VPRINTK("ENTER\n"); |
1997 | 1997 | ||
1998 | /* set scsi removeable (RMB) bit per ata bit */ | 1998 | /* set scsi removable (RMB) bit per ata bit */ |
1999 | if (ata_id_removeable(args->id)) | 1999 | if (ata_id_removable(args->id)) |
2000 | hdr[1] |= (1 << 7); | 2000 | hdr[1] |= (1 << 7); |
2001 | 2001 | ||
2002 | if (args->dev->class == ATA_DEV_ZAC) { | 2002 | if (args->dev->class == ATA_DEV_ZAC) { |
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index 5f4e0cca56ec..82ebe263d2f1 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h | |||
@@ -76,7 +76,6 @@ extern unsigned ata_exec_internal_sg(struct ata_device *dev, | |||
76 | struct ata_taskfile *tf, const u8 *cdb, | 76 | struct ata_taskfile *tf, const u8 *cdb, |
77 | int dma_dir, struct scatterlist *sg, | 77 | int dma_dir, struct scatterlist *sg, |
78 | unsigned int n_elem, unsigned long timeout); | 78 | unsigned int n_elem, unsigned long timeout); |
79 | extern unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd); | ||
80 | extern int ata_wait_ready(struct ata_link *link, unsigned long deadline, | 79 | extern int ata_wait_ready(struct ata_link *link, unsigned long deadline, |
81 | int (*check_ready)(struct ata_link *link)); | 80 | int (*check_ready)(struct ata_link *link)); |
82 | extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, | 81 | extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, |
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c index 48ae4b434474..f9ca72e937ee 100644 --- a/drivers/ata/pata_cs5530.c +++ b/drivers/ata/pata_cs5530.c | |||
@@ -276,10 +276,8 @@ static int cs5530_init_chip(void) | |||
276 | pci_dev_put(cs5530_0); | 276 | pci_dev_put(cs5530_0); |
277 | return 0; | 277 | return 0; |
278 | fail_put: | 278 | fail_put: |
279 | if (master_0) | 279 | pci_dev_put(master_0); |
280 | pci_dev_put(master_0); | 280 | pci_dev_put(cs5530_0); |
281 | if (cs5530_0) | ||
282 | pci_dev_put(cs5530_0); | ||
283 | return -ENODEV; | 281 | return -ENODEV; |
284 | } | 282 | } |
285 | 283 | ||
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c index dcc408abe171..b6b7af894d9d 100644 --- a/drivers/ata/pata_of_platform.c +++ b/drivers/ata/pata_of_platform.c | |||
@@ -16,6 +16,12 @@ | |||
16 | #include <linux/ata_platform.h> | 16 | #include <linux/ata_platform.h> |
17 | #include <linux/libata.h> | 17 | #include <linux/libata.h> |
18 | 18 | ||
19 | #define DRV_NAME "pata_of_platform" | ||
20 | |||
21 | static struct scsi_host_template pata_platform_sht = { | ||
22 | ATA_PIO_SHT(DRV_NAME), | ||
23 | }; | ||
24 | |||
19 | static int pata_of_platform_probe(struct platform_device *ofdev) | 25 | static int pata_of_platform_probe(struct platform_device *ofdev) |
20 | { | 26 | { |
21 | int ret; | 27 | int ret; |
@@ -63,7 +69,7 @@ static int pata_of_platform_probe(struct platform_device *ofdev) | |||
63 | pio_mask |= (1 << pio_mode) - 1; | 69 | pio_mask |= (1 << pio_mode) - 1; |
64 | 70 | ||
65 | return __pata_platform_probe(&ofdev->dev, &io_res, &ctl_res, irq_res, | 71 | return __pata_platform_probe(&ofdev->dev, &io_res, &ctl_res, irq_res, |
66 | reg_shift, pio_mask); | 72 | reg_shift, pio_mask, &pata_platform_sht); |
67 | } | 73 | } |
68 | 74 | ||
69 | static struct of_device_id pata_of_platform_match[] = { | 75 | static struct of_device_id pata_of_platform_match[] = { |
@@ -74,7 +80,7 @@ MODULE_DEVICE_TABLE(of, pata_of_platform_match); | |||
74 | 80 | ||
75 | static struct platform_driver pata_of_platform_driver = { | 81 | static struct platform_driver pata_of_platform_driver = { |
76 | .driver = { | 82 | .driver = { |
77 | .name = "pata_of_platform", | 83 | .name = DRV_NAME, |
78 | .of_match_table = pata_of_platform_match, | 84 | .of_match_table = pata_of_platform_match, |
79 | }, | 85 | }, |
80 | .probe = pata_of_platform_probe, | 86 | .probe = pata_of_platform_probe, |
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c index 4d06a5cda987..dca8251b1aea 100644 --- a/drivers/ata/pata_pdc2027x.c +++ b/drivers/ata/pata_pdc2027x.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/blkdev.h> | 28 | #include <linux/blkdev.h> |
29 | #include <linux/delay.h> | 29 | #include <linux/delay.h> |
30 | #include <linux/device.h> | 30 | #include <linux/device.h> |
31 | #include <linux/ktime.h> | ||
31 | #include <scsi/scsi.h> | 32 | #include <scsi/scsi.h> |
32 | #include <scsi/scsi_host.h> | 33 | #include <scsi/scsi_host.h> |
33 | #include <scsi/scsi_cmnd.h> | 34 | #include <scsi/scsi_cmnd.h> |
@@ -605,7 +606,7 @@ static long pdc_detect_pll_input_clock(struct ata_host *host) | |||
605 | void __iomem *mmio_base = host->iomap[PDC_MMIO_BAR]; | 606 | void __iomem *mmio_base = host->iomap[PDC_MMIO_BAR]; |
606 | u32 scr; | 607 | u32 scr; |
607 | long start_count, end_count; | 608 | long start_count, end_count; |
608 | struct timeval start_time, end_time; | 609 | ktime_t start_time, end_time; |
609 | long pll_clock, usec_elapsed; | 610 | long pll_clock, usec_elapsed; |
610 | 611 | ||
611 | /* Start the test mode */ | 612 | /* Start the test mode */ |
@@ -616,14 +617,14 @@ static long pdc_detect_pll_input_clock(struct ata_host *host) | |||
616 | 617 | ||
617 | /* Read current counter value */ | 618 | /* Read current counter value */ |
618 | start_count = pdc_read_counter(host); | 619 | start_count = pdc_read_counter(host); |
619 | do_gettimeofday(&start_time); | 620 | start_time = ktime_get(); |
620 | 621 | ||
621 | /* Let the counter run for 100 ms. */ | 622 | /* Let the counter run for 100 ms. */ |
622 | mdelay(100); | 623 | mdelay(100); |
623 | 624 | ||
624 | /* Read the counter values again */ | 625 | /* Read the counter values again */ |
625 | end_count = pdc_read_counter(host); | 626 | end_count = pdc_read_counter(host); |
626 | do_gettimeofday(&end_time); | 627 | end_time = ktime_get(); |
627 | 628 | ||
628 | /* Stop the test mode */ | 629 | /* Stop the test mode */ |
629 | scr = ioread32(mmio_base + PDC_SYS_CTL); | 630 | scr = ioread32(mmio_base + PDC_SYS_CTL); |
@@ -632,8 +633,7 @@ static long pdc_detect_pll_input_clock(struct ata_host *host) | |||
632 | ioread32(mmio_base + PDC_SYS_CTL); /* flush */ | 633 | ioread32(mmio_base + PDC_SYS_CTL); /* flush */ |
633 | 634 | ||
634 | /* calculate the input clock in Hz */ | 635 | /* calculate the input clock in Hz */ |
635 | usec_elapsed = (end_time.tv_sec - start_time.tv_sec) * 1000000 + | 636 | usec_elapsed = (long) ktime_us_delta(end_time, start_time); |
636 | (end_time.tv_usec - start_time.tv_usec); | ||
637 | 637 | ||
638 | pll_clock = ((start_count - end_count) & 0x3fffffff) / 100 * | 638 | pll_clock = ((start_count - end_count) & 0x3fffffff) / 100 * |
639 | (100000000 / usec_elapsed); | 639 | (100000000 / usec_elapsed); |
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c index 1eedfe46d7c8..c503ded87bb8 100644 --- a/drivers/ata/pata_platform.c +++ b/drivers/ata/pata_platform.c | |||
@@ -78,6 +78,7 @@ static void pata_platform_setup_port(struct ata_ioports *ioaddr, | |||
78 | * @irq_res: Resource representing IRQ and its flags | 78 | * @irq_res: Resource representing IRQ and its flags |
79 | * @ioport_shift: I/O port shift | 79 | * @ioport_shift: I/O port shift |
80 | * @__pio_mask: PIO mask | 80 | * @__pio_mask: PIO mask |
81 | * @sht: scsi_host_template to use when registering | ||
81 | * | 82 | * |
82 | * Register a platform bus IDE interface. Such interfaces are PIO and we | 83 | * Register a platform bus IDE interface. Such interfaces are PIO and we |
83 | * assume do not support IRQ sharing. | 84 | * assume do not support IRQ sharing. |
@@ -99,7 +100,8 @@ static void pata_platform_setup_port(struct ata_ioports *ioaddr, | |||
99 | */ | 100 | */ |
100 | int __pata_platform_probe(struct device *dev, struct resource *io_res, | 101 | int __pata_platform_probe(struct device *dev, struct resource *io_res, |
101 | struct resource *ctl_res, struct resource *irq_res, | 102 | struct resource *ctl_res, struct resource *irq_res, |
102 | unsigned int ioport_shift, int __pio_mask) | 103 | unsigned int ioport_shift, int __pio_mask, |
104 | struct scsi_host_template *sht) | ||
103 | { | 105 | { |
104 | struct ata_host *host; | 106 | struct ata_host *host; |
105 | struct ata_port *ap; | 107 | struct ata_port *ap; |
@@ -170,7 +172,7 @@ int __pata_platform_probe(struct device *dev, struct resource *io_res, | |||
170 | 172 | ||
171 | /* activate */ | 173 | /* activate */ |
172 | return ata_host_activate(host, irq, irq ? ata_sff_interrupt : NULL, | 174 | return ata_host_activate(host, irq, irq ? ata_sff_interrupt : NULL, |
173 | irq_flags, &pata_platform_sht); | 175 | irq_flags, sht); |
174 | } | 176 | } |
175 | EXPORT_SYMBOL_GPL(__pata_platform_probe); | 177 | EXPORT_SYMBOL_GPL(__pata_platform_probe); |
176 | 178 | ||
@@ -216,7 +218,7 @@ static int pata_platform_probe(struct platform_device *pdev) | |||
216 | 218 | ||
217 | return __pata_platform_probe(&pdev->dev, io_res, ctl_res, irq_res, | 219 | return __pata_platform_probe(&pdev->dev, io_res, ctl_res, irq_res, |
218 | pp_info ? pp_info->ioport_shift : 0, | 220 | pp_info ? pp_info->ioport_shift : 0, |
219 | pio_mask); | 221 | pio_mask, &pata_platform_sht); |
220 | } | 222 | } |
221 | 223 | ||
222 | static struct platform_driver pata_platform_driver = { | 224 | static struct platform_driver pata_platform_driver = { |
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c index 8e8248179d20..fdb0f2879ea7 100644 --- a/drivers/ata/sata_dwc_460ex.c +++ b/drivers/ata/sata_dwc_460ex.c | |||
@@ -48,6 +48,18 @@ | |||
48 | #define DRV_NAME "sata-dwc" | 48 | #define DRV_NAME "sata-dwc" |
49 | #define DRV_VERSION "1.3" | 49 | #define DRV_VERSION "1.3" |
50 | 50 | ||
51 | #ifndef out_le32 | ||
52 | #define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (void __iomem *)(a)) | ||
53 | #endif | ||
54 | |||
55 | #ifndef in_le32 | ||
56 | #define in_le32(a) __le32_to_cpu(__raw_readl((void __iomem *)(a))) | ||
57 | #endif | ||
58 | |||
59 | #ifndef NO_IRQ | ||
60 | #define NO_IRQ 0 | ||
61 | #endif | ||
62 | |||
51 | /* SATA DMA driver Globals */ | 63 | /* SATA DMA driver Globals */ |
52 | #define DMA_NUM_CHANS 1 | 64 | #define DMA_NUM_CHANS 1 |
53 | #define DMA_NUM_CHAN_REGS 8 | 65 | #define DMA_NUM_CHAN_REGS 8 |
@@ -273,7 +285,7 @@ struct sata_dwc_device { | |||
273 | struct device *dev; /* generic device struct */ | 285 | struct device *dev; /* generic device struct */ |
274 | struct ata_probe_ent *pe; /* ptr to probe-ent */ | 286 | struct ata_probe_ent *pe; /* ptr to probe-ent */ |
275 | struct ata_host *host; | 287 | struct ata_host *host; |
276 | u8 *reg_base; | 288 | u8 __iomem *reg_base; |
277 | struct sata_dwc_regs *sata_dwc_regs; /* DW Synopsys SATA specific */ | 289 | struct sata_dwc_regs *sata_dwc_regs; /* DW Synopsys SATA specific */ |
278 | int irq_dma; | 290 | int irq_dma; |
279 | }; | 291 | }; |
@@ -323,7 +335,9 @@ struct sata_dwc_host_priv { | |||
323 | struct device *dwc_dev; | 335 | struct device *dwc_dev; |
324 | int dma_channel; | 336 | int dma_channel; |
325 | }; | 337 | }; |
326 | struct sata_dwc_host_priv host_pvt; | 338 | |
339 | static struct sata_dwc_host_priv host_pvt; | ||
340 | |||
327 | /* | 341 | /* |
328 | * Prototypes | 342 | * Prototypes |
329 | */ | 343 | */ |
@@ -580,9 +594,9 @@ static int map_sg_to_lli(struct scatterlist *sg, int num_elems, | |||
580 | 594 | ||
581 | sms_val = 0; | 595 | sms_val = 0; |
582 | dms_val = 1 + host_pvt.dma_channel; | 596 | dms_val = 1 + host_pvt.dma_channel; |
583 | dev_dbg(host_pvt.dwc_dev, "%s: sg=%p nelem=%d lli=%p dma_lli=0x%08x" | 597 | dev_dbg(host_pvt.dwc_dev, |
584 | " dmadr=0x%08x\n", __func__, sg, num_elems, lli, (u32)dma_lli, | 598 | "%s: sg=%p nelem=%d lli=%p dma_lli=0x%pad dmadr=0x%p\n", |
585 | (u32)dmadr_addr); | 599 | __func__, sg, num_elems, lli, &dma_lli, dmadr_addr); |
586 | 600 | ||
587 | bl = get_burst_length_encode(AHB_DMA_BRST_DFLT); | 601 | bl = get_burst_length_encode(AHB_DMA_BRST_DFLT); |
588 | 602 | ||
@@ -773,7 +787,7 @@ static void dma_dwc_exit(struct sata_dwc_device *hsdev) | |||
773 | { | 787 | { |
774 | dev_dbg(host_pvt.dwc_dev, "%s:\n", __func__); | 788 | dev_dbg(host_pvt.dwc_dev, "%s:\n", __func__); |
775 | if (host_pvt.sata_dma_regs) { | 789 | if (host_pvt.sata_dma_regs) { |
776 | iounmap(host_pvt.sata_dma_regs); | 790 | iounmap((void __iomem *)host_pvt.sata_dma_regs); |
777 | host_pvt.sata_dma_regs = NULL; | 791 | host_pvt.sata_dma_regs = NULL; |
778 | } | 792 | } |
779 | 793 | ||
@@ -818,7 +832,7 @@ static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val) | |||
818 | return -EINVAL; | 832 | return -EINVAL; |
819 | } | 833 | } |
820 | 834 | ||
821 | *val = in_le32((void *)link->ap->ioaddr.scr_addr + (scr * 4)); | 835 | *val = in_le32(link->ap->ioaddr.scr_addr + (scr * 4)); |
822 | dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=val=0x%08x\n", | 836 | dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=val=0x%08x\n", |
823 | __func__, link->ap->print_id, scr, *val); | 837 | __func__, link->ap->print_id, scr, *val); |
824 | 838 | ||
@@ -834,21 +848,19 @@ static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val) | |||
834 | __func__, scr); | 848 | __func__, scr); |
835 | return -EINVAL; | 849 | return -EINVAL; |
836 | } | 850 | } |
837 | out_le32((void *)link->ap->ioaddr.scr_addr + (scr * 4), val); | 851 | out_le32(link->ap->ioaddr.scr_addr + (scr * 4), val); |
838 | 852 | ||
839 | return 0; | 853 | return 0; |
840 | } | 854 | } |
841 | 855 | ||
842 | static u32 core_scr_read(unsigned int scr) | 856 | static u32 core_scr_read(unsigned int scr) |
843 | { | 857 | { |
844 | return in_le32((void __iomem *)(host_pvt.scr_addr_sstatus) +\ | 858 | return in_le32(host_pvt.scr_addr_sstatus + (scr * 4)); |
845 | (scr * 4)); | ||
846 | } | 859 | } |
847 | 860 | ||
848 | static void core_scr_write(unsigned int scr, u32 val) | 861 | static void core_scr_write(unsigned int scr, u32 val) |
849 | { | 862 | { |
850 | out_le32((void __iomem *)(host_pvt.scr_addr_sstatus) + (scr * 4), | 863 | out_le32(host_pvt.scr_addr_sstatus + (scr * 4), val); |
851 | val); | ||
852 | } | 864 | } |
853 | 865 | ||
854 | static void clear_serror(void) | 866 | static void clear_serror(void) |
@@ -856,7 +868,6 @@ static void clear_serror(void) | |||
856 | u32 val; | 868 | u32 val; |
857 | val = core_scr_read(SCR_ERROR); | 869 | val = core_scr_read(SCR_ERROR); |
858 | core_scr_write(SCR_ERROR, val); | 870 | core_scr_write(SCR_ERROR, val); |
859 | |||
860 | } | 871 | } |
861 | 872 | ||
862 | static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit) | 873 | static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit) |
@@ -1256,24 +1267,24 @@ static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev) | |||
1256 | 1267 | ||
1257 | static void sata_dwc_setup_port(struct ata_ioports *port, unsigned long base) | 1268 | static void sata_dwc_setup_port(struct ata_ioports *port, unsigned long base) |
1258 | { | 1269 | { |
1259 | port->cmd_addr = (void *)base + 0x00; | 1270 | port->cmd_addr = (void __iomem *)base + 0x00; |
1260 | port->data_addr = (void *)base + 0x00; | 1271 | port->data_addr = (void __iomem *)base + 0x00; |
1261 | 1272 | ||
1262 | port->error_addr = (void *)base + 0x04; | 1273 | port->error_addr = (void __iomem *)base + 0x04; |
1263 | port->feature_addr = (void *)base + 0x04; | 1274 | port->feature_addr = (void __iomem *)base + 0x04; |
1264 | 1275 | ||
1265 | port->nsect_addr = (void *)base + 0x08; | 1276 | port->nsect_addr = (void __iomem *)base + 0x08; |
1266 | 1277 | ||
1267 | port->lbal_addr = (void *)base + 0x0c; | 1278 | port->lbal_addr = (void __iomem *)base + 0x0c; |
1268 | port->lbam_addr = (void *)base + 0x10; | 1279 | port->lbam_addr = (void __iomem *)base + 0x10; |
1269 | port->lbah_addr = (void *)base + 0x14; | 1280 | port->lbah_addr = (void __iomem *)base + 0x14; |
1270 | 1281 | ||
1271 | port->device_addr = (void *)base + 0x18; | 1282 | port->device_addr = (void __iomem *)base + 0x18; |
1272 | port->command_addr = (void *)base + 0x1c; | 1283 | port->command_addr = (void __iomem *)base + 0x1c; |
1273 | port->status_addr = (void *)base + 0x1c; | 1284 | port->status_addr = (void __iomem *)base + 0x1c; |
1274 | 1285 | ||
1275 | port->altstatus_addr = (void *)base + 0x20; | 1286 | port->altstatus_addr = (void __iomem *)base + 0x20; |
1276 | port->ctl_addr = (void *)base + 0x20; | 1287 | port->ctl_addr = (void __iomem *)base + 0x20; |
1277 | } | 1288 | } |
1278 | 1289 | ||
1279 | /* | 1290 | /* |
@@ -1314,7 +1325,7 @@ static int sata_dwc_port_start(struct ata_port *ap) | |||
1314 | for (i = 0; i < SATA_DWC_QCMD_MAX; i++) | 1325 | for (i = 0; i < SATA_DWC_QCMD_MAX; i++) |
1315 | hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT; | 1326 | hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT; |
1316 | 1327 | ||
1317 | ap->bmdma_prd = 0; /* set these so libata doesn't use them */ | 1328 | ap->bmdma_prd = NULL; /* set these so libata doesn't use them */ |
1318 | ap->bmdma_prd_dma = 0; | 1329 | ap->bmdma_prd_dma = 0; |
1319 | 1330 | ||
1320 | /* | 1331 | /* |
@@ -1511,8 +1522,8 @@ static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag) | |||
1511 | 1522 | ||
1512 | dma_chan = dma_dwc_xfer_setup(sg, qc->n_elem, hsdevp->llit[tag], | 1523 | dma_chan = dma_dwc_xfer_setup(sg, qc->n_elem, hsdevp->llit[tag], |
1513 | hsdevp->llit_dma[tag], | 1524 | hsdevp->llit_dma[tag], |
1514 | (void *__iomem)(&hsdev->sata_dwc_regs->\ | 1525 | (void __iomem *)&hsdev->sata_dwc_regs->dmadr, |
1515 | dmadr), qc->dma_dir); | 1526 | qc->dma_dir); |
1516 | if (dma_chan < 0) { | 1527 | if (dma_chan < 0) { |
1517 | dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns err %d\n", | 1528 | dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns err %d\n", |
1518 | __func__, dma_chan); | 1529 | __func__, dma_chan); |
@@ -1585,8 +1596,8 @@ static void sata_dwc_error_handler(struct ata_port *ap) | |||
1585 | ata_sff_error_handler(ap); | 1596 | ata_sff_error_handler(ap); |
1586 | } | 1597 | } |
1587 | 1598 | ||
1588 | int sata_dwc_hardreset(struct ata_link *link, unsigned int *class, | 1599 | static int sata_dwc_hardreset(struct ata_link *link, unsigned int *class, |
1589 | unsigned long deadline) | 1600 | unsigned long deadline) |
1590 | { | 1601 | { |
1591 | struct sata_dwc_device *hsdev = HSDEV_FROM_AP(link->ap); | 1602 | struct sata_dwc_device *hsdev = HSDEV_FROM_AP(link->ap); |
1592 | int ret; | 1603 | int ret; |
@@ -1618,7 +1629,7 @@ static struct scsi_host_template sata_dwc_sht = { | |||
1618 | * max of 1. This will get fixed in in a future release. | 1629 | * max of 1. This will get fixed in in a future release. |
1619 | */ | 1630 | */ |
1620 | .sg_tablesize = LIBATA_MAX_PRD, | 1631 | .sg_tablesize = LIBATA_MAX_PRD, |
1621 | .can_queue = ATA_DEF_QUEUE, /* ATA_MAX_QUEUE */ | 1632 | /* .can_queue = ATA_MAX_QUEUE, */ |
1622 | .dma_boundary = ATA_DMA_BOUNDARY, | 1633 | .dma_boundary = ATA_DMA_BOUNDARY, |
1623 | }; | 1634 | }; |
1624 | 1635 | ||
@@ -1655,7 +1666,7 @@ static int sata_dwc_probe(struct platform_device *ofdev) | |||
1655 | struct sata_dwc_device *hsdev; | 1666 | struct sata_dwc_device *hsdev; |
1656 | u32 idr, versionr; | 1667 | u32 idr, versionr; |
1657 | char *ver = (char *)&versionr; | 1668 | char *ver = (char *)&versionr; |
1658 | u8 *base = NULL; | 1669 | u8 __iomem *base; |
1659 | int err = 0; | 1670 | int err = 0; |
1660 | int irq; | 1671 | int irq; |
1661 | struct ata_host *host; | 1672 | struct ata_host *host; |
@@ -1665,12 +1676,12 @@ static int sata_dwc_probe(struct platform_device *ofdev) | |||
1665 | u32 dma_chan; | 1676 | u32 dma_chan; |
1666 | 1677 | ||
1667 | /* Allocate DWC SATA device */ | 1678 | /* Allocate DWC SATA device */ |
1668 | hsdev = kzalloc(sizeof(*hsdev), GFP_KERNEL); | 1679 | host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS); |
1669 | if (hsdev == NULL) { | 1680 | hsdev = devm_kzalloc(&ofdev->dev, sizeof(*hsdev), GFP_KERNEL); |
1670 | dev_err(&ofdev->dev, "kmalloc failed for hsdev\n"); | 1681 | if (!host || !hsdev) |
1671 | err = -ENOMEM; | 1682 | return -ENOMEM; |
1672 | goto error; | 1683 | |
1673 | } | 1684 | host->private_data = hsdev; |
1674 | 1685 | ||
1675 | if (of_property_read_u32(np, "dma-channel", &dma_chan)) { | 1686 | if (of_property_read_u32(np, "dma-channel", &dma_chan)) { |
1676 | dev_warn(&ofdev->dev, "no dma-channel property set." | 1687 | dev_warn(&ofdev->dev, "no dma-channel property set." |
@@ -1680,12 +1691,11 @@ static int sata_dwc_probe(struct platform_device *ofdev) | |||
1680 | host_pvt.dma_channel = dma_chan; | 1691 | host_pvt.dma_channel = dma_chan; |
1681 | 1692 | ||
1682 | /* Ioremap SATA registers */ | 1693 | /* Ioremap SATA registers */ |
1683 | base = of_iomap(ofdev->dev.of_node, 0); | 1694 | base = of_iomap(np, 0); |
1684 | if (!base) { | 1695 | if (!base) { |
1685 | dev_err(&ofdev->dev, "ioremap failed for SATA register" | 1696 | dev_err(&ofdev->dev, "ioremap failed for SATA register" |
1686 | " address\n"); | 1697 | " address\n"); |
1687 | err = -ENODEV; | 1698 | return -ENODEV; |
1688 | goto error_kmalloc; | ||
1689 | } | 1699 | } |
1690 | hsdev->reg_base = base; | 1700 | hsdev->reg_base = base; |
1691 | dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n"); | 1701 | dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n"); |
@@ -1693,16 +1703,6 @@ static int sata_dwc_probe(struct platform_device *ofdev) | |||
1693 | /* Synopsys DWC SATA specific Registers */ | 1703 | /* Synopsys DWC SATA specific Registers */ |
1694 | hsdev->sata_dwc_regs = (void *__iomem)(base + SATA_DWC_REG_OFFSET); | 1704 | hsdev->sata_dwc_regs = (void *__iomem)(base + SATA_DWC_REG_OFFSET); |
1695 | 1705 | ||
1696 | /* Allocate and fill host */ | ||
1697 | host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS); | ||
1698 | if (!host) { | ||
1699 | dev_err(&ofdev->dev, "ata_host_alloc_pinfo failed\n"); | ||
1700 | err = -ENOMEM; | ||
1701 | goto error_iomap; | ||
1702 | } | ||
1703 | |||
1704 | host->private_data = hsdev; | ||
1705 | |||
1706 | /* Setup port */ | 1706 | /* Setup port */ |
1707 | host->ports[0]->ioaddr.cmd_addr = base; | 1707 | host->ports[0]->ioaddr.cmd_addr = base; |
1708 | host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET; | 1708 | host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET; |
@@ -1716,7 +1716,7 @@ static int sata_dwc_probe(struct platform_device *ofdev) | |||
1716 | idr, ver[0], ver[1], ver[2]); | 1716 | idr, ver[0], ver[1], ver[2]); |
1717 | 1717 | ||
1718 | /* Get SATA DMA interrupt number */ | 1718 | /* Get SATA DMA interrupt number */ |
1719 | irq = irq_of_parse_and_map(ofdev->dev.of_node, 1); | 1719 | irq = irq_of_parse_and_map(np, 1); |
1720 | if (irq == NO_IRQ) { | 1720 | if (irq == NO_IRQ) { |
1721 | dev_err(&ofdev->dev, "no SATA DMA irq\n"); | 1721 | dev_err(&ofdev->dev, "no SATA DMA irq\n"); |
1722 | err = -ENODEV; | 1722 | err = -ENODEV; |
@@ -1724,7 +1724,7 @@ static int sata_dwc_probe(struct platform_device *ofdev) | |||
1724 | } | 1724 | } |
1725 | 1725 | ||
1726 | /* Get physical SATA DMA register base address */ | 1726 | /* Get physical SATA DMA register base address */ |
1727 | host_pvt.sata_dma_regs = of_iomap(ofdev->dev.of_node, 1); | 1727 | host_pvt.sata_dma_regs = (void *)of_iomap(np, 1); |
1728 | if (!(host_pvt.sata_dma_regs)) { | 1728 | if (!(host_pvt.sata_dma_regs)) { |
1729 | dev_err(&ofdev->dev, "ioremap failed for AHBDMA register" | 1729 | dev_err(&ofdev->dev, "ioremap failed for AHBDMA register" |
1730 | " address\n"); | 1730 | " address\n"); |
@@ -1744,7 +1744,7 @@ static int sata_dwc_probe(struct platform_device *ofdev) | |||
1744 | sata_dwc_enable_interrupts(hsdev); | 1744 | sata_dwc_enable_interrupts(hsdev); |
1745 | 1745 | ||
1746 | /* Get SATA interrupt number */ | 1746 | /* Get SATA interrupt number */ |
1747 | irq = irq_of_parse_and_map(ofdev->dev.of_node, 0); | 1747 | irq = irq_of_parse_and_map(np, 0); |
1748 | if (irq == NO_IRQ) { | 1748 | if (irq == NO_IRQ) { |
1749 | dev_err(&ofdev->dev, "no SATA DMA irq\n"); | 1749 | dev_err(&ofdev->dev, "no SATA DMA irq\n"); |
1750 | err = -ENODEV; | 1750 | err = -ENODEV; |
@@ -1770,9 +1770,6 @@ error_dma_iomap: | |||
1770 | iounmap((void __iomem *)host_pvt.sata_dma_regs); | 1770 | iounmap((void __iomem *)host_pvt.sata_dma_regs); |
1771 | error_iomap: | 1771 | error_iomap: |
1772 | iounmap(base); | 1772 | iounmap(base); |
1773 | error_kmalloc: | ||
1774 | kfree(hsdev); | ||
1775 | error: | ||
1776 | return err; | 1773 | return err; |
1777 | } | 1774 | } |
1778 | 1775 | ||
@@ -1783,15 +1780,12 @@ static int sata_dwc_remove(struct platform_device *ofdev) | |||
1783 | struct sata_dwc_device *hsdev = host->private_data; | 1780 | struct sata_dwc_device *hsdev = host->private_data; |
1784 | 1781 | ||
1785 | ata_host_detach(host); | 1782 | ata_host_detach(host); |
1786 | dev_set_drvdata(dev, NULL); | ||
1787 | 1783 | ||
1788 | /* Free SATA DMA resources */ | 1784 | /* Free SATA DMA resources */ |
1789 | dma_dwc_exit(hsdev); | 1785 | dma_dwc_exit(hsdev); |
1790 | 1786 | ||
1791 | iounmap((void __iomem *)host_pvt.sata_dma_regs); | 1787 | iounmap((void __iomem *)host_pvt.sata_dma_regs); |
1792 | iounmap(hsdev->reg_base); | 1788 | iounmap(hsdev->reg_base); |
1793 | kfree(hsdev); | ||
1794 | kfree(host); | ||
1795 | dev_dbg(&ofdev->dev, "done\n"); | 1789 | dev_dbg(&ofdev->dev, "done\n"); |
1796 | return 0; | 1790 | return 0; |
1797 | } | 1791 | } |
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index f9a0e34eb111..f8c33e3772b8 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c | |||
@@ -4185,8 +4185,7 @@ err: | |||
4185 | clk_disable_unprepare(hpriv->port_clks[port]); | 4185 | clk_disable_unprepare(hpriv->port_clks[port]); |
4186 | clk_put(hpriv->port_clks[port]); | 4186 | clk_put(hpriv->port_clks[port]); |
4187 | } | 4187 | } |
4188 | if (hpriv->port_phys[port]) | 4188 | phy_power_off(hpriv->port_phys[port]); |
4189 | phy_power_off(hpriv->port_phys[port]); | ||
4190 | } | 4189 | } |
4191 | 4190 | ||
4192 | return rc; | 4191 | return rc; |
@@ -4216,8 +4215,7 @@ static int mv_platform_remove(struct platform_device *pdev) | |||
4216 | clk_disable_unprepare(hpriv->port_clks[port]); | 4215 | clk_disable_unprepare(hpriv->port_clks[port]); |
4217 | clk_put(hpriv->port_clks[port]); | 4216 | clk_put(hpriv->port_clks[port]); |
4218 | } | 4217 | } |
4219 | if (hpriv->port_phys[port]) | 4218 | phy_power_off(hpriv->port_phys[port]); |
4220 | phy_power_off(hpriv->port_phys[port]); | ||
4221 | } | 4219 | } |
4222 | return 0; | 4220 | return 0; |
4223 | } | 4221 | } |
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c index cb0d2e644af5..d49a5193b7de 100644 --- a/drivers/ata/sata_rcar.c +++ b/drivers/ata/sata_rcar.c | |||
@@ -2,8 +2,8 @@ | |||
2 | * Renesas R-Car SATA driver | 2 | * Renesas R-Car SATA driver |
3 | * | 3 | * |
4 | * Author: Vladimir Barinov <source@cogentembedded.com> | 4 | * Author: Vladimir Barinov <source@cogentembedded.com> |
5 | * Copyright (C) 2013 Cogent Embedded, Inc. | 5 | * Copyright (C) 2013-2015 Cogent Embedded, Inc. |
6 | * Copyright (C) 2013 Renesas Solutions Corp. | 6 | * Copyright (C) 2013-2015 Renesas Solutions Corp. |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify it | 8 | * This program is free software; you can redistribute it and/or modify it |
9 | * under the terms of the GNU General Public License as published by the | 9 | * under the terms of the GNU General Public License as published by the |
@@ -992,9 +992,30 @@ static int sata_rcar_resume(struct device *dev) | |||
992 | return 0; | 992 | return 0; |
993 | } | 993 | } |
994 | 994 | ||
995 | static int sata_rcar_restore(struct device *dev) | ||
996 | { | ||
997 | struct ata_host *host = dev_get_drvdata(dev); | ||
998 | struct sata_rcar_priv *priv = host->private_data; | ||
999 | |||
1000 | clk_prepare_enable(priv->clk); | ||
1001 | |||
1002 | sata_rcar_setup_port(host); | ||
1003 | |||
1004 | /* initialize host controller */ | ||
1005 | sata_rcar_init_controller(host); | ||
1006 | |||
1007 | ata_host_resume(host); | ||
1008 | |||
1009 | return 0; | ||
1010 | } | ||
1011 | |||
995 | static const struct dev_pm_ops sata_rcar_pm_ops = { | 1012 | static const struct dev_pm_ops sata_rcar_pm_ops = { |
996 | .suspend = sata_rcar_suspend, | 1013 | .suspend = sata_rcar_suspend, |
997 | .resume = sata_rcar_resume, | 1014 | .resume = sata_rcar_resume, |
1015 | .freeze = sata_rcar_suspend, | ||
1016 | .thaw = sata_rcar_resume, | ||
1017 | .poweroff = sata_rcar_suspend, | ||
1018 | .restore = sata_rcar_restore, | ||
998 | }; | 1019 | }; |
999 | #endif | 1020 | #endif |
1000 | 1021 | ||
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index 58470c395301..c3293f0a8573 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c | |||
@@ -855,7 +855,6 @@ fw_create_instance(struct firmware *firmware, const char *fw_name, | |||
855 | 855 | ||
856 | fw_priv = kzalloc(sizeof(*fw_priv), GFP_KERNEL); | 856 | fw_priv = kzalloc(sizeof(*fw_priv), GFP_KERNEL); |
857 | if (!fw_priv) { | 857 | if (!fw_priv) { |
858 | dev_err(device, "%s: kmalloc failed\n", __func__); | ||
859 | fw_priv = ERR_PTR(-ENOMEM); | 858 | fw_priv = ERR_PTR(-ENOMEM); |
860 | goto exit; | 859 | goto exit; |
861 | } | 860 | } |
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index d626576a4f75..7fdd0172605a 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c | |||
@@ -81,10 +81,8 @@ static int __pm_clk_add(struct device *dev, const char *con_id, | |||
81 | return -EINVAL; | 81 | return -EINVAL; |
82 | 82 | ||
83 | ce = kzalloc(sizeof(*ce), GFP_KERNEL); | 83 | ce = kzalloc(sizeof(*ce), GFP_KERNEL); |
84 | if (!ce) { | 84 | if (!ce) |
85 | dev_err(dev, "Not enough memory for clock entry.\n"); | ||
86 | return -ENOMEM; | 85 | return -ENOMEM; |
87 | } | ||
88 | 86 | ||
89 | if (con_id) { | 87 | if (con_id) { |
90 | ce->con_id = kstrdup(con_id, GFP_KERNEL); | 88 | ce->con_id = kstrdup(con_id, GFP_KERNEL); |
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c index b0f138806bbc..f32b802b98f4 100644 --- a/drivers/base/power/common.c +++ b/drivers/base/power/common.c | |||
@@ -19,8 +19,8 @@ | |||
19 | * @dev: Device to handle. | 19 | * @dev: Device to handle. |
20 | * | 20 | * |
21 | * If power.subsys_data is NULL, point it to a new object, otherwise increment | 21 | * If power.subsys_data is NULL, point it to a new object, otherwise increment |
22 | * its reference counter. Return 1 if a new object has been created, otherwise | 22 | * its reference counter. Return 0 if new object has been created or refcount |
23 | * return 0 or error code. | 23 | * increased, otherwise negative error code. |
24 | */ | 24 | */ |
25 | int dev_pm_get_subsys_data(struct device *dev) | 25 | int dev_pm_get_subsys_data(struct device *dev) |
26 | { | 26 | { |
@@ -56,13 +56,11 @@ EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data); | |||
56 | * @dev: Device to handle. | 56 | * @dev: Device to handle. |
57 | * | 57 | * |
58 | * If the reference counter of power.subsys_data is zero after dropping the | 58 | * If the reference counter of power.subsys_data is zero after dropping the |
59 | * reference, power.subsys_data is removed. Return 1 if that happens or 0 | 59 | * reference, power.subsys_data is removed. |
60 | * otherwise. | ||
61 | */ | 60 | */ |
62 | int dev_pm_put_subsys_data(struct device *dev) | 61 | void dev_pm_put_subsys_data(struct device *dev) |
63 | { | 62 | { |
64 | struct pm_subsys_data *psd; | 63 | struct pm_subsys_data *psd; |
65 | int ret = 1; | ||
66 | 64 | ||
67 | spin_lock_irq(&dev->power.lock); | 65 | spin_lock_irq(&dev->power.lock); |
68 | 66 | ||
@@ -70,18 +68,14 @@ int dev_pm_put_subsys_data(struct device *dev) | |||
70 | if (!psd) | 68 | if (!psd) |
71 | goto out; | 69 | goto out; |
72 | 70 | ||
73 | if (--psd->refcount == 0) { | 71 | if (--psd->refcount == 0) |
74 | dev->power.subsys_data = NULL; | 72 | dev->power.subsys_data = NULL; |
75 | } else { | 73 | else |
76 | psd = NULL; | 74 | psd = NULL; |
77 | ret = 0; | ||
78 | } | ||
79 | 75 | ||
80 | out: | 76 | out: |
81 | spin_unlock_irq(&dev->power.lock); | 77 | spin_unlock_irq(&dev->power.lock); |
82 | kfree(psd); | 78 | kfree(psd); |
83 | |||
84 | return ret; | ||
85 | } | 79 | } |
86 | EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data); | 80 | EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data); |
87 | 81 | ||
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 0d8780c04a5e..ba4abbe4693c 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c | |||
@@ -344,14 +344,7 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb, | |||
344 | struct device *dev; | 344 | struct device *dev; |
345 | 345 | ||
346 | gpd_data = container_of(nb, struct generic_pm_domain_data, nb); | 346 | gpd_data = container_of(nb, struct generic_pm_domain_data, nb); |
347 | |||
348 | mutex_lock(&gpd_data->lock); | ||
349 | dev = gpd_data->base.dev; | 347 | dev = gpd_data->base.dev; |
350 | if (!dev) { | ||
351 | mutex_unlock(&gpd_data->lock); | ||
352 | return NOTIFY_DONE; | ||
353 | } | ||
354 | mutex_unlock(&gpd_data->lock); | ||
355 | 348 | ||
356 | for (;;) { | 349 | for (;;) { |
357 | struct generic_pm_domain *genpd; | 350 | struct generic_pm_domain *genpd; |
@@ -1384,25 +1377,66 @@ EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron); | |||
1384 | 1377 | ||
1385 | #endif /* CONFIG_PM_SLEEP */ | 1378 | #endif /* CONFIG_PM_SLEEP */ |
1386 | 1379 | ||
1387 | static struct generic_pm_domain_data *__pm_genpd_alloc_dev_data(struct device *dev) | 1380 | static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev, |
1381 | struct generic_pm_domain *genpd, | ||
1382 | struct gpd_timing_data *td) | ||
1388 | { | 1383 | { |
1389 | struct generic_pm_domain_data *gpd_data; | 1384 | struct generic_pm_domain_data *gpd_data; |
1385 | int ret; | ||
1386 | |||
1387 | ret = dev_pm_get_subsys_data(dev); | ||
1388 | if (ret) | ||
1389 | return ERR_PTR(ret); | ||
1390 | 1390 | ||
1391 | gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL); | 1391 | gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL); |
1392 | if (!gpd_data) | 1392 | if (!gpd_data) { |
1393 | return NULL; | 1393 | ret = -ENOMEM; |
1394 | goto err_put; | ||
1395 | } | ||
1396 | |||
1397 | if (td) | ||
1398 | gpd_data->td = *td; | ||
1394 | 1399 | ||
1395 | mutex_init(&gpd_data->lock); | 1400 | gpd_data->base.dev = dev; |
1401 | gpd_data->need_restore = -1; | ||
1402 | gpd_data->td.constraint_changed = true; | ||
1403 | gpd_data->td.effective_constraint_ns = -1; | ||
1396 | gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier; | 1404 | gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier; |
1397 | dev_pm_qos_add_notifier(dev, &gpd_data->nb); | 1405 | |
1406 | spin_lock_irq(&dev->power.lock); | ||
1407 | |||
1408 | if (dev->power.subsys_data->domain_data) { | ||
1409 | ret = -EINVAL; | ||
1410 | goto err_free; | ||
1411 | } | ||
1412 | |||
1413 | dev->power.subsys_data->domain_data = &gpd_data->base; | ||
1414 | dev->pm_domain = &genpd->domain; | ||
1415 | |||
1416 | spin_unlock_irq(&dev->power.lock); | ||
1417 | |||
1398 | return gpd_data; | 1418 | return gpd_data; |
1419 | |||
1420 | err_free: | ||
1421 | spin_unlock_irq(&dev->power.lock); | ||
1422 | kfree(gpd_data); | ||
1423 | err_put: | ||
1424 | dev_pm_put_subsys_data(dev); | ||
1425 | return ERR_PTR(ret); | ||
1399 | } | 1426 | } |
1400 | 1427 | ||
1401 | static void __pm_genpd_free_dev_data(struct device *dev, | 1428 | static void genpd_free_dev_data(struct device *dev, |
1402 | struct generic_pm_domain_data *gpd_data) | 1429 | struct generic_pm_domain_data *gpd_data) |
1403 | { | 1430 | { |
1404 | dev_pm_qos_remove_notifier(dev, &gpd_data->nb); | 1431 | spin_lock_irq(&dev->power.lock); |
1432 | |||
1433 | dev->pm_domain = NULL; | ||
1434 | dev->power.subsys_data->domain_data = NULL; | ||
1435 | |||
1436 | spin_unlock_irq(&dev->power.lock); | ||
1437 | |||
1405 | kfree(gpd_data); | 1438 | kfree(gpd_data); |
1439 | dev_pm_put_subsys_data(dev); | ||
1406 | } | 1440 | } |
1407 | 1441 | ||
1408 | /** | 1442 | /** |
@@ -1414,8 +1448,7 @@ static void __pm_genpd_free_dev_data(struct device *dev, | |||
1414 | int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, | 1448 | int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, |
1415 | struct gpd_timing_data *td) | 1449 | struct gpd_timing_data *td) |
1416 | { | 1450 | { |
1417 | struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL; | 1451 | struct generic_pm_domain_data *gpd_data; |
1418 | struct pm_domain_data *pdd; | ||
1419 | int ret = 0; | 1452 | int ret = 0; |
1420 | 1453 | ||
1421 | dev_dbg(dev, "%s()\n", __func__); | 1454 | dev_dbg(dev, "%s()\n", __func__); |
@@ -1423,9 +1456,9 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, | |||
1423 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) | 1456 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) |
1424 | return -EINVAL; | 1457 | return -EINVAL; |
1425 | 1458 | ||
1426 | gpd_data_new = __pm_genpd_alloc_dev_data(dev); | 1459 | gpd_data = genpd_alloc_dev_data(dev, genpd, td); |
1427 | if (!gpd_data_new) | 1460 | if (IS_ERR(gpd_data)) |
1428 | return -ENOMEM; | 1461 | return PTR_ERR(gpd_data); |
1429 | 1462 | ||
1430 | genpd_acquire_lock(genpd); | 1463 | genpd_acquire_lock(genpd); |
1431 | 1464 | ||
@@ -1434,50 +1467,22 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, | |||
1434 | goto out; | 1467 | goto out; |
1435 | } | 1468 | } |
1436 | 1469 | ||
1437 | list_for_each_entry(pdd, &genpd->dev_list, list_node) | 1470 | ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0; |
1438 | if (pdd->dev == dev) { | ||
1439 | ret = -EINVAL; | ||
1440 | goto out; | ||
1441 | } | ||
1442 | |||
1443 | ret = dev_pm_get_subsys_data(dev); | ||
1444 | if (ret) | 1471 | if (ret) |
1445 | goto out; | 1472 | goto out; |
1446 | 1473 | ||
1447 | genpd->device_count++; | 1474 | genpd->device_count++; |
1448 | genpd->max_off_time_changed = true; | 1475 | genpd->max_off_time_changed = true; |
1449 | 1476 | ||
1450 | spin_lock_irq(&dev->power.lock); | ||
1451 | |||
1452 | dev->pm_domain = &genpd->domain; | ||
1453 | if (dev->power.subsys_data->domain_data) { | ||
1454 | gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); | ||
1455 | } else { | ||
1456 | gpd_data = gpd_data_new; | ||
1457 | dev->power.subsys_data->domain_data = &gpd_data->base; | ||
1458 | } | ||
1459 | gpd_data->refcount++; | ||
1460 | if (td) | ||
1461 | gpd_data->td = *td; | ||
1462 | |||
1463 | spin_unlock_irq(&dev->power.lock); | ||
1464 | |||
1465 | if (genpd->attach_dev) | ||
1466 | genpd->attach_dev(genpd, dev); | ||
1467 | |||
1468 | mutex_lock(&gpd_data->lock); | ||
1469 | gpd_data->base.dev = dev; | ||
1470 | list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); | 1477 | list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); |
1471 | gpd_data->need_restore = -1; | ||
1472 | gpd_data->td.constraint_changed = true; | ||
1473 | gpd_data->td.effective_constraint_ns = -1; | ||
1474 | mutex_unlock(&gpd_data->lock); | ||
1475 | 1478 | ||
1476 | out: | 1479 | out: |
1477 | genpd_release_lock(genpd); | 1480 | genpd_release_lock(genpd); |
1478 | 1481 | ||
1479 | if (gpd_data != gpd_data_new) | 1482 | if (ret) |
1480 | __pm_genpd_free_dev_data(dev, gpd_data_new); | 1483 | genpd_free_dev_data(dev, gpd_data); |
1484 | else | ||
1485 | dev_pm_qos_add_notifier(dev, &gpd_data->nb); | ||
1481 | 1486 | ||
1482 | return ret; | 1487 | return ret; |
1483 | } | 1488 | } |
@@ -1504,7 +1509,6 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, | |||
1504 | { | 1509 | { |
1505 | struct generic_pm_domain_data *gpd_data; | 1510 | struct generic_pm_domain_data *gpd_data; |
1506 | struct pm_domain_data *pdd; | 1511 | struct pm_domain_data *pdd; |
1507 | bool remove = false; | ||
1508 | int ret = 0; | 1512 | int ret = 0; |
1509 | 1513 | ||
1510 | dev_dbg(dev, "%s()\n", __func__); | 1514 | dev_dbg(dev, "%s()\n", __func__); |
@@ -1514,6 +1518,11 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, | |||
1514 | || pd_to_genpd(dev->pm_domain) != genpd) | 1518 | || pd_to_genpd(dev->pm_domain) != genpd) |
1515 | return -EINVAL; | 1519 | return -EINVAL; |
1516 | 1520 | ||
1521 | /* The above validation also means we have existing domain_data. */ | ||
1522 | pdd = dev->power.subsys_data->domain_data; | ||
1523 | gpd_data = to_gpd_data(pdd); | ||
1524 | dev_pm_qos_remove_notifier(dev, &gpd_data->nb); | ||
1525 | |||
1517 | genpd_acquire_lock(genpd); | 1526 | genpd_acquire_lock(genpd); |
1518 | 1527 | ||
1519 | if (genpd->prepared_count > 0) { | 1528 | if (genpd->prepared_count > 0) { |
@@ -1527,58 +1536,22 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, | |||
1527 | if (genpd->detach_dev) | 1536 | if (genpd->detach_dev) |
1528 | genpd->detach_dev(genpd, dev); | 1537 | genpd->detach_dev(genpd, dev); |
1529 | 1538 | ||
1530 | spin_lock_irq(&dev->power.lock); | ||
1531 | |||
1532 | dev->pm_domain = NULL; | ||
1533 | pdd = dev->power.subsys_data->domain_data; | ||
1534 | list_del_init(&pdd->list_node); | 1539 | list_del_init(&pdd->list_node); |
1535 | gpd_data = to_gpd_data(pdd); | ||
1536 | if (--gpd_data->refcount == 0) { | ||
1537 | dev->power.subsys_data->domain_data = NULL; | ||
1538 | remove = true; | ||
1539 | } | ||
1540 | |||
1541 | spin_unlock_irq(&dev->power.lock); | ||
1542 | |||
1543 | mutex_lock(&gpd_data->lock); | ||
1544 | pdd->dev = NULL; | ||
1545 | mutex_unlock(&gpd_data->lock); | ||
1546 | 1540 | ||
1547 | genpd_release_lock(genpd); | 1541 | genpd_release_lock(genpd); |
1548 | 1542 | ||
1549 | dev_pm_put_subsys_data(dev); | 1543 | genpd_free_dev_data(dev, gpd_data); |
1550 | if (remove) | ||
1551 | __pm_genpd_free_dev_data(dev, gpd_data); | ||
1552 | 1544 | ||
1553 | return 0; | 1545 | return 0; |
1554 | 1546 | ||
1555 | out: | 1547 | out: |
1556 | genpd_release_lock(genpd); | 1548 | genpd_release_lock(genpd); |
1549 | dev_pm_qos_add_notifier(dev, &gpd_data->nb); | ||
1557 | 1550 | ||
1558 | return ret; | 1551 | return ret; |
1559 | } | 1552 | } |
1560 | 1553 | ||
1561 | /** | 1554 | /** |
1562 | * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag. | ||
1563 | * @dev: Device to set/unset the flag for. | ||
1564 | * @val: The new value of the device's "need restore" flag. | ||
1565 | */ | ||
1566 | void pm_genpd_dev_need_restore(struct device *dev, bool val) | ||
1567 | { | ||
1568 | struct pm_subsys_data *psd; | ||
1569 | unsigned long flags; | ||
1570 | |||
1571 | spin_lock_irqsave(&dev->power.lock, flags); | ||
1572 | |||
1573 | psd = dev_to_psd(dev); | ||
1574 | if (psd && psd->domain_data) | ||
1575 | to_gpd_data(psd->domain_data)->need_restore = val ? 1 : 0; | ||
1576 | |||
1577 | spin_unlock_irqrestore(&dev->power.lock, flags); | ||
1578 | } | ||
1579 | EXPORT_SYMBOL_GPL(pm_genpd_dev_need_restore); | ||
1580 | |||
1581 | /** | ||
1582 | * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. | 1555 | * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. |
1583 | * @genpd: Master PM domain to add the subdomain to. | 1556 | * @genpd: Master PM domain to add the subdomain to. |
1584 | * @subdomain: Subdomain to be added. | 1557 | * @subdomain: Subdomain to be added. |
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index 106c69359306..677fb2843553 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c | |||
@@ -117,20 +117,20 @@ do { \ | |||
117 | } while (0) | 117 | } while (0) |
118 | 118 | ||
119 | /** | 119 | /** |
120 | * find_device_opp() - find device_opp struct using device pointer | 120 | * _find_device_opp() - find device_opp struct using device pointer |
121 | * @dev: device pointer used to lookup device OPPs | 121 | * @dev: device pointer used to lookup device OPPs |
122 | * | 122 | * |
123 | * Search list of device OPPs for one containing matching device. Does a RCU | 123 | * Search list of device OPPs for one containing matching device. Does a RCU |
124 | * reader operation to grab the pointer needed. | 124 | * reader operation to grab the pointer needed. |
125 | * | 125 | * |
126 | * Returns pointer to 'struct device_opp' if found, otherwise -ENODEV or | 126 | * Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or |
127 | * -EINVAL based on type of error. | 127 | * -EINVAL based on type of error. |
128 | * | 128 | * |
129 | * Locking: This function must be called under rcu_read_lock(). device_opp | 129 | * Locking: This function must be called under rcu_read_lock(). device_opp |
130 | * is a RCU protected pointer. This means that device_opp is valid as long | 130 | * is a RCU protected pointer. This means that device_opp is valid as long |
131 | * as we are under RCU lock. | 131 | * as we are under RCU lock. |
132 | */ | 132 | */ |
133 | static struct device_opp *find_device_opp(struct device *dev) | 133 | static struct device_opp *_find_device_opp(struct device *dev) |
134 | { | 134 | { |
135 | struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV); | 135 | struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV); |
136 | 136 | ||
@@ -153,7 +153,7 @@ static struct device_opp *find_device_opp(struct device *dev) | |||
153 | * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp | 153 | * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp |
154 | * @opp: opp for which voltage has to be returned for | 154 | * @opp: opp for which voltage has to be returned for |
155 | * | 155 | * |
156 | * Return voltage in micro volt corresponding to the opp, else | 156 | * Return: voltage in micro volt corresponding to the opp, else |
157 | * return 0 | 157 | * return 0 |
158 | * | 158 | * |
159 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu | 159 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu |
@@ -169,6 +169,8 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) | |||
169 | struct dev_pm_opp *tmp_opp; | 169 | struct dev_pm_opp *tmp_opp; |
170 | unsigned long v = 0; | 170 | unsigned long v = 0; |
171 | 171 | ||
172 | opp_rcu_lockdep_assert(); | ||
173 | |||
172 | tmp_opp = rcu_dereference(opp); | 174 | tmp_opp = rcu_dereference(opp); |
173 | if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available) | 175 | if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available) |
174 | pr_err("%s: Invalid parameters\n", __func__); | 176 | pr_err("%s: Invalid parameters\n", __func__); |
@@ -183,7 +185,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage); | |||
183 | * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp | 185 | * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp |
184 | * @opp: opp for which frequency has to be returned for | 186 | * @opp: opp for which frequency has to be returned for |
185 | * | 187 | * |
186 | * Return frequency in hertz corresponding to the opp, else | 188 | * Return: frequency in hertz corresponding to the opp, else |
187 | * return 0 | 189 | * return 0 |
188 | * | 190 | * |
189 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu | 191 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu |
@@ -199,6 +201,8 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) | |||
199 | struct dev_pm_opp *tmp_opp; | 201 | struct dev_pm_opp *tmp_opp; |
200 | unsigned long f = 0; | 202 | unsigned long f = 0; |
201 | 203 | ||
204 | opp_rcu_lockdep_assert(); | ||
205 | |||
202 | tmp_opp = rcu_dereference(opp); | 206 | tmp_opp = rcu_dereference(opp); |
203 | if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available) | 207 | if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available) |
204 | pr_err("%s: Invalid parameters\n", __func__); | 208 | pr_err("%s: Invalid parameters\n", __func__); |
@@ -213,7 +217,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq); | |||
213 | * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list | 217 | * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list |
214 | * @dev: device for which we do this operation | 218 | * @dev: device for which we do this operation |
215 | * | 219 | * |
216 | * This function returns the number of available opps if there are any, | 220 | * Return: This function returns the number of available opps if there are any, |
217 | * else returns 0 if none or the corresponding error value. | 221 | * else returns 0 if none or the corresponding error value. |
218 | * | 222 | * |
219 | * Locking: This function takes rcu_read_lock(). | 223 | * Locking: This function takes rcu_read_lock(). |
@@ -226,7 +230,7 @@ int dev_pm_opp_get_opp_count(struct device *dev) | |||
226 | 230 | ||
227 | rcu_read_lock(); | 231 | rcu_read_lock(); |
228 | 232 | ||
229 | dev_opp = find_device_opp(dev); | 233 | dev_opp = _find_device_opp(dev); |
230 | if (IS_ERR(dev_opp)) { | 234 | if (IS_ERR(dev_opp)) { |
231 | count = PTR_ERR(dev_opp); | 235 | count = PTR_ERR(dev_opp); |
232 | dev_err(dev, "%s: device OPP not found (%d)\n", | 236 | dev_err(dev, "%s: device OPP not found (%d)\n", |
@@ -251,9 +255,9 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count); | |||
251 | * @freq: frequency to search for | 255 | * @freq: frequency to search for |
252 | * @available: true/false - match for available opp | 256 | * @available: true/false - match for available opp |
253 | * | 257 | * |
254 | * Searches for exact match in the opp list and returns pointer to the matching | 258 | * Return: Searches for exact match in the opp list and returns pointer to the |
255 | * opp if found, else returns ERR_PTR in case of error and should be handled | 259 | * matching opp if found, else returns ERR_PTR in case of error and should |
256 | * using IS_ERR. Error return values can be: | 260 | * be handled using IS_ERR. Error return values can be: |
257 | * EINVAL: for bad pointer | 261 | * EINVAL: for bad pointer |
258 | * ERANGE: no match found for search | 262 | * ERANGE: no match found for search |
259 | * ENODEV: if device not found in list of registered devices | 263 | * ENODEV: if device not found in list of registered devices |
@@ -280,7 +284,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, | |||
280 | 284 | ||
281 | opp_rcu_lockdep_assert(); | 285 | opp_rcu_lockdep_assert(); |
282 | 286 | ||
283 | dev_opp = find_device_opp(dev); | 287 | dev_opp = _find_device_opp(dev); |
284 | if (IS_ERR(dev_opp)) { | 288 | if (IS_ERR(dev_opp)) { |
285 | int r = PTR_ERR(dev_opp); | 289 | int r = PTR_ERR(dev_opp); |
286 | dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r); | 290 | dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r); |
@@ -307,7 +311,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact); | |||
307 | * Search for the matching ceil *available* OPP from a starting freq | 311 | * Search for the matching ceil *available* OPP from a starting freq |
308 | * for a device. | 312 | * for a device. |
309 | * | 313 | * |
310 | * Returns matching *opp and refreshes *freq accordingly, else returns | 314 | * Return: matching *opp and refreshes *freq accordingly, else returns |
311 | * ERR_PTR in case of error and should be handled using IS_ERR. Error return | 315 | * ERR_PTR in case of error and should be handled using IS_ERR. Error return |
312 | * values can be: | 316 | * values can be: |
313 | * EINVAL: for bad pointer | 317 | * EINVAL: for bad pointer |
@@ -333,7 +337,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, | |||
333 | return ERR_PTR(-EINVAL); | 337 | return ERR_PTR(-EINVAL); |
334 | } | 338 | } |
335 | 339 | ||
336 | dev_opp = find_device_opp(dev); | 340 | dev_opp = _find_device_opp(dev); |
337 | if (IS_ERR(dev_opp)) | 341 | if (IS_ERR(dev_opp)) |
338 | return ERR_CAST(dev_opp); | 342 | return ERR_CAST(dev_opp); |
339 | 343 | ||
@@ -357,7 +361,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil); | |||
357 | * Search for the matching floor *available* OPP from a starting freq | 361 | * Search for the matching floor *available* OPP from a starting freq |
358 | * for a device. | 362 | * for a device. |
359 | * | 363 | * |
360 | * Returns matching *opp and refreshes *freq accordingly, else returns | 364 | * Return: matching *opp and refreshes *freq accordingly, else returns |
361 | * ERR_PTR in case of error and should be handled using IS_ERR. Error return | 365 | * ERR_PTR in case of error and should be handled using IS_ERR. Error return |
362 | * values can be: | 366 | * values can be: |
363 | * EINVAL: for bad pointer | 367 | * EINVAL: for bad pointer |
@@ -383,7 +387,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, | |||
383 | return ERR_PTR(-EINVAL); | 387 | return ERR_PTR(-EINVAL); |
384 | } | 388 | } |
385 | 389 | ||
386 | dev_opp = find_device_opp(dev); | 390 | dev_opp = _find_device_opp(dev); |
387 | if (IS_ERR(dev_opp)) | 391 | if (IS_ERR(dev_opp)) |
388 | return ERR_CAST(dev_opp); | 392 | return ERR_CAST(dev_opp); |
389 | 393 | ||
@@ -403,7 +407,16 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, | |||
403 | } | 407 | } |
404 | EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor); | 408 | EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor); |
405 | 409 | ||
406 | static struct device_opp *add_device_opp(struct device *dev) | 410 | /** |
411 | * _add_device_opp() - Allocate a new device OPP table | ||
412 | * @dev: device for which we do this operation | ||
413 | * | ||
414 | * New device node which uses OPPs - used when multiple devices with OPP tables | ||
415 | * are maintained. | ||
416 | * | ||
417 | * Return: valid device_opp pointer if success, else NULL. | ||
418 | */ | ||
419 | static struct device_opp *_add_device_opp(struct device *dev) | ||
407 | { | 420 | { |
408 | struct device_opp *dev_opp; | 421 | struct device_opp *dev_opp; |
409 | 422 | ||
@@ -424,8 +437,35 @@ static struct device_opp *add_device_opp(struct device *dev) | |||
424 | return dev_opp; | 437 | return dev_opp; |
425 | } | 438 | } |
426 | 439 | ||
427 | static int dev_pm_opp_add_dynamic(struct device *dev, unsigned long freq, | 440 | /** |
428 | unsigned long u_volt, bool dynamic) | 441 | * _opp_add_dynamic() - Allocate a dynamic OPP. |
442 | * @dev: device for which we do this operation | ||
443 | * @freq: Frequency in Hz for this OPP | ||
444 | * @u_volt: Voltage in uVolts for this OPP | ||
445 | * @dynamic: Dynamically added OPPs. | ||
446 | * | ||
447 | * This function adds an opp definition to the opp list and returns status. | ||
448 | * The opp is made available by default and it can be controlled using | ||
449 | * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove. | ||
450 | * | ||
451 | * NOTE: "dynamic" parameter impacts OPPs added by the of_init_opp_table and | ||
452 | * freed by of_free_opp_table. | ||
453 | * | ||
454 | * Locking: The internal device_opp and opp structures are RCU protected. | ||
455 | * Hence this function internally uses RCU updater strategy with mutex locks | ||
456 | * to keep the integrity of the internal data structures. Callers should ensure | ||
457 | * that this function is *NOT* called under RCU protection or in contexts where | ||
458 | * mutex cannot be locked. | ||
459 | * | ||
460 | * Return: | ||
461 | * 0 On success OR | ||
462 | * Duplicate OPPs (both freq and volt are same) and opp->available | ||
463 | * -EEXIST Freq are same and volt are different OR | ||
464 | * Duplicate OPPs (both freq and volt are same) and !opp->available | ||
465 | * -ENOMEM Memory allocation failure | ||
466 | */ | ||
467 | static int _opp_add_dynamic(struct device *dev, unsigned long freq, | ||
468 | long u_volt, bool dynamic) | ||
429 | { | 469 | { |
430 | struct device_opp *dev_opp = NULL; | 470 | struct device_opp *dev_opp = NULL; |
431 | struct dev_pm_opp *opp, *new_opp; | 471 | struct dev_pm_opp *opp, *new_opp; |
@@ -434,10 +474,8 @@ static int dev_pm_opp_add_dynamic(struct device *dev, unsigned long freq, | |||
434 | 474 | ||
435 | /* allocate new OPP node */ | 475 | /* allocate new OPP node */ |
436 | new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL); | 476 | new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL); |
437 | if (!new_opp) { | 477 | if (!new_opp) |
438 | dev_warn(dev, "%s: Unable to create new OPP node\n", __func__); | ||
439 | return -ENOMEM; | 478 | return -ENOMEM; |
440 | } | ||
441 | 479 | ||
442 | /* Hold our list modification lock here */ | 480 | /* Hold our list modification lock here */ |
443 | mutex_lock(&dev_opp_list_lock); | 481 | mutex_lock(&dev_opp_list_lock); |
@@ -449,9 +487,9 @@ static int dev_pm_opp_add_dynamic(struct device *dev, unsigned long freq, | |||
449 | new_opp->dynamic = dynamic; | 487 | new_opp->dynamic = dynamic; |
450 | 488 | ||
451 | /* Check for existing list for 'dev' */ | 489 | /* Check for existing list for 'dev' */ |
452 | dev_opp = find_device_opp(dev); | 490 | dev_opp = _find_device_opp(dev); |
453 | if (IS_ERR(dev_opp)) { | 491 | if (IS_ERR(dev_opp)) { |
454 | dev_opp = add_device_opp(dev); | 492 | dev_opp = _add_device_opp(dev); |
455 | if (!dev_opp) { | 493 | if (!dev_opp) { |
456 | ret = -ENOMEM; | 494 | ret = -ENOMEM; |
457 | goto free_opp; | 495 | goto free_opp; |
@@ -519,34 +557,53 @@ free_opp: | |||
519 | * mutex cannot be locked. | 557 | * mutex cannot be locked. |
520 | * | 558 | * |
521 | * Return: | 559 | * Return: |
522 | * 0: On success OR | 560 | * 0 On success OR |
523 | * Duplicate OPPs (both freq and volt are same) and opp->available | 561 | * Duplicate OPPs (both freq and volt are same) and opp->available |
524 | * -EEXIST: Freq are same and volt are different OR | 562 | * -EEXIST Freq are same and volt are different OR |
525 | * Duplicate OPPs (both freq and volt are same) and !opp->available | 563 | * Duplicate OPPs (both freq and volt are same) and !opp->available |
526 | * -ENOMEM: Memory allocation failure | 564 | * -ENOMEM Memory allocation failure |
527 | */ | 565 | */ |
528 | int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) | 566 | int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) |
529 | { | 567 | { |
530 | return dev_pm_opp_add_dynamic(dev, freq, u_volt, true); | 568 | return _opp_add_dynamic(dev, freq, u_volt, true); |
531 | } | 569 | } |
532 | EXPORT_SYMBOL_GPL(dev_pm_opp_add); | 570 | EXPORT_SYMBOL_GPL(dev_pm_opp_add); |
533 | 571 | ||
534 | static void kfree_opp_rcu(struct rcu_head *head) | 572 | /** |
573 | * _kfree_opp_rcu() - Free OPP RCU handler | ||
574 | * @head: RCU head | ||
575 | */ | ||
576 | static void _kfree_opp_rcu(struct rcu_head *head) | ||
535 | { | 577 | { |
536 | struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head); | 578 | struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head); |
537 | 579 | ||
538 | kfree_rcu(opp, rcu_head); | 580 | kfree_rcu(opp, rcu_head); |
539 | } | 581 | } |
540 | 582 | ||
541 | static void kfree_device_rcu(struct rcu_head *head) | 583 | /** |
584 | * _kfree_device_rcu() - Free device_opp RCU handler | ||
585 | * @head: RCU head | ||
586 | */ | ||
587 | static void _kfree_device_rcu(struct rcu_head *head) | ||
542 | { | 588 | { |
543 | struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head); | 589 | struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head); |
544 | 590 | ||
545 | kfree_rcu(device_opp, rcu_head); | 591 | kfree_rcu(device_opp, rcu_head); |
546 | } | 592 | } |
547 | 593 | ||
548 | static void __dev_pm_opp_remove(struct device_opp *dev_opp, | 594 | /** |
549 | struct dev_pm_opp *opp) | 595 | * _opp_remove() - Remove an OPP from a table definition |
596 | * @dev_opp: points back to the device_opp struct this opp belongs to | ||
597 | * @opp: pointer to the OPP to remove | ||
598 | * | ||
599 | * This function removes an opp definition from the opp list. | ||
600 | * | ||
601 | * Locking: The internal device_opp and opp structures are RCU protected. | ||
602 | * It is assumed that the caller holds required mutex for an RCU updater | ||
603 | * strategy. | ||
604 | */ | ||
605 | static void _opp_remove(struct device_opp *dev_opp, | ||
606 | struct dev_pm_opp *opp) | ||
550 | { | 607 | { |
551 | /* | 608 | /* |
552 | * Notify the changes in the availability of the operable | 609 | * Notify the changes in the availability of the operable |
@@ -554,12 +611,12 @@ static void __dev_pm_opp_remove(struct device_opp *dev_opp, | |||
554 | */ | 611 | */ |
555 | srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp); | 612 | srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp); |
556 | list_del_rcu(&opp->node); | 613 | list_del_rcu(&opp->node); |
557 | call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, kfree_opp_rcu); | 614 | call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu); |
558 | 615 | ||
559 | if (list_empty(&dev_opp->opp_list)) { | 616 | if (list_empty(&dev_opp->opp_list)) { |
560 | list_del_rcu(&dev_opp->node); | 617 | list_del_rcu(&dev_opp->node); |
561 | call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head, | 618 | call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head, |
562 | kfree_device_rcu); | 619 | _kfree_device_rcu); |
563 | } | 620 | } |
564 | } | 621 | } |
565 | 622 | ||
@@ -569,6 +626,12 @@ static void __dev_pm_opp_remove(struct device_opp *dev_opp, | |||
569 | * @freq: OPP to remove with matching 'freq' | 626 | * @freq: OPP to remove with matching 'freq' |
570 | * | 627 | * |
571 | * This function removes an opp from the opp list. | 628 | * This function removes an opp from the opp list. |
629 | * | ||
630 | * Locking: The internal device_opp and opp structures are RCU protected. | ||
631 | * Hence this function internally uses RCU updater strategy with mutex locks | ||
632 | * to keep the integrity of the internal data structures. Callers should ensure | ||
633 | * that this function is *NOT* called under RCU protection or in contexts where | ||
634 | * mutex cannot be locked. | ||
572 | */ | 635 | */ |
573 | void dev_pm_opp_remove(struct device *dev, unsigned long freq) | 636 | void dev_pm_opp_remove(struct device *dev, unsigned long freq) |
574 | { | 637 | { |
@@ -579,7 +642,7 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq) | |||
579 | /* Hold our list modification lock here */ | 642 | /* Hold our list modification lock here */ |
580 | mutex_lock(&dev_opp_list_lock); | 643 | mutex_lock(&dev_opp_list_lock); |
581 | 644 | ||
582 | dev_opp = find_device_opp(dev); | 645 | dev_opp = _find_device_opp(dev); |
583 | if (IS_ERR(dev_opp)) | 646 | if (IS_ERR(dev_opp)) |
584 | goto unlock; | 647 | goto unlock; |
585 | 648 | ||
@@ -596,14 +659,14 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq) | |||
596 | goto unlock; | 659 | goto unlock; |
597 | } | 660 | } |
598 | 661 | ||
599 | __dev_pm_opp_remove(dev_opp, opp); | 662 | _opp_remove(dev_opp, opp); |
600 | unlock: | 663 | unlock: |
601 | mutex_unlock(&dev_opp_list_lock); | 664 | mutex_unlock(&dev_opp_list_lock); |
602 | } | 665 | } |
603 | EXPORT_SYMBOL_GPL(dev_pm_opp_remove); | 666 | EXPORT_SYMBOL_GPL(dev_pm_opp_remove); |
604 | 667 | ||
605 | /** | 668 | /** |
606 | * opp_set_availability() - helper to set the availability of an opp | 669 | * _opp_set_availability() - helper to set the availability of an opp |
607 | * @dev: device for which we do this operation | 670 | * @dev: device for which we do this operation |
608 | * @freq: OPP frequency to modify availability | 671 | * @freq: OPP frequency to modify availability |
609 | * @availability_req: availability status requested for this opp | 672 | * @availability_req: availability status requested for this opp |
@@ -611,7 +674,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_remove); | |||
611 | * Set the availability of an OPP with an RCU operation, opp_{enable,disable} | 674 | * Set the availability of an OPP with an RCU operation, opp_{enable,disable} |
612 | * share a common logic which is isolated here. | 675 | * share a common logic which is isolated here. |
613 | * | 676 | * |
614 | * Returns -EINVAL for bad pointers, -ENOMEM if no memory available for the | 677 | * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the |
615 | * copy operation, returns 0 if no modifcation was done OR modification was | 678 | * copy operation, returns 0 if no modifcation was done OR modification was |
616 | * successful. | 679 | * successful. |
617 | * | 680 | * |
@@ -621,8 +684,8 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_remove); | |||
621 | * that this function is *NOT* called under RCU protection or in contexts where | 684 | * that this function is *NOT* called under RCU protection or in contexts where |
622 | * mutex locking or synchronize_rcu() blocking calls cannot be used. | 685 | * mutex locking or synchronize_rcu() blocking calls cannot be used. |
623 | */ | 686 | */ |
624 | static int opp_set_availability(struct device *dev, unsigned long freq, | 687 | static int _opp_set_availability(struct device *dev, unsigned long freq, |
625 | bool availability_req) | 688 | bool availability_req) |
626 | { | 689 | { |
627 | struct device_opp *dev_opp; | 690 | struct device_opp *dev_opp; |
628 | struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV); | 691 | struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV); |
@@ -630,15 +693,13 @@ static int opp_set_availability(struct device *dev, unsigned long freq, | |||
630 | 693 | ||
631 | /* keep the node allocated */ | 694 | /* keep the node allocated */ |
632 | new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL); | 695 | new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL); |
633 | if (!new_opp) { | 696 | if (!new_opp) |
634 | dev_warn(dev, "%s: Unable to create OPP\n", __func__); | ||
635 | return -ENOMEM; | 697 | return -ENOMEM; |
636 | } | ||
637 | 698 | ||
638 | mutex_lock(&dev_opp_list_lock); | 699 | mutex_lock(&dev_opp_list_lock); |
639 | 700 | ||
640 | /* Find the device_opp */ | 701 | /* Find the device_opp */ |
641 | dev_opp = find_device_opp(dev); | 702 | dev_opp = _find_device_opp(dev); |
642 | if (IS_ERR(dev_opp)) { | 703 | if (IS_ERR(dev_opp)) { |
643 | r = PTR_ERR(dev_opp); | 704 | r = PTR_ERR(dev_opp); |
644 | dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r); | 705 | dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r); |
@@ -668,7 +729,7 @@ static int opp_set_availability(struct device *dev, unsigned long freq, | |||
668 | 729 | ||
669 | list_replace_rcu(&opp->node, &new_opp->node); | 730 | list_replace_rcu(&opp->node, &new_opp->node); |
670 | mutex_unlock(&dev_opp_list_lock); | 731 | mutex_unlock(&dev_opp_list_lock); |
671 | call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, kfree_opp_rcu); | 732 | call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu); |
672 | 733 | ||
673 | /* Notify the change of the OPP availability */ | 734 | /* Notify the change of the OPP availability */ |
674 | if (availability_req) | 735 | if (availability_req) |
@@ -700,10 +761,14 @@ unlock: | |||
700 | * integrity of the internal data structures. Callers should ensure that | 761 | * integrity of the internal data structures. Callers should ensure that |
701 | * this function is *NOT* called under RCU protection or in contexts where | 762 | * this function is *NOT* called under RCU protection or in contexts where |
702 | * mutex locking or synchronize_rcu() blocking calls cannot be used. | 763 | * mutex locking or synchronize_rcu() blocking calls cannot be used. |
764 | * | ||
765 | * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the | ||
766 | * copy operation, returns 0 if no modifcation was done OR modification was | ||
767 | * successful. | ||
703 | */ | 768 | */ |
704 | int dev_pm_opp_enable(struct device *dev, unsigned long freq) | 769 | int dev_pm_opp_enable(struct device *dev, unsigned long freq) |
705 | { | 770 | { |
706 | return opp_set_availability(dev, freq, true); | 771 | return _opp_set_availability(dev, freq, true); |
707 | } | 772 | } |
708 | EXPORT_SYMBOL_GPL(dev_pm_opp_enable); | 773 | EXPORT_SYMBOL_GPL(dev_pm_opp_enable); |
709 | 774 | ||
@@ -722,26 +787,41 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_enable); | |||
722 | * integrity of the internal data structures. Callers should ensure that | 787 | * integrity of the internal data structures. Callers should ensure that |
723 | * this function is *NOT* called under RCU protection or in contexts where | 788 | * this function is *NOT* called under RCU protection or in contexts where |
724 | * mutex locking or synchronize_rcu() blocking calls cannot be used. | 789 | * mutex locking or synchronize_rcu() blocking calls cannot be used. |
790 | * | ||
791 | * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the | ||
792 | * copy operation, returns 0 if no modifcation was done OR modification was | ||
793 | * successful. | ||
725 | */ | 794 | */ |
726 | int dev_pm_opp_disable(struct device *dev, unsigned long freq) | 795 | int dev_pm_opp_disable(struct device *dev, unsigned long freq) |
727 | { | 796 | { |
728 | return opp_set_availability(dev, freq, false); | 797 | return _opp_set_availability(dev, freq, false); |
729 | } | 798 | } |
730 | EXPORT_SYMBOL_GPL(dev_pm_opp_disable); | 799 | EXPORT_SYMBOL_GPL(dev_pm_opp_disable); |
731 | 800 | ||
732 | /** | 801 | /** |
733 | * dev_pm_opp_get_notifier() - find notifier_head of the device with opp | 802 | * dev_pm_opp_get_notifier() - find notifier_head of the device with opp |
734 | * @dev: device pointer used to lookup device OPPs. | 803 | * @dev: device pointer used to lookup device OPPs. |
804 | * | ||
805 | * Return: pointer to notifier head if found, otherwise -ENODEV or | ||
806 | * -EINVAL based on type of error casted as pointer. value must be checked | ||
807 | * with IS_ERR to determine valid pointer or error result. | ||
808 | * | ||
809 | * Locking: This function must be called under rcu_read_lock(). dev_opp is a RCU | ||
810 | * protected pointer. The reason for the same is that the opp pointer which is | ||
811 | * returned will remain valid for use with opp_get_{voltage, freq} only while | ||
812 | * under the locked area. The pointer returned must be used prior to unlocking | ||
813 | * with rcu_read_unlock() to maintain the integrity of the pointer. | ||
735 | */ | 814 | */ |
736 | struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev) | 815 | struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev) |
737 | { | 816 | { |
738 | struct device_opp *dev_opp = find_device_opp(dev); | 817 | struct device_opp *dev_opp = _find_device_opp(dev); |
739 | 818 | ||
740 | if (IS_ERR(dev_opp)) | 819 | if (IS_ERR(dev_opp)) |
741 | return ERR_CAST(dev_opp); /* matching type */ | 820 | return ERR_CAST(dev_opp); /* matching type */ |
742 | 821 | ||
743 | return &dev_opp->srcu_head; | 822 | return &dev_opp->srcu_head; |
744 | } | 823 | } |
824 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier); | ||
745 | 825 | ||
746 | #ifdef CONFIG_OF | 826 | #ifdef CONFIG_OF |
747 | /** | 827 | /** |
@@ -749,6 +829,22 @@ struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev) | |||
749 | * @dev: device pointer used to lookup device OPPs. | 829 | * @dev: device pointer used to lookup device OPPs. |
750 | * | 830 | * |
751 | * Register the initial OPP table with the OPP library for given device. | 831 | * Register the initial OPP table with the OPP library for given device. |
832 | * | ||
833 | * Locking: The internal device_opp and opp structures are RCU protected. | ||
834 | * Hence this function indirectly uses RCU updater strategy with mutex locks | ||
835 | * to keep the integrity of the internal data structures. Callers should ensure | ||
836 | * that this function is *NOT* called under RCU protection or in contexts where | ||
837 | * mutex cannot be locked. | ||
838 | * | ||
839 | * Return: | ||
840 | * 0 On success OR | ||
841 | * Duplicate OPPs (both freq and volt are same) and opp->available | ||
842 | * -EEXIST Freq are same and volt are different OR | ||
843 | * Duplicate OPPs (both freq and volt are same) and !opp->available | ||
844 | * -ENOMEM Memory allocation failure | ||
845 | * -ENODEV when 'operating-points' property is not found or is invalid data | ||
846 | * in device node. | ||
847 | * -ENODATA when empty 'operating-points' property is found | ||
752 | */ | 848 | */ |
753 | int of_init_opp_table(struct device *dev) | 849 | int of_init_opp_table(struct device *dev) |
754 | { | 850 | { |
@@ -777,7 +873,7 @@ int of_init_opp_table(struct device *dev) | |||
777 | unsigned long freq = be32_to_cpup(val++) * 1000; | 873 | unsigned long freq = be32_to_cpup(val++) * 1000; |
778 | unsigned long volt = be32_to_cpup(val++); | 874 | unsigned long volt = be32_to_cpup(val++); |
779 | 875 | ||
780 | if (dev_pm_opp_add_dynamic(dev, freq, volt, false)) | 876 | if (_opp_add_dynamic(dev, freq, volt, false)) |
781 | dev_warn(dev, "%s: Failed to add OPP %ld\n", | 877 | dev_warn(dev, "%s: Failed to add OPP %ld\n", |
782 | __func__, freq); | 878 | __func__, freq); |
783 | nr -= 2; | 879 | nr -= 2; |
@@ -792,6 +888,12 @@ EXPORT_SYMBOL_GPL(of_init_opp_table); | |||
792 | * @dev: device pointer used to lookup device OPPs. | 888 | * @dev: device pointer used to lookup device OPPs. |
793 | * | 889 | * |
794 | * Free OPPs created using static entries present in DT. | 890 | * Free OPPs created using static entries present in DT. |
891 | * | ||
892 | * Locking: The internal device_opp and opp structures are RCU protected. | ||
893 | * Hence this function indirectly uses RCU updater strategy with mutex locks | ||
894 | * to keep the integrity of the internal data structures. Callers should ensure | ||
895 | * that this function is *NOT* called under RCU protection or in contexts where | ||
896 | * mutex cannot be locked. | ||
795 | */ | 897 | */ |
796 | void of_free_opp_table(struct device *dev) | 898 | void of_free_opp_table(struct device *dev) |
797 | { | 899 | { |
@@ -799,7 +901,7 @@ void of_free_opp_table(struct device *dev) | |||
799 | struct dev_pm_opp *opp, *tmp; | 901 | struct dev_pm_opp *opp, *tmp; |
800 | 902 | ||
801 | /* Check for existing list for 'dev' */ | 903 | /* Check for existing list for 'dev' */ |
802 | dev_opp = find_device_opp(dev); | 904 | dev_opp = _find_device_opp(dev); |
803 | if (IS_ERR(dev_opp)) { | 905 | if (IS_ERR(dev_opp)) { |
804 | int error = PTR_ERR(dev_opp); | 906 | int error = PTR_ERR(dev_opp); |
805 | if (error != -ENODEV) | 907 | if (error != -ENODEV) |
@@ -816,7 +918,7 @@ void of_free_opp_table(struct device *dev) | |||
816 | /* Free static OPPs */ | 918 | /* Free static OPPs */ |
817 | list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) { | 919 | list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) { |
818 | if (!opp->dynamic) | 920 | if (!opp->dynamic) |
819 | __dev_pm_opp_remove(dev_opp, opp); | 921 | _opp_remove(dev_opp, opp); |
820 | } | 922 | } |
821 | 923 | ||
822 | mutex_unlock(&dev_opp_list_lock); | 924 | mutex_unlock(&dev_opp_list_lock); |
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index a8fe4c1a8d07..e56d538d039e 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c | |||
@@ -64,6 +64,8 @@ enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask) | |||
64 | struct pm_qos_flags *pqf; | 64 | struct pm_qos_flags *pqf; |
65 | s32 val; | 65 | s32 val; |
66 | 66 | ||
67 | lockdep_assert_held(&dev->power.lock); | ||
68 | |||
67 | if (IS_ERR_OR_NULL(qos)) | 69 | if (IS_ERR_OR_NULL(qos)) |
68 | return PM_QOS_FLAGS_UNDEFINED; | 70 | return PM_QOS_FLAGS_UNDEFINED; |
69 | 71 | ||
@@ -104,6 +106,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags); | |||
104 | */ | 106 | */ |
105 | s32 __dev_pm_qos_read_value(struct device *dev) | 107 | s32 __dev_pm_qos_read_value(struct device *dev) |
106 | { | 108 | { |
109 | lockdep_assert_held(&dev->power.lock); | ||
110 | |||
107 | return IS_ERR_OR_NULL(dev->power.qos) ? | 111 | return IS_ERR_OR_NULL(dev->power.qos) ? |
108 | 0 : pm_qos_read_value(&dev->power.qos->resume_latency); | 112 | 0 : pm_qos_read_value(&dev->power.qos->resume_latency); |
109 | } | 113 | } |
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h index 0da5865df5b1..beb8b27d4621 100644 --- a/drivers/base/regmap/internal.h +++ b/drivers/base/regmap/internal.h | |||
@@ -51,9 +51,11 @@ struct regmap_async { | |||
51 | struct regmap { | 51 | struct regmap { |
52 | union { | 52 | union { |
53 | struct mutex mutex; | 53 | struct mutex mutex; |
54 | spinlock_t spinlock; | 54 | struct { |
55 | spinlock_t spinlock; | ||
56 | unsigned long spinlock_flags; | ||
57 | }; | ||
55 | }; | 58 | }; |
56 | unsigned long spinlock_flags; | ||
57 | regmap_lock lock; | 59 | regmap_lock lock; |
58 | regmap_unlock unlock; | 60 | regmap_unlock unlock; |
59 | void *lock_arg; /* This is passed to lock/unlock functions */ | 61 | void *lock_arg; /* This is passed to lock/unlock functions */ |
@@ -233,6 +235,10 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg, | |||
233 | 235 | ||
234 | void regmap_async_complete_cb(struct regmap_async *async, int ret); | 236 | void regmap_async_complete_cb(struct regmap_async *async, int ret); |
235 | 237 | ||
238 | enum regmap_endian regmap_get_val_endian(struct device *dev, | ||
239 | const struct regmap_bus *bus, | ||
240 | const struct regmap_config *config); | ||
241 | |||
236 | extern struct regcache_ops regcache_rbtree_ops; | 242 | extern struct regcache_ops regcache_rbtree_ops; |
237 | extern struct regcache_ops regcache_lzo_ops; | 243 | extern struct regcache_ops regcache_lzo_ops; |
238 | extern struct regcache_ops regcache_flat_ops; | 244 | extern struct regcache_ops regcache_flat_ops; |
diff --git a/drivers/base/regmap/regmap-ac97.c b/drivers/base/regmap/regmap-ac97.c index e4c45d2299c1..8d304e2a943d 100644 --- a/drivers/base/regmap/regmap-ac97.c +++ b/drivers/base/regmap/regmap-ac97.c | |||
@@ -74,8 +74,8 @@ static int regmap_ac97_reg_write(void *context, unsigned int reg, | |||
74 | } | 74 | } |
75 | 75 | ||
76 | static const struct regmap_bus ac97_regmap_bus = { | 76 | static const struct regmap_bus ac97_regmap_bus = { |
77 | .reg_write = regmap_ac97_reg_write, | 77 | .reg_write = regmap_ac97_reg_write, |
78 | .reg_read = regmap_ac97_reg_read, | 78 | .reg_read = regmap_ac97_reg_read, |
79 | }; | 79 | }; |
80 | 80 | ||
81 | /** | 81 | /** |
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c index 053150a7f9f2..4b76e33110a2 100644 --- a/drivers/base/regmap/regmap-i2c.c +++ b/drivers/base/regmap/regmap-i2c.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/i2c.h> | 14 | #include <linux/i2c.h> |
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | 16 | ||
17 | #include "internal.h" | ||
17 | 18 | ||
18 | static int regmap_smbus_byte_reg_read(void *context, unsigned int reg, | 19 | static int regmap_smbus_byte_reg_read(void *context, unsigned int reg, |
19 | unsigned int *val) | 20 | unsigned int *val) |
@@ -87,6 +88,42 @@ static struct regmap_bus regmap_smbus_word = { | |||
87 | .reg_read = regmap_smbus_word_reg_read, | 88 | .reg_read = regmap_smbus_word_reg_read, |
88 | }; | 89 | }; |
89 | 90 | ||
91 | static int regmap_smbus_word_read_swapped(void *context, unsigned int reg, | ||
92 | unsigned int *val) | ||
93 | { | ||
94 | struct device *dev = context; | ||
95 | struct i2c_client *i2c = to_i2c_client(dev); | ||
96 | int ret; | ||
97 | |||
98 | if (reg > 0xff) | ||
99 | return -EINVAL; | ||
100 | |||
101 | ret = i2c_smbus_read_word_swapped(i2c, reg); | ||
102 | if (ret < 0) | ||
103 | return ret; | ||
104 | |||
105 | *val = ret; | ||
106 | |||
107 | return 0; | ||
108 | } | ||
109 | |||
110 | static int regmap_smbus_word_write_swapped(void *context, unsigned int reg, | ||
111 | unsigned int val) | ||
112 | { | ||
113 | struct device *dev = context; | ||
114 | struct i2c_client *i2c = to_i2c_client(dev); | ||
115 | |||
116 | if (val > 0xffff || reg > 0xff) | ||
117 | return -EINVAL; | ||
118 | |||
119 | return i2c_smbus_write_word_swapped(i2c, reg, val); | ||
120 | } | ||
121 | |||
122 | static struct regmap_bus regmap_smbus_word_swapped = { | ||
123 | .reg_write = regmap_smbus_word_write_swapped, | ||
124 | .reg_read = regmap_smbus_word_read_swapped, | ||
125 | }; | ||
126 | |||
90 | static int regmap_i2c_write(void *context, const void *data, size_t count) | 127 | static int regmap_i2c_write(void *context, const void *data, size_t count) |
91 | { | 128 | { |
92 | struct device *dev = context; | 129 | struct device *dev = context; |
@@ -180,7 +217,14 @@ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c, | |||
180 | else if (config->val_bits == 16 && config->reg_bits == 8 && | 217 | else if (config->val_bits == 16 && config->reg_bits == 8 && |
181 | i2c_check_functionality(i2c->adapter, | 218 | i2c_check_functionality(i2c->adapter, |
182 | I2C_FUNC_SMBUS_WORD_DATA)) | 219 | I2C_FUNC_SMBUS_WORD_DATA)) |
183 | return ®map_smbus_word; | 220 | switch (regmap_get_val_endian(&i2c->dev, NULL, config)) { |
221 | case REGMAP_ENDIAN_LITTLE: | ||
222 | return ®map_smbus_word; | ||
223 | case REGMAP_ENDIAN_BIG: | ||
224 | return ®map_smbus_word_swapped; | ||
225 | default: /* everything else is not supported */ | ||
226 | break; | ||
227 | } | ||
184 | else if (config->val_bits == 8 && config->reg_bits == 8 && | 228 | else if (config->val_bits == 8 && config->reg_bits == 8 && |
185 | i2c_check_functionality(i2c->adapter, | 229 | i2c_check_functionality(i2c->adapter, |
186 | I2C_FUNC_SMBUS_BYTE_DATA)) | 230 | I2C_FUNC_SMBUS_BYTE_DATA)) |
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index d2f8a818d200..f99b098ddabf 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c | |||
@@ -473,9 +473,9 @@ static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus, | |||
473 | return REGMAP_ENDIAN_BIG; | 473 | return REGMAP_ENDIAN_BIG; |
474 | } | 474 | } |
475 | 475 | ||
476 | static enum regmap_endian regmap_get_val_endian(struct device *dev, | 476 | enum regmap_endian regmap_get_val_endian(struct device *dev, |
477 | const struct regmap_bus *bus, | 477 | const struct regmap_bus *bus, |
478 | const struct regmap_config *config) | 478 | const struct regmap_config *config) |
479 | { | 479 | { |
480 | struct device_node *np; | 480 | struct device_node *np; |
481 | enum regmap_endian endian; | 481 | enum regmap_endian endian; |
@@ -513,6 +513,7 @@ static enum regmap_endian regmap_get_val_endian(struct device *dev, | |||
513 | /* Use this if no other value was found */ | 513 | /* Use this if no other value was found */ |
514 | return REGMAP_ENDIAN_BIG; | 514 | return REGMAP_ENDIAN_BIG; |
515 | } | 515 | } |
516 | EXPORT_SYMBOL_GPL(regmap_get_val_endian); | ||
516 | 517 | ||
517 | /** | 518 | /** |
518 | * regmap_init(): Initialise register map | 519 | * regmap_init(): Initialise register map |
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 3ec85dfce124..8a86b62466f7 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c | |||
@@ -2098,32 +2098,26 @@ static void rbd_dev_parent_put(struct rbd_device *rbd_dev) | |||
2098 | * If an image has a non-zero parent overlap, get a reference to its | 2098 | * If an image has a non-zero parent overlap, get a reference to its |
2099 | * parent. | 2099 | * parent. |
2100 | * | 2100 | * |
2101 | * We must get the reference before checking for the overlap to | ||
2102 | * coordinate properly with zeroing the parent overlap in | ||
2103 | * rbd_dev_v2_parent_info() when an image gets flattened. We | ||
2104 | * drop it again if there is no overlap. | ||
2105 | * | ||
2106 | * Returns true if the rbd device has a parent with a non-zero | 2101 | * Returns true if the rbd device has a parent with a non-zero |
2107 | * overlap and a reference for it was successfully taken, or | 2102 | * overlap and a reference for it was successfully taken, or |
2108 | * false otherwise. | 2103 | * false otherwise. |
2109 | */ | 2104 | */ |
2110 | static bool rbd_dev_parent_get(struct rbd_device *rbd_dev) | 2105 | static bool rbd_dev_parent_get(struct rbd_device *rbd_dev) |
2111 | { | 2106 | { |
2112 | int counter; | 2107 | int counter = 0; |
2113 | 2108 | ||
2114 | if (!rbd_dev->parent_spec) | 2109 | if (!rbd_dev->parent_spec) |
2115 | return false; | 2110 | return false; |
2116 | 2111 | ||
2117 | counter = atomic_inc_return_safe(&rbd_dev->parent_ref); | 2112 | down_read(&rbd_dev->header_rwsem); |
2118 | if (counter > 0 && rbd_dev->parent_overlap) | 2113 | if (rbd_dev->parent_overlap) |
2119 | return true; | 2114 | counter = atomic_inc_return_safe(&rbd_dev->parent_ref); |
2120 | 2115 | up_read(&rbd_dev->header_rwsem); | |
2121 | /* Image was flattened, but parent is not yet torn down */ | ||
2122 | 2116 | ||
2123 | if (counter < 0) | 2117 | if (counter < 0) |
2124 | rbd_warn(rbd_dev, "parent reference overflow"); | 2118 | rbd_warn(rbd_dev, "parent reference overflow"); |
2125 | 2119 | ||
2126 | return false; | 2120 | return counter > 0; |
2127 | } | 2121 | } |
2128 | 2122 | ||
2129 | /* | 2123 | /* |
@@ -4239,7 +4233,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) | |||
4239 | */ | 4233 | */ |
4240 | if (rbd_dev->parent_overlap) { | 4234 | if (rbd_dev->parent_overlap) { |
4241 | rbd_dev->parent_overlap = 0; | 4235 | rbd_dev->parent_overlap = 0; |
4242 | smp_mb(); | ||
4243 | rbd_dev_parent_put(rbd_dev); | 4236 | rbd_dev_parent_put(rbd_dev); |
4244 | pr_info("%s: clone image has been flattened\n", | 4237 | pr_info("%s: clone image has been flattened\n", |
4245 | rbd_dev->disk->disk_name); | 4238 | rbd_dev->disk->disk_name); |
@@ -4285,7 +4278,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) | |||
4285 | * treat it specially. | 4278 | * treat it specially. |
4286 | */ | 4279 | */ |
4287 | rbd_dev->parent_overlap = overlap; | 4280 | rbd_dev->parent_overlap = overlap; |
4288 | smp_mb(); | ||
4289 | if (!overlap) { | 4281 | if (!overlap) { |
4290 | 4282 | ||
4291 | /* A null parent_spec indicates it's the initial probe */ | 4283 | /* A null parent_spec indicates it's the initial probe */ |
@@ -5114,10 +5106,7 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev) | |||
5114 | { | 5106 | { |
5115 | struct rbd_image_header *header; | 5107 | struct rbd_image_header *header; |
5116 | 5108 | ||
5117 | /* Drop parent reference unless it's already been done (or none) */ | 5109 | rbd_dev_parent_put(rbd_dev); |
5118 | |||
5119 | if (rbd_dev->parent_overlap) | ||
5120 | rbd_dev_parent_put(rbd_dev); | ||
5121 | 5110 | ||
5122 | /* Free dynamic fields from the header, then zero it out */ | 5111 | /* Free dynamic fields from the header, then zero it out */ |
5123 | 5112 | ||
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 63fc7f06a014..2a04d341e598 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c | |||
@@ -47,6 +47,7 @@ | |||
47 | #include <asm/xen/hypervisor.h> | 47 | #include <asm/xen/hypervisor.h> |
48 | #include <asm/xen/hypercall.h> | 48 | #include <asm/xen/hypercall.h> |
49 | #include <xen/balloon.h> | 49 | #include <xen/balloon.h> |
50 | #include <xen/grant_table.h> | ||
50 | #include "common.h" | 51 | #include "common.h" |
51 | 52 | ||
52 | /* | 53 | /* |
@@ -100,7 +101,7 @@ module_param(log_stats, int, 0644); | |||
100 | 101 | ||
101 | #define BLKBACK_INVALID_HANDLE (~0) | 102 | #define BLKBACK_INVALID_HANDLE (~0) |
102 | 103 | ||
103 | /* Number of free pages to remove on each call to free_xenballooned_pages */ | 104 | /* Number of free pages to remove on each call to gnttab_free_pages */ |
104 | #define NUM_BATCH_FREE_PAGES 10 | 105 | #define NUM_BATCH_FREE_PAGES 10 |
105 | 106 | ||
106 | static inline int get_free_page(struct xen_blkif *blkif, struct page **page) | 107 | static inline int get_free_page(struct xen_blkif *blkif, struct page **page) |
@@ -111,7 +112,7 @@ static inline int get_free_page(struct xen_blkif *blkif, struct page **page) | |||
111 | if (list_empty(&blkif->free_pages)) { | 112 | if (list_empty(&blkif->free_pages)) { |
112 | BUG_ON(blkif->free_pages_num != 0); | 113 | BUG_ON(blkif->free_pages_num != 0); |
113 | spin_unlock_irqrestore(&blkif->free_pages_lock, flags); | 114 | spin_unlock_irqrestore(&blkif->free_pages_lock, flags); |
114 | return alloc_xenballooned_pages(1, page, false); | 115 | return gnttab_alloc_pages(1, page); |
115 | } | 116 | } |
116 | BUG_ON(blkif->free_pages_num == 0); | 117 | BUG_ON(blkif->free_pages_num == 0); |
117 | page[0] = list_first_entry(&blkif->free_pages, struct page, lru); | 118 | page[0] = list_first_entry(&blkif->free_pages, struct page, lru); |
@@ -151,14 +152,14 @@ static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num) | |||
151 | blkif->free_pages_num--; | 152 | blkif->free_pages_num--; |
152 | if (++num_pages == NUM_BATCH_FREE_PAGES) { | 153 | if (++num_pages == NUM_BATCH_FREE_PAGES) { |
153 | spin_unlock_irqrestore(&blkif->free_pages_lock, flags); | 154 | spin_unlock_irqrestore(&blkif->free_pages_lock, flags); |
154 | free_xenballooned_pages(num_pages, page); | 155 | gnttab_free_pages(num_pages, page); |
155 | spin_lock_irqsave(&blkif->free_pages_lock, flags); | 156 | spin_lock_irqsave(&blkif->free_pages_lock, flags); |
156 | num_pages = 0; | 157 | num_pages = 0; |
157 | } | 158 | } |
158 | } | 159 | } |
159 | spin_unlock_irqrestore(&blkif->free_pages_lock, flags); | 160 | spin_unlock_irqrestore(&blkif->free_pages_lock, flags); |
160 | if (num_pages != 0) | 161 | if (num_pages != 0) |
161 | free_xenballooned_pages(num_pages, page); | 162 | gnttab_free_pages(num_pages, page); |
162 | } | 163 | } |
163 | 164 | ||
164 | #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page))) | 165 | #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page))) |
@@ -262,6 +263,17 @@ static void put_persistent_gnt(struct xen_blkif *blkif, | |||
262 | atomic_dec(&blkif->persistent_gnt_in_use); | 263 | atomic_dec(&blkif->persistent_gnt_in_use); |
263 | } | 264 | } |
264 | 265 | ||
266 | static void free_persistent_gnts_unmap_callback(int result, | ||
267 | struct gntab_unmap_queue_data *data) | ||
268 | { | ||
269 | struct completion *c = data->data; | ||
270 | |||
271 | /* BUG_ON used to reproduce existing behaviour, | ||
272 | but is this the best way to deal with this? */ | ||
273 | BUG_ON(result); | ||
274 | complete(c); | ||
275 | } | ||
276 | |||
265 | static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, | 277 | static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, |
266 | unsigned int num) | 278 | unsigned int num) |
267 | { | 279 | { |
@@ -269,8 +281,17 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, | |||
269 | struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | 281 | struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; |
270 | struct persistent_gnt *persistent_gnt; | 282 | struct persistent_gnt *persistent_gnt; |
271 | struct rb_node *n; | 283 | struct rb_node *n; |
272 | int ret = 0; | ||
273 | int segs_to_unmap = 0; | 284 | int segs_to_unmap = 0; |
285 | struct gntab_unmap_queue_data unmap_data; | ||
286 | struct completion unmap_completion; | ||
287 | |||
288 | init_completion(&unmap_completion); | ||
289 | |||
290 | unmap_data.data = &unmap_completion; | ||
291 | unmap_data.done = &free_persistent_gnts_unmap_callback; | ||
292 | unmap_data.pages = pages; | ||
293 | unmap_data.unmap_ops = unmap; | ||
294 | unmap_data.kunmap_ops = NULL; | ||
274 | 295 | ||
275 | foreach_grant_safe(persistent_gnt, n, root, node) { | 296 | foreach_grant_safe(persistent_gnt, n, root, node) { |
276 | BUG_ON(persistent_gnt->handle == | 297 | BUG_ON(persistent_gnt->handle == |
@@ -285,9 +306,11 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, | |||
285 | 306 | ||
286 | if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST || | 307 | if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST || |
287 | !rb_next(&persistent_gnt->node)) { | 308 | !rb_next(&persistent_gnt->node)) { |
288 | ret = gnttab_unmap_refs(unmap, NULL, pages, | 309 | |
289 | segs_to_unmap); | 310 | unmap_data.count = segs_to_unmap; |
290 | BUG_ON(ret); | 311 | gnttab_unmap_refs_async(&unmap_data); |
312 | wait_for_completion(&unmap_completion); | ||
313 | |||
291 | put_free_pages(blkif, pages, segs_to_unmap); | 314 | put_free_pages(blkif, pages, segs_to_unmap); |
292 | segs_to_unmap = 0; | 315 | segs_to_unmap = 0; |
293 | } | 316 | } |
@@ -653,18 +676,14 @@ void xen_blkbk_free_caches(struct xen_blkif *blkif) | |||
653 | shrink_free_pagepool(blkif, 0 /* All */); | 676 | shrink_free_pagepool(blkif, 0 /* All */); |
654 | } | 677 | } |
655 | 678 | ||
656 | /* | 679 | static unsigned int xen_blkbk_unmap_prepare( |
657 | * Unmap the grant references, and also remove the M2P over-rides | 680 | struct xen_blkif *blkif, |
658 | * used in the 'pending_req'. | 681 | struct grant_page **pages, |
659 | */ | 682 | unsigned int num, |
660 | static void xen_blkbk_unmap(struct xen_blkif *blkif, | 683 | struct gnttab_unmap_grant_ref *unmap_ops, |
661 | struct grant_page *pages[], | 684 | struct page **unmap_pages) |
662 | int num) | ||
663 | { | 685 | { |
664 | struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | ||
665 | struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | ||
666 | unsigned int i, invcount = 0; | 686 | unsigned int i, invcount = 0; |
667 | int ret; | ||
668 | 687 | ||
669 | for (i = 0; i < num; i++) { | 688 | for (i = 0; i < num; i++) { |
670 | if (pages[i]->persistent_gnt != NULL) { | 689 | if (pages[i]->persistent_gnt != NULL) { |
@@ -674,21 +693,95 @@ static void xen_blkbk_unmap(struct xen_blkif *blkif, | |||
674 | if (pages[i]->handle == BLKBACK_INVALID_HANDLE) | 693 | if (pages[i]->handle == BLKBACK_INVALID_HANDLE) |
675 | continue; | 694 | continue; |
676 | unmap_pages[invcount] = pages[i]->page; | 695 | unmap_pages[invcount] = pages[i]->page; |
677 | gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[i]->page), | 696 | gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page), |
678 | GNTMAP_host_map, pages[i]->handle); | 697 | GNTMAP_host_map, pages[i]->handle); |
679 | pages[i]->handle = BLKBACK_INVALID_HANDLE; | 698 | pages[i]->handle = BLKBACK_INVALID_HANDLE; |
680 | if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) { | 699 | invcount++; |
681 | ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, | 700 | } |
682 | invcount); | 701 | |
702 | return invcount; | ||
703 | } | ||
704 | |||
705 | static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data) | ||
706 | { | ||
707 | struct pending_req* pending_req = (struct pending_req*) (data->data); | ||
708 | struct xen_blkif *blkif = pending_req->blkif; | ||
709 | |||
710 | /* BUG_ON used to reproduce existing behaviour, | ||
711 | but is this the best way to deal with this? */ | ||
712 | BUG_ON(result); | ||
713 | |||
714 | put_free_pages(blkif, data->pages, data->count); | ||
715 | make_response(blkif, pending_req->id, | ||
716 | pending_req->operation, pending_req->status); | ||
717 | free_req(blkif, pending_req); | ||
718 | /* | ||
719 | * Make sure the request is freed before releasing blkif, | ||
720 | * or there could be a race between free_req and the | ||
721 | * cleanup done in xen_blkif_free during shutdown. | ||
722 | * | ||
723 | * NB: The fact that we might try to wake up pending_free_wq | ||
724 | * before drain_complete (in case there's a drain going on) | ||
725 | * it's not a problem with our current implementation | ||
726 | * because we can assure there's no thread waiting on | ||
727 | * pending_free_wq if there's a drain going on, but it has | ||
728 | * to be taken into account if the current model is changed. | ||
729 | */ | ||
730 | if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) { | ||
731 | complete(&blkif->drain_complete); | ||
732 | } | ||
733 | xen_blkif_put(blkif); | ||
734 | } | ||
735 | |||
736 | static void xen_blkbk_unmap_and_respond(struct pending_req *req) | ||
737 | { | ||
738 | struct gntab_unmap_queue_data* work = &req->gnttab_unmap_data; | ||
739 | struct xen_blkif *blkif = req->blkif; | ||
740 | struct grant_page **pages = req->segments; | ||
741 | unsigned int invcount; | ||
742 | |||
743 | invcount = xen_blkbk_unmap_prepare(blkif, pages, req->nr_pages, | ||
744 | req->unmap, req->unmap_pages); | ||
745 | |||
746 | work->data = req; | ||
747 | work->done = xen_blkbk_unmap_and_respond_callback; | ||
748 | work->unmap_ops = req->unmap; | ||
749 | work->kunmap_ops = NULL; | ||
750 | work->pages = req->unmap_pages; | ||
751 | work->count = invcount; | ||
752 | |||
753 | gnttab_unmap_refs_async(&req->gnttab_unmap_data); | ||
754 | } | ||
755 | |||
756 | |||
757 | /* | ||
758 | * Unmap the grant references. | ||
759 | * | ||
760 | * This could accumulate ops up to the batch size to reduce the number | ||
761 | * of hypercalls, but since this is only used in error paths there's | ||
762 | * no real need. | ||
763 | */ | ||
764 | static void xen_blkbk_unmap(struct xen_blkif *blkif, | ||
765 | struct grant_page *pages[], | ||
766 | int num) | ||
767 | { | ||
768 | struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | ||
769 | struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | ||
770 | unsigned int invcount = 0; | ||
771 | int ret; | ||
772 | |||
773 | while (num) { | ||
774 | unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST); | ||
775 | |||
776 | invcount = xen_blkbk_unmap_prepare(blkif, pages, batch, | ||
777 | unmap, unmap_pages); | ||
778 | if (invcount) { | ||
779 | ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount); | ||
683 | BUG_ON(ret); | 780 | BUG_ON(ret); |
684 | put_free_pages(blkif, unmap_pages, invcount); | 781 | put_free_pages(blkif, unmap_pages, invcount); |
685 | invcount = 0; | ||
686 | } | 782 | } |
687 | } | 783 | pages += batch; |
688 | if (invcount) { | 784 | num -= batch; |
689 | ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount); | ||
690 | BUG_ON(ret); | ||
691 | put_free_pages(blkif, unmap_pages, invcount); | ||
692 | } | 785 | } |
693 | } | 786 | } |
694 | 787 | ||
@@ -982,32 +1075,8 @@ static void __end_block_io_op(struct pending_req *pending_req, int error) | |||
982 | * the grant references associated with 'request' and provide | 1075 | * the grant references associated with 'request' and provide |
983 | * the proper response on the ring. | 1076 | * the proper response on the ring. |
984 | */ | 1077 | */ |
985 | if (atomic_dec_and_test(&pending_req->pendcnt)) { | 1078 | if (atomic_dec_and_test(&pending_req->pendcnt)) |
986 | struct xen_blkif *blkif = pending_req->blkif; | 1079 | xen_blkbk_unmap_and_respond(pending_req); |
987 | |||
988 | xen_blkbk_unmap(blkif, | ||
989 | pending_req->segments, | ||
990 | pending_req->nr_pages); | ||
991 | make_response(blkif, pending_req->id, | ||
992 | pending_req->operation, pending_req->status); | ||
993 | free_req(blkif, pending_req); | ||
994 | /* | ||
995 | * Make sure the request is freed before releasing blkif, | ||
996 | * or there could be a race between free_req and the | ||
997 | * cleanup done in xen_blkif_free during shutdown. | ||
998 | * | ||
999 | * NB: The fact that we might try to wake up pending_free_wq | ||
1000 | * before drain_complete (in case there's a drain going on) | ||
1001 | * it's not a problem with our current implementation | ||
1002 | * because we can assure there's no thread waiting on | ||
1003 | * pending_free_wq if there's a drain going on, but it has | ||
1004 | * to be taken into account if the current model is changed. | ||
1005 | */ | ||
1006 | if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) { | ||
1007 | complete(&blkif->drain_complete); | ||
1008 | } | ||
1009 | xen_blkif_put(blkif); | ||
1010 | } | ||
1011 | } | 1080 | } |
1012 | 1081 | ||
1013 | /* | 1082 | /* |
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index f65b807e3236..cc90a840e616 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h | |||
@@ -350,6 +350,9 @@ struct pending_req { | |||
350 | struct grant_page *indirect_pages[MAX_INDIRECT_PAGES]; | 350 | struct grant_page *indirect_pages[MAX_INDIRECT_PAGES]; |
351 | struct seg_buf seg[MAX_INDIRECT_SEGMENTS]; | 351 | struct seg_buf seg[MAX_INDIRECT_SEGMENTS]; |
352 | struct bio *biolist[MAX_INDIRECT_SEGMENTS]; | 352 | struct bio *biolist[MAX_INDIRECT_SEGMENTS]; |
353 | struct gnttab_unmap_grant_ref unmap[MAX_INDIRECT_SEGMENTS]; | ||
354 | struct page *unmap_pages[MAX_INDIRECT_SEGMENTS]; | ||
355 | struct gntab_unmap_queue_data gnttab_unmap_data; | ||
353 | }; | 356 | }; |
354 | 357 | ||
355 | 358 | ||
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index d5d4cd82b9f7..5c0baa9ffc64 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c | |||
@@ -976,8 +976,8 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data) | |||
976 | status = acpi_resource_to_address64(res, &addr); | 976 | status = acpi_resource_to_address64(res, &addr); |
977 | 977 | ||
978 | if (ACPI_SUCCESS(status)) { | 978 | if (ACPI_SUCCESS(status)) { |
979 | hdp->hd_phys_address = addr.minimum; | 979 | hdp->hd_phys_address = addr.address.minimum; |
980 | hdp->hd_address = ioremap(addr.minimum, addr.address_length); | 980 | hdp->hd_address = ioremap(addr.address.minimum, addr.address.address_length); |
981 | 981 | ||
982 | if (hpet_is_known(hdp)) { | 982 | if (hpet_is_known(hdp)) { |
983 | iounmap(hdp->hd_address); | 983 | iounmap(hdp->hd_address); |
diff --git a/drivers/char/random.c b/drivers/char/random.c index 04645c09fe5e..9cd6968e2f92 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
@@ -569,19 +569,19 @@ static void fast_mix(struct fast_pool *f) | |||
569 | __u32 c = f->pool[2], d = f->pool[3]; | 569 | __u32 c = f->pool[2], d = f->pool[3]; |
570 | 570 | ||
571 | a += b; c += d; | 571 | a += b; c += d; |
572 | b = rol32(a, 6); d = rol32(c, 27); | 572 | b = rol32(b, 6); d = rol32(d, 27); |
573 | d ^= a; b ^= c; | 573 | d ^= a; b ^= c; |
574 | 574 | ||
575 | a += b; c += d; | 575 | a += b; c += d; |
576 | b = rol32(a, 16); d = rol32(c, 14); | 576 | b = rol32(b, 16); d = rol32(d, 14); |
577 | d ^= a; b ^= c; | 577 | d ^= a; b ^= c; |
578 | 578 | ||
579 | a += b; c += d; | 579 | a += b; c += d; |
580 | b = rol32(a, 6); d = rol32(c, 27); | 580 | b = rol32(b, 6); d = rol32(d, 27); |
581 | d ^= a; b ^= c; | 581 | d ^= a; b ^= c; |
582 | 582 | ||
583 | a += b; c += d; | 583 | a += b; c += d; |
584 | b = rol32(a, 16); d = rol32(c, 14); | 584 | b = rol32(b, 16); d = rol32(d, 14); |
585 | d ^= a; b ^= c; | 585 | d ^= a; b ^= c; |
586 | 586 | ||
587 | f->pool[0] = a; f->pool[1] = b; | 587 | f->pool[0] = a; f->pool[1] = b; |
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 3f44f292d066..91f86131bb7a 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig | |||
@@ -13,6 +13,7 @@ config COMMON_CLK | |||
13 | bool | 13 | bool |
14 | select HAVE_CLK_PREPARE | 14 | select HAVE_CLK_PREPARE |
15 | select CLKDEV_LOOKUP | 15 | select CLKDEV_LOOKUP |
16 | select SRCU | ||
16 | ---help--- | 17 | ---help--- |
17 | The common clock framework is a single definition of struct | 18 | The common clock framework is a single definition of struct |
18 | clk, useful across many platforms, as well as an | 19 | clk, useful across many platforms, as well as an |
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 29b2ef5a68b9..a171fef2c2b6 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig | |||
@@ -2,6 +2,7 @@ menu "CPU Frequency scaling" | |||
2 | 2 | ||
3 | config CPU_FREQ | 3 | config CPU_FREQ |
4 | bool "CPU Frequency scaling" | 4 | bool "CPU Frequency scaling" |
5 | select SRCU | ||
5 | help | 6 | help |
6 | CPU Frequency scaling allows you to change the clock speed of | 7 | CPU Frequency scaling allows you to change the clock speed of |
7 | CPUs on the fly. This is a nice method to save power, because | 8 | CPUs on the fly. This is a nice method to save power, because |
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86 index 89ae88f91895..c59bdcb83217 100644 --- a/drivers/cpufreq/Kconfig.x86 +++ b/drivers/cpufreq/Kconfig.x86 | |||
@@ -57,6 +57,16 @@ config X86_ACPI_CPUFREQ_CPB | |||
57 | By enabling this option the acpi_cpufreq driver provides the old | 57 | By enabling this option the acpi_cpufreq driver provides the old |
58 | entry in addition to the new boost ones, for compatibility reasons. | 58 | entry in addition to the new boost ones, for compatibility reasons. |
59 | 59 | ||
60 | config X86_SFI_CPUFREQ | ||
61 | tristate "SFI Performance-States driver" | ||
62 | depends on X86_INTEL_MID && SFI | ||
63 | help | ||
64 | This adds a CPUFreq driver for some Silvermont based Intel Atom | ||
65 | architectures like Z34xx and Z35xx which enumerate processor | ||
66 | performance states through SFI. | ||
67 | |||
68 | If in doubt, say N. | ||
69 | |||
60 | config ELAN_CPUFREQ | 70 | config ELAN_CPUFREQ |
61 | tristate "AMD Elan SC400 and SC410" | 71 | tristate "AMD Elan SC400 and SC410" |
62 | depends on MELAN | 72 | depends on MELAN |
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index b3ca7b0b2c33..8b4220ac888b 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile | |||
@@ -41,6 +41,7 @@ obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o | |||
41 | obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o | 41 | obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o |
42 | obj-$(CONFIG_X86_INTEL_PSTATE) += intel_pstate.o | 42 | obj-$(CONFIG_X86_INTEL_PSTATE) += intel_pstate.o |
43 | obj-$(CONFIG_X86_AMD_FREQ_SENSITIVITY) += amd_freq_sensitivity.o | 43 | obj-$(CONFIG_X86_AMD_FREQ_SENSITIVITY) += amd_freq_sensitivity.o |
44 | obj-$(CONFIG_X86_SFI_CPUFREQ) += sfi-cpufreq.o | ||
44 | 45 | ||
45 | ################################################################################## | 46 | ################################################################################## |
46 | # ARM SoC drivers | 47 | # ARM SoC drivers |
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index fde97d6e31d6..bab67db54b7e 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c | |||
@@ -320,8 +320,7 @@ static int cpufreq_exit(struct cpufreq_policy *policy) | |||
320 | { | 320 | { |
321 | struct private_data *priv = policy->driver_data; | 321 | struct private_data *priv = policy->driver_data; |
322 | 322 | ||
323 | if (priv->cdev) | 323 | cpufreq_cooling_unregister(priv->cdev); |
324 | cpufreq_cooling_unregister(priv->cdev); | ||
325 | dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); | 324 | dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); |
326 | of_free_opp_table(priv->cpu_dev); | 325 | of_free_opp_table(priv->cpu_dev); |
327 | clk_put(policy->clk); | 326 | clk_put(policy->clk); |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 46bed4f81cde..28e59a48b35f 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -27,9 +27,21 @@ | |||
27 | #include <linux/mutex.h> | 27 | #include <linux/mutex.h> |
28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
29 | #include <linux/suspend.h> | 29 | #include <linux/suspend.h> |
30 | #include <linux/syscore_ops.h> | ||
30 | #include <linux/tick.h> | 31 | #include <linux/tick.h> |
31 | #include <trace/events/power.h> | 32 | #include <trace/events/power.h> |
32 | 33 | ||
34 | /* Macros to iterate over lists */ | ||
35 | /* Iterate over online CPUs policies */ | ||
36 | static LIST_HEAD(cpufreq_policy_list); | ||
37 | #define for_each_policy(__policy) \ | ||
38 | list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) | ||
39 | |||
40 | /* Iterate over governors */ | ||
41 | static LIST_HEAD(cpufreq_governor_list); | ||
42 | #define for_each_governor(__governor) \ | ||
43 | list_for_each_entry(__governor, &cpufreq_governor_list, governor_list) | ||
44 | |||
33 | /** | 45 | /** |
34 | * The "cpufreq driver" - the arch- or hardware-dependent low | 46 | * The "cpufreq driver" - the arch- or hardware-dependent low |
35 | * level driver of CPUFreq support, and its spinlock. This lock | 47 | * level driver of CPUFreq support, and its spinlock. This lock |
@@ -40,7 +52,6 @@ static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data); | |||
40 | static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback); | 52 | static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback); |
41 | static DEFINE_RWLOCK(cpufreq_driver_lock); | 53 | static DEFINE_RWLOCK(cpufreq_driver_lock); |
42 | DEFINE_MUTEX(cpufreq_governor_lock); | 54 | DEFINE_MUTEX(cpufreq_governor_lock); |
43 | static LIST_HEAD(cpufreq_policy_list); | ||
44 | 55 | ||
45 | /* This one keeps track of the previously set governor of a removed CPU */ | 56 | /* This one keeps track of the previously set governor of a removed CPU */ |
46 | static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor); | 57 | static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor); |
@@ -62,7 +73,7 @@ static DECLARE_RWSEM(cpufreq_rwsem); | |||
62 | /* internal prototypes */ | 73 | /* internal prototypes */ |
63 | static int __cpufreq_governor(struct cpufreq_policy *policy, | 74 | static int __cpufreq_governor(struct cpufreq_policy *policy, |
64 | unsigned int event); | 75 | unsigned int event); |
65 | static unsigned int __cpufreq_get(unsigned int cpu); | 76 | static unsigned int __cpufreq_get(struct cpufreq_policy *policy); |
66 | static void handle_update(struct work_struct *work); | 77 | static void handle_update(struct work_struct *work); |
67 | 78 | ||
68 | /** | 79 | /** |
@@ -93,7 +104,6 @@ void disable_cpufreq(void) | |||
93 | { | 104 | { |
94 | off = 1; | 105 | off = 1; |
95 | } | 106 | } |
96 | static LIST_HEAD(cpufreq_governor_list); | ||
97 | static DEFINE_MUTEX(cpufreq_governor_mutex); | 107 | static DEFINE_MUTEX(cpufreq_governor_mutex); |
98 | 108 | ||
99 | bool have_governor_per_policy(void) | 109 | bool have_governor_per_policy(void) |
@@ -202,7 +212,7 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) | |||
202 | struct cpufreq_policy *policy = NULL; | 212 | struct cpufreq_policy *policy = NULL; |
203 | unsigned long flags; | 213 | unsigned long flags; |
204 | 214 | ||
205 | if (cpufreq_disabled() || (cpu >= nr_cpu_ids)) | 215 | if (cpu >= nr_cpu_ids) |
206 | return NULL; | 216 | return NULL; |
207 | 217 | ||
208 | if (!down_read_trylock(&cpufreq_rwsem)) | 218 | if (!down_read_trylock(&cpufreq_rwsem)) |
@@ -229,9 +239,6 @@ EXPORT_SYMBOL_GPL(cpufreq_cpu_get); | |||
229 | 239 | ||
230 | void cpufreq_cpu_put(struct cpufreq_policy *policy) | 240 | void cpufreq_cpu_put(struct cpufreq_policy *policy) |
231 | { | 241 | { |
232 | if (cpufreq_disabled()) | ||
233 | return; | ||
234 | |||
235 | kobject_put(&policy->kobj); | 242 | kobject_put(&policy->kobj); |
236 | up_read(&cpufreq_rwsem); | 243 | up_read(&cpufreq_rwsem); |
237 | } | 244 | } |
@@ -249,12 +256,12 @@ EXPORT_SYMBOL_GPL(cpufreq_cpu_put); | |||
249 | * systems as each CPU might be scaled differently. So, use the arch | 256 | * systems as each CPU might be scaled differently. So, use the arch |
250 | * per-CPU loops_per_jiffy value wherever possible. | 257 | * per-CPU loops_per_jiffy value wherever possible. |
251 | */ | 258 | */ |
252 | #ifndef CONFIG_SMP | ||
253 | static unsigned long l_p_j_ref; | ||
254 | static unsigned int l_p_j_ref_freq; | ||
255 | |||
256 | static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) | 259 | static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) |
257 | { | 260 | { |
261 | #ifndef CONFIG_SMP | ||
262 | static unsigned long l_p_j_ref; | ||
263 | static unsigned int l_p_j_ref_freq; | ||
264 | |||
258 | if (ci->flags & CPUFREQ_CONST_LOOPS) | 265 | if (ci->flags & CPUFREQ_CONST_LOOPS) |
259 | return; | 266 | return; |
260 | 267 | ||
@@ -270,13 +277,8 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) | |||
270 | pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n", | 277 | pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n", |
271 | loops_per_jiffy, ci->new); | 278 | loops_per_jiffy, ci->new); |
272 | } | 279 | } |
273 | } | ||
274 | #else | ||
275 | static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) | ||
276 | { | ||
277 | return; | ||
278 | } | ||
279 | #endif | 280 | #endif |
281 | } | ||
280 | 282 | ||
281 | static void __cpufreq_notify_transition(struct cpufreq_policy *policy, | 283 | static void __cpufreq_notify_transition(struct cpufreq_policy *policy, |
282 | struct cpufreq_freqs *freqs, unsigned int state) | 284 | struct cpufreq_freqs *freqs, unsigned int state) |
@@ -432,11 +434,11 @@ static ssize_t store_boost(struct kobject *kobj, struct attribute *attr, | |||
432 | } | 434 | } |
433 | define_one_global_rw(boost); | 435 | define_one_global_rw(boost); |
434 | 436 | ||
435 | static struct cpufreq_governor *__find_governor(const char *str_governor) | 437 | static struct cpufreq_governor *find_governor(const char *str_governor) |
436 | { | 438 | { |
437 | struct cpufreq_governor *t; | 439 | struct cpufreq_governor *t; |
438 | 440 | ||
439 | list_for_each_entry(t, &cpufreq_governor_list, governor_list) | 441 | for_each_governor(t) |
440 | if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN)) | 442 | if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN)) |
441 | return t; | 443 | return t; |
442 | 444 | ||
@@ -463,12 +465,12 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy, | |||
463 | *policy = CPUFREQ_POLICY_POWERSAVE; | 465 | *policy = CPUFREQ_POLICY_POWERSAVE; |
464 | err = 0; | 466 | err = 0; |
465 | } | 467 | } |
466 | } else if (has_target()) { | 468 | } else { |
467 | struct cpufreq_governor *t; | 469 | struct cpufreq_governor *t; |
468 | 470 | ||
469 | mutex_lock(&cpufreq_governor_mutex); | 471 | mutex_lock(&cpufreq_governor_mutex); |
470 | 472 | ||
471 | t = __find_governor(str_governor); | 473 | t = find_governor(str_governor); |
472 | 474 | ||
473 | if (t == NULL) { | 475 | if (t == NULL) { |
474 | int ret; | 476 | int ret; |
@@ -478,7 +480,7 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy, | |||
478 | mutex_lock(&cpufreq_governor_mutex); | 480 | mutex_lock(&cpufreq_governor_mutex); |
479 | 481 | ||
480 | if (ret == 0) | 482 | if (ret == 0) |
481 | t = __find_governor(str_governor); | 483 | t = find_governor(str_governor); |
482 | } | 484 | } |
483 | 485 | ||
484 | if (t != NULL) { | 486 | if (t != NULL) { |
@@ -513,8 +515,7 @@ show_one(cpuinfo_transition_latency, cpuinfo.transition_latency); | |||
513 | show_one(scaling_min_freq, min); | 515 | show_one(scaling_min_freq, min); |
514 | show_one(scaling_max_freq, max); | 516 | show_one(scaling_max_freq, max); |
515 | 517 | ||
516 | static ssize_t show_scaling_cur_freq( | 518 | static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf) |
517 | struct cpufreq_policy *policy, char *buf) | ||
518 | { | 519 | { |
519 | ssize_t ret; | 520 | ssize_t ret; |
520 | 521 | ||
@@ -563,7 +564,7 @@ store_one(scaling_max_freq, max); | |||
563 | static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy, | 564 | static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy, |
564 | char *buf) | 565 | char *buf) |
565 | { | 566 | { |
566 | unsigned int cur_freq = __cpufreq_get(policy->cpu); | 567 | unsigned int cur_freq = __cpufreq_get(policy); |
567 | if (!cur_freq) | 568 | if (!cur_freq) |
568 | return sprintf(buf, "<unknown>"); | 569 | return sprintf(buf, "<unknown>"); |
569 | return sprintf(buf, "%u\n", cur_freq); | 570 | return sprintf(buf, "%u\n", cur_freq); |
@@ -639,7 +640,7 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy, | |||
639 | goto out; | 640 | goto out; |
640 | } | 641 | } |
641 | 642 | ||
642 | list_for_each_entry(t, &cpufreq_governor_list, governor_list) { | 643 | for_each_governor(t) { |
643 | if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) | 644 | if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) |
644 | - (CPUFREQ_NAME_LEN + 2))) | 645 | - (CPUFREQ_NAME_LEN + 2))) |
645 | goto out; | 646 | goto out; |
@@ -902,7 +903,7 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy, | |||
902 | 903 | ||
903 | /* set up files for this cpu device */ | 904 | /* set up files for this cpu device */ |
904 | drv_attr = cpufreq_driver->attr; | 905 | drv_attr = cpufreq_driver->attr; |
905 | while ((drv_attr) && (*drv_attr)) { | 906 | while (drv_attr && *drv_attr) { |
906 | ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr)); | 907 | ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr)); |
907 | if (ret) | 908 | if (ret) |
908 | return ret; | 909 | return ret; |
@@ -936,7 +937,7 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy) | |||
936 | memcpy(&new_policy, policy, sizeof(*policy)); | 937 | memcpy(&new_policy, policy, sizeof(*policy)); |
937 | 938 | ||
938 | /* Update governor of new_policy to the governor used before hotplug */ | 939 | /* Update governor of new_policy to the governor used before hotplug */ |
939 | gov = __find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu)); | 940 | gov = find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu)); |
940 | if (gov) | 941 | if (gov) |
941 | pr_debug("Restoring governor %s for cpu %d\n", | 942 | pr_debug("Restoring governor %s for cpu %d\n", |
942 | policy->governor->name, policy->cpu); | 943 | policy->governor->name, policy->cpu); |
@@ -958,7 +959,6 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy) | |||
958 | } | 959 | } |
959 | } | 960 | } |
960 | 961 | ||
961 | #ifdef CONFIG_HOTPLUG_CPU | ||
962 | static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, | 962 | static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, |
963 | unsigned int cpu, struct device *dev) | 963 | unsigned int cpu, struct device *dev) |
964 | { | 964 | { |
@@ -996,7 +996,6 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, | |||
996 | 996 | ||
997 | return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"); | 997 | return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"); |
998 | } | 998 | } |
999 | #endif | ||
1000 | 999 | ||
1001 | static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu) | 1000 | static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu) |
1002 | { | 1001 | { |
@@ -1033,6 +1032,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void) | |||
1033 | init_rwsem(&policy->rwsem); | 1032 | init_rwsem(&policy->rwsem); |
1034 | spin_lock_init(&policy->transition_lock); | 1033 | spin_lock_init(&policy->transition_lock); |
1035 | init_waitqueue_head(&policy->transition_wait); | 1034 | init_waitqueue_head(&policy->transition_wait); |
1035 | init_completion(&policy->kobj_unregister); | ||
1036 | INIT_WORK(&policy->update, handle_update); | ||
1036 | 1037 | ||
1037 | return policy; | 1038 | return policy; |
1038 | 1039 | ||
@@ -1091,15 +1092,9 @@ static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu, | |||
1091 | } | 1092 | } |
1092 | 1093 | ||
1093 | down_write(&policy->rwsem); | 1094 | down_write(&policy->rwsem); |
1094 | |||
1095 | policy->last_cpu = policy->cpu; | ||
1096 | policy->cpu = cpu; | 1095 | policy->cpu = cpu; |
1097 | |||
1098 | up_write(&policy->rwsem); | 1096 | up_write(&policy->rwsem); |
1099 | 1097 | ||
1100 | blocking_notifier_call_chain(&cpufreq_policy_notifier_list, | ||
1101 | CPUFREQ_UPDATE_POLICY_CPU, policy); | ||
1102 | |||
1103 | return 0; | 1098 | return 0; |
1104 | } | 1099 | } |
1105 | 1100 | ||
@@ -1110,41 +1105,32 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) | |||
1110 | struct cpufreq_policy *policy; | 1105 | struct cpufreq_policy *policy; |
1111 | unsigned long flags; | 1106 | unsigned long flags; |
1112 | bool recover_policy = cpufreq_suspended; | 1107 | bool recover_policy = cpufreq_suspended; |
1113 | #ifdef CONFIG_HOTPLUG_CPU | ||
1114 | struct cpufreq_policy *tpolicy; | ||
1115 | #endif | ||
1116 | 1108 | ||
1117 | if (cpu_is_offline(cpu)) | 1109 | if (cpu_is_offline(cpu)) |
1118 | return 0; | 1110 | return 0; |
1119 | 1111 | ||
1120 | pr_debug("adding CPU %u\n", cpu); | 1112 | pr_debug("adding CPU %u\n", cpu); |
1121 | 1113 | ||
1122 | #ifdef CONFIG_SMP | ||
1123 | /* check whether a different CPU already registered this | 1114 | /* check whether a different CPU already registered this |
1124 | * CPU because it is in the same boat. */ | 1115 | * CPU because it is in the same boat. */ |
1125 | policy = cpufreq_cpu_get(cpu); | 1116 | policy = cpufreq_cpu_get_raw(cpu); |
1126 | if (unlikely(policy)) { | 1117 | if (unlikely(policy)) |
1127 | cpufreq_cpu_put(policy); | ||
1128 | return 0; | 1118 | return 0; |
1129 | } | ||
1130 | #endif | ||
1131 | 1119 | ||
1132 | if (!down_read_trylock(&cpufreq_rwsem)) | 1120 | if (!down_read_trylock(&cpufreq_rwsem)) |
1133 | return 0; | 1121 | return 0; |
1134 | 1122 | ||
1135 | #ifdef CONFIG_HOTPLUG_CPU | ||
1136 | /* Check if this cpu was hot-unplugged earlier and has siblings */ | 1123 | /* Check if this cpu was hot-unplugged earlier and has siblings */ |
1137 | read_lock_irqsave(&cpufreq_driver_lock, flags); | 1124 | read_lock_irqsave(&cpufreq_driver_lock, flags); |
1138 | list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) { | 1125 | for_each_policy(policy) { |
1139 | if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) { | 1126 | if (cpumask_test_cpu(cpu, policy->related_cpus)) { |
1140 | read_unlock_irqrestore(&cpufreq_driver_lock, flags); | 1127 | read_unlock_irqrestore(&cpufreq_driver_lock, flags); |
1141 | ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev); | 1128 | ret = cpufreq_add_policy_cpu(policy, cpu, dev); |
1142 | up_read(&cpufreq_rwsem); | 1129 | up_read(&cpufreq_rwsem); |
1143 | return ret; | 1130 | return ret; |
1144 | } | 1131 | } |
1145 | } | 1132 | } |
1146 | read_unlock_irqrestore(&cpufreq_driver_lock, flags); | 1133 | read_unlock_irqrestore(&cpufreq_driver_lock, flags); |
1147 | #endif | ||
1148 | 1134 | ||
1149 | /* | 1135 | /* |
1150 | * Restore the saved policy when doing light-weight init and fall back | 1136 | * Restore the saved policy when doing light-weight init and fall back |
@@ -1171,9 +1157,6 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) | |||
1171 | 1157 | ||
1172 | cpumask_copy(policy->cpus, cpumask_of(cpu)); | 1158 | cpumask_copy(policy->cpus, cpumask_of(cpu)); |
1173 | 1159 | ||
1174 | init_completion(&policy->kobj_unregister); | ||
1175 | INIT_WORK(&policy->update, handle_update); | ||
1176 | |||
1177 | /* call driver. From then on the cpufreq must be able | 1160 | /* call driver. From then on the cpufreq must be able |
1178 | * to accept all calls to ->verify and ->setpolicy for this CPU | 1161 | * to accept all calls to ->verify and ->setpolicy for this CPU |
1179 | */ | 1162 | */ |
@@ -1371,11 +1354,10 @@ static int __cpufreq_remove_dev_prepare(struct device *dev, | |||
1371 | pr_err("%s: Failed to stop governor\n", __func__); | 1354 | pr_err("%s: Failed to stop governor\n", __func__); |
1372 | return ret; | 1355 | return ret; |
1373 | } | 1356 | } |
1374 | } | ||
1375 | 1357 | ||
1376 | if (!cpufreq_driver->setpolicy) | ||
1377 | strncpy(per_cpu(cpufreq_cpu_governor, cpu), | 1358 | strncpy(per_cpu(cpufreq_cpu_governor, cpu), |
1378 | policy->governor->name, CPUFREQ_NAME_LEN); | 1359 | policy->governor->name, CPUFREQ_NAME_LEN); |
1360 | } | ||
1379 | 1361 | ||
1380 | down_read(&policy->rwsem); | 1362 | down_read(&policy->rwsem); |
1381 | cpus = cpumask_weight(policy->cpus); | 1363 | cpus = cpumask_weight(policy->cpus); |
@@ -1416,9 +1398,10 @@ static int __cpufreq_remove_dev_finish(struct device *dev, | |||
1416 | unsigned long flags; | 1398 | unsigned long flags; |
1417 | struct cpufreq_policy *policy; | 1399 | struct cpufreq_policy *policy; |
1418 | 1400 | ||
1419 | read_lock_irqsave(&cpufreq_driver_lock, flags); | 1401 | write_lock_irqsave(&cpufreq_driver_lock, flags); |
1420 | policy = per_cpu(cpufreq_cpu_data, cpu); | 1402 | policy = per_cpu(cpufreq_cpu_data, cpu); |
1421 | read_unlock_irqrestore(&cpufreq_driver_lock, flags); | 1403 | per_cpu(cpufreq_cpu_data, cpu) = NULL; |
1404 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
1422 | 1405 | ||
1423 | if (!policy) { | 1406 | if (!policy) { |
1424 | pr_debug("%s: No cpu_data found\n", __func__); | 1407 | pr_debug("%s: No cpu_data found\n", __func__); |
@@ -1473,7 +1456,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev, | |||
1473 | } | 1456 | } |
1474 | } | 1457 | } |
1475 | 1458 | ||
1476 | per_cpu(cpufreq_cpu_data, cpu) = NULL; | ||
1477 | return 0; | 1459 | return 0; |
1478 | } | 1460 | } |
1479 | 1461 | ||
@@ -1510,30 +1492,23 @@ static void handle_update(struct work_struct *work) | |||
1510 | /** | 1492 | /** |
1511 | * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're | 1493 | * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're |
1512 | * in deep trouble. | 1494 | * in deep trouble. |
1513 | * @cpu: cpu number | 1495 | * @policy: policy managing CPUs |
1514 | * @old_freq: CPU frequency the kernel thinks the CPU runs at | ||
1515 | * @new_freq: CPU frequency the CPU actually runs at | 1496 | * @new_freq: CPU frequency the CPU actually runs at |
1516 | * | 1497 | * |
1517 | * We adjust to current frequency first, and need to clean up later. | 1498 | * We adjust to current frequency first, and need to clean up later. |
1518 | * So either call to cpufreq_update_policy() or schedule handle_update()). | 1499 | * So either call to cpufreq_update_policy() or schedule handle_update()). |
1519 | */ | 1500 | */ |
1520 | static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, | 1501 | static void cpufreq_out_of_sync(struct cpufreq_policy *policy, |
1521 | unsigned int new_freq) | 1502 | unsigned int new_freq) |
1522 | { | 1503 | { |
1523 | struct cpufreq_policy *policy; | ||
1524 | struct cpufreq_freqs freqs; | 1504 | struct cpufreq_freqs freqs; |
1525 | unsigned long flags; | ||
1526 | 1505 | ||
1527 | pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n", | 1506 | pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n", |
1528 | old_freq, new_freq); | 1507 | policy->cur, new_freq); |
1529 | 1508 | ||
1530 | freqs.old = old_freq; | 1509 | freqs.old = policy->cur; |
1531 | freqs.new = new_freq; | 1510 | freqs.new = new_freq; |
1532 | 1511 | ||
1533 | read_lock_irqsave(&cpufreq_driver_lock, flags); | ||
1534 | policy = per_cpu(cpufreq_cpu_data, cpu); | ||
1535 | read_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
1536 | |||
1537 | cpufreq_freq_transition_begin(policy, &freqs); | 1512 | cpufreq_freq_transition_begin(policy, &freqs); |
1538 | cpufreq_freq_transition_end(policy, &freqs, 0); | 1513 | cpufreq_freq_transition_end(policy, &freqs, 0); |
1539 | } | 1514 | } |
@@ -1583,22 +1558,21 @@ unsigned int cpufreq_quick_get_max(unsigned int cpu) | |||
1583 | } | 1558 | } |
1584 | EXPORT_SYMBOL(cpufreq_quick_get_max); | 1559 | EXPORT_SYMBOL(cpufreq_quick_get_max); |
1585 | 1560 | ||
1586 | static unsigned int __cpufreq_get(unsigned int cpu) | 1561 | static unsigned int __cpufreq_get(struct cpufreq_policy *policy) |
1587 | { | 1562 | { |
1588 | struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); | ||
1589 | unsigned int ret_freq = 0; | 1563 | unsigned int ret_freq = 0; |
1590 | 1564 | ||
1591 | if (!cpufreq_driver->get) | 1565 | if (!cpufreq_driver->get) |
1592 | return ret_freq; | 1566 | return ret_freq; |
1593 | 1567 | ||
1594 | ret_freq = cpufreq_driver->get(cpu); | 1568 | ret_freq = cpufreq_driver->get(policy->cpu); |
1595 | 1569 | ||
1596 | if (ret_freq && policy->cur && | 1570 | if (ret_freq && policy->cur && |
1597 | !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { | 1571 | !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { |
1598 | /* verify no discrepancy between actual and | 1572 | /* verify no discrepancy between actual and |
1599 | saved value exists */ | 1573 | saved value exists */ |
1600 | if (unlikely(ret_freq != policy->cur)) { | 1574 | if (unlikely(ret_freq != policy->cur)) { |
1601 | cpufreq_out_of_sync(cpu, policy->cur, ret_freq); | 1575 | cpufreq_out_of_sync(policy, ret_freq); |
1602 | schedule_work(&policy->update); | 1576 | schedule_work(&policy->update); |
1603 | } | 1577 | } |
1604 | } | 1578 | } |
@@ -1619,7 +1593,7 @@ unsigned int cpufreq_get(unsigned int cpu) | |||
1619 | 1593 | ||
1620 | if (policy) { | 1594 | if (policy) { |
1621 | down_read(&policy->rwsem); | 1595 | down_read(&policy->rwsem); |
1622 | ret_freq = __cpufreq_get(cpu); | 1596 | ret_freq = __cpufreq_get(policy); |
1623 | up_read(&policy->rwsem); | 1597 | up_read(&policy->rwsem); |
1624 | 1598 | ||
1625 | cpufreq_cpu_put(policy); | 1599 | cpufreq_cpu_put(policy); |
@@ -1682,7 +1656,7 @@ void cpufreq_suspend(void) | |||
1682 | 1656 | ||
1683 | pr_debug("%s: Suspending Governors\n", __func__); | 1657 | pr_debug("%s: Suspending Governors\n", __func__); |
1684 | 1658 | ||
1685 | list_for_each_entry(policy, &cpufreq_policy_list, policy_list) { | 1659 | for_each_policy(policy) { |
1686 | if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP)) | 1660 | if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP)) |
1687 | pr_err("%s: Failed to stop governor for policy: %p\n", | 1661 | pr_err("%s: Failed to stop governor for policy: %p\n", |
1688 | __func__, policy); | 1662 | __func__, policy); |
@@ -1716,7 +1690,7 @@ void cpufreq_resume(void) | |||
1716 | 1690 | ||
1717 | pr_debug("%s: Resuming Governors\n", __func__); | 1691 | pr_debug("%s: Resuming Governors\n", __func__); |
1718 | 1692 | ||
1719 | list_for_each_entry(policy, &cpufreq_policy_list, policy_list) { | 1693 | for_each_policy(policy) { |
1720 | if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) | 1694 | if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) |
1721 | pr_err("%s: Failed to resume driver: %p\n", __func__, | 1695 | pr_err("%s: Failed to resume driver: %p\n", __func__, |
1722 | policy); | 1696 | policy); |
@@ -2006,10 +1980,6 @@ int cpufreq_driver_target(struct cpufreq_policy *policy, | |||
2006 | } | 1980 | } |
2007 | EXPORT_SYMBOL_GPL(cpufreq_driver_target); | 1981 | EXPORT_SYMBOL_GPL(cpufreq_driver_target); |
2008 | 1982 | ||
2009 | /* | ||
2010 | * when "event" is CPUFREQ_GOV_LIMITS | ||
2011 | */ | ||
2012 | |||
2013 | static int __cpufreq_governor(struct cpufreq_policy *policy, | 1983 | static int __cpufreq_governor(struct cpufreq_policy *policy, |
2014 | unsigned int event) | 1984 | unsigned int event) |
2015 | { | 1985 | { |
@@ -2107,7 +2077,7 @@ int cpufreq_register_governor(struct cpufreq_governor *governor) | |||
2107 | 2077 | ||
2108 | governor->initialized = 0; | 2078 | governor->initialized = 0; |
2109 | err = -EBUSY; | 2079 | err = -EBUSY; |
2110 | if (__find_governor(governor->name) == NULL) { | 2080 | if (!find_governor(governor->name)) { |
2111 | err = 0; | 2081 | err = 0; |
2112 | list_add(&governor->governor_list, &cpufreq_governor_list); | 2082 | list_add(&governor->governor_list, &cpufreq_governor_list); |
2113 | } | 2083 | } |
@@ -2307,8 +2277,7 @@ int cpufreq_update_policy(unsigned int cpu) | |||
2307 | policy->cur = new_policy.cur; | 2277 | policy->cur = new_policy.cur; |
2308 | } else { | 2278 | } else { |
2309 | if (policy->cur != new_policy.cur && has_target()) | 2279 | if (policy->cur != new_policy.cur && has_target()) |
2310 | cpufreq_out_of_sync(cpu, policy->cur, | 2280 | cpufreq_out_of_sync(policy, new_policy.cur); |
2311 | new_policy.cur); | ||
2312 | } | 2281 | } |
2313 | } | 2282 | } |
2314 | 2283 | ||
@@ -2364,7 +2333,7 @@ static int cpufreq_boost_set_sw(int state) | |||
2364 | struct cpufreq_policy *policy; | 2333 | struct cpufreq_policy *policy; |
2365 | int ret = -EINVAL; | 2334 | int ret = -EINVAL; |
2366 | 2335 | ||
2367 | list_for_each_entry(policy, &cpufreq_policy_list, policy_list) { | 2336 | for_each_policy(policy) { |
2368 | freq_table = cpufreq_frequency_get_table(policy->cpu); | 2337 | freq_table = cpufreq_frequency_get_table(policy->cpu); |
2369 | if (freq_table) { | 2338 | if (freq_table) { |
2370 | ret = cpufreq_frequency_table_cpuinfo(policy, | 2339 | ret = cpufreq_frequency_table_cpuinfo(policy, |
@@ -2454,9 +2423,6 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) | |||
2454 | 2423 | ||
2455 | pr_debug("trying to register driver %s\n", driver_data->name); | 2424 | pr_debug("trying to register driver %s\n", driver_data->name); |
2456 | 2425 | ||
2457 | if (driver_data->setpolicy) | ||
2458 | driver_data->flags |= CPUFREQ_CONST_LOOPS; | ||
2459 | |||
2460 | write_lock_irqsave(&cpufreq_driver_lock, flags); | 2426 | write_lock_irqsave(&cpufreq_driver_lock, flags); |
2461 | if (cpufreq_driver) { | 2427 | if (cpufreq_driver) { |
2462 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); | 2428 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); |
@@ -2465,6 +2431,9 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) | |||
2465 | cpufreq_driver = driver_data; | 2431 | cpufreq_driver = driver_data; |
2466 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); | 2432 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); |
2467 | 2433 | ||
2434 | if (driver_data->setpolicy) | ||
2435 | driver_data->flags |= CPUFREQ_CONST_LOOPS; | ||
2436 | |||
2468 | if (cpufreq_boost_supported()) { | 2437 | if (cpufreq_boost_supported()) { |
2469 | /* | 2438 | /* |
2470 | * Check if driver provides function to enable boost - | 2439 | * Check if driver provides function to enable boost - |
@@ -2485,23 +2454,12 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) | |||
2485 | if (ret) | 2454 | if (ret) |
2486 | goto err_boost_unreg; | 2455 | goto err_boost_unreg; |
2487 | 2456 | ||
2488 | if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) { | 2457 | if (!(cpufreq_driver->flags & CPUFREQ_STICKY) && |
2489 | int i; | 2458 | list_empty(&cpufreq_policy_list)) { |
2490 | ret = -ENODEV; | ||
2491 | |||
2492 | /* check for at least one working CPU */ | ||
2493 | for (i = 0; i < nr_cpu_ids; i++) | ||
2494 | if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) { | ||
2495 | ret = 0; | ||
2496 | break; | ||
2497 | } | ||
2498 | |||
2499 | /* if all ->init() calls failed, unregister */ | 2459 | /* if all ->init() calls failed, unregister */ |
2500 | if (ret) { | 2460 | pr_debug("%s: No CPU initialized for driver %s\n", __func__, |
2501 | pr_debug("no CPU initialized for driver %s\n", | 2461 | driver_data->name); |
2502 | driver_data->name); | 2462 | goto err_if_unreg; |
2503 | goto err_if_unreg; | ||
2504 | } | ||
2505 | } | 2463 | } |
2506 | 2464 | ||
2507 | register_hotcpu_notifier(&cpufreq_cpu_notifier); | 2465 | register_hotcpu_notifier(&cpufreq_cpu_notifier); |
@@ -2556,6 +2514,14 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver) | |||
2556 | } | 2514 | } |
2557 | EXPORT_SYMBOL_GPL(cpufreq_unregister_driver); | 2515 | EXPORT_SYMBOL_GPL(cpufreq_unregister_driver); |
2558 | 2516 | ||
2517 | /* | ||
2518 | * Stop cpufreq at shutdown to make sure it isn't holding any locks | ||
2519 | * or mutexes when secondary CPUs are halted. | ||
2520 | */ | ||
2521 | static struct syscore_ops cpufreq_syscore_ops = { | ||
2522 | .shutdown = cpufreq_suspend, | ||
2523 | }; | ||
2524 | |||
2559 | static int __init cpufreq_core_init(void) | 2525 | static int __init cpufreq_core_init(void) |
2560 | { | 2526 | { |
2561 | if (cpufreq_disabled()) | 2527 | if (cpufreq_disabled()) |
@@ -2564,6 +2530,8 @@ static int __init cpufreq_core_init(void) | |||
2564 | cpufreq_global_kobject = kobject_create(); | 2530 | cpufreq_global_kobject = kobject_create(); |
2565 | BUG_ON(!cpufreq_global_kobject); | 2531 | BUG_ON(!cpufreq_global_kobject); |
2566 | 2532 | ||
2533 | register_syscore_ops(&cpufreq_syscore_ops); | ||
2534 | |||
2567 | return 0; | 2535 | return 0; |
2568 | } | 2536 | } |
2569 | core_initcall(cpufreq_core_init); | 2537 | core_initcall(cpufreq_core_init); |
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index 0cd9b4dcef99..5e370a30a964 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c | |||
@@ -18,7 +18,6 @@ | |||
18 | static spinlock_t cpufreq_stats_lock; | 18 | static spinlock_t cpufreq_stats_lock; |
19 | 19 | ||
20 | struct cpufreq_stats { | 20 | struct cpufreq_stats { |
21 | unsigned int cpu; | ||
22 | unsigned int total_trans; | 21 | unsigned int total_trans; |
23 | unsigned long long last_time; | 22 | unsigned long long last_time; |
24 | unsigned int max_state; | 23 | unsigned int max_state; |
@@ -31,50 +30,33 @@ struct cpufreq_stats { | |||
31 | #endif | 30 | #endif |
32 | }; | 31 | }; |
33 | 32 | ||
34 | static DEFINE_PER_CPU(struct cpufreq_stats *, cpufreq_stats_table); | 33 | static int cpufreq_stats_update(struct cpufreq_stats *stats) |
35 | |||
36 | struct cpufreq_stats_attribute { | ||
37 | struct attribute attr; | ||
38 | ssize_t(*show) (struct cpufreq_stats *, char *); | ||
39 | }; | ||
40 | |||
41 | static int cpufreq_stats_update(unsigned int cpu) | ||
42 | { | 34 | { |
43 | struct cpufreq_stats *stat; | 35 | unsigned long long cur_time = get_jiffies_64(); |
44 | unsigned long long cur_time; | ||
45 | 36 | ||
46 | cur_time = get_jiffies_64(); | ||
47 | spin_lock(&cpufreq_stats_lock); | 37 | spin_lock(&cpufreq_stats_lock); |
48 | stat = per_cpu(cpufreq_stats_table, cpu); | 38 | stats->time_in_state[stats->last_index] += cur_time - stats->last_time; |
49 | if (stat->time_in_state) | 39 | stats->last_time = cur_time; |
50 | stat->time_in_state[stat->last_index] += | ||
51 | cur_time - stat->last_time; | ||
52 | stat->last_time = cur_time; | ||
53 | spin_unlock(&cpufreq_stats_lock); | 40 | spin_unlock(&cpufreq_stats_lock); |
54 | return 0; | 41 | return 0; |
55 | } | 42 | } |
56 | 43 | ||
57 | static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf) | 44 | static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf) |
58 | { | 45 | { |
59 | struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu); | 46 | return sprintf(buf, "%d\n", policy->stats->total_trans); |
60 | if (!stat) | ||
61 | return 0; | ||
62 | return sprintf(buf, "%d\n", | ||
63 | per_cpu(cpufreq_stats_table, stat->cpu)->total_trans); | ||
64 | } | 47 | } |
65 | 48 | ||
66 | static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf) | 49 | static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf) |
67 | { | 50 | { |
51 | struct cpufreq_stats *stats = policy->stats; | ||
68 | ssize_t len = 0; | 52 | ssize_t len = 0; |
69 | int i; | 53 | int i; |
70 | struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu); | 54 | |
71 | if (!stat) | 55 | cpufreq_stats_update(stats); |
72 | return 0; | 56 | for (i = 0; i < stats->state_num; i++) { |
73 | cpufreq_stats_update(stat->cpu); | 57 | len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i], |
74 | for (i = 0; i < stat->state_num; i++) { | ||
75 | len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i], | ||
76 | (unsigned long long) | 58 | (unsigned long long) |
77 | jiffies_64_to_clock_t(stat->time_in_state[i])); | 59 | jiffies_64_to_clock_t(stats->time_in_state[i])); |
78 | } | 60 | } |
79 | return len; | 61 | return len; |
80 | } | 62 | } |
@@ -82,38 +64,35 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf) | |||
82 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS | 64 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS |
83 | static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf) | 65 | static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf) |
84 | { | 66 | { |
67 | struct cpufreq_stats *stats = policy->stats; | ||
85 | ssize_t len = 0; | 68 | ssize_t len = 0; |
86 | int i, j; | 69 | int i, j; |
87 | 70 | ||
88 | struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu); | ||
89 | if (!stat) | ||
90 | return 0; | ||
91 | cpufreq_stats_update(stat->cpu); | ||
92 | len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n"); | 71 | len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n"); |
93 | len += snprintf(buf + len, PAGE_SIZE - len, " : "); | 72 | len += snprintf(buf + len, PAGE_SIZE - len, " : "); |
94 | for (i = 0; i < stat->state_num; i++) { | 73 | for (i = 0; i < stats->state_num; i++) { |
95 | if (len >= PAGE_SIZE) | 74 | if (len >= PAGE_SIZE) |
96 | break; | 75 | break; |
97 | len += snprintf(buf + len, PAGE_SIZE - len, "%9u ", | 76 | len += snprintf(buf + len, PAGE_SIZE - len, "%9u ", |
98 | stat->freq_table[i]); | 77 | stats->freq_table[i]); |
99 | } | 78 | } |
100 | if (len >= PAGE_SIZE) | 79 | if (len >= PAGE_SIZE) |
101 | return PAGE_SIZE; | 80 | return PAGE_SIZE; |
102 | 81 | ||
103 | len += snprintf(buf + len, PAGE_SIZE - len, "\n"); | 82 | len += snprintf(buf + len, PAGE_SIZE - len, "\n"); |
104 | 83 | ||
105 | for (i = 0; i < stat->state_num; i++) { | 84 | for (i = 0; i < stats->state_num; i++) { |
106 | if (len >= PAGE_SIZE) | 85 | if (len >= PAGE_SIZE) |
107 | break; | 86 | break; |
108 | 87 | ||
109 | len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ", | 88 | len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ", |
110 | stat->freq_table[i]); | 89 | stats->freq_table[i]); |
111 | 90 | ||
112 | for (j = 0; j < stat->state_num; j++) { | 91 | for (j = 0; j < stats->state_num; j++) { |
113 | if (len >= PAGE_SIZE) | 92 | if (len >= PAGE_SIZE) |
114 | break; | 93 | break; |
115 | len += snprintf(buf + len, PAGE_SIZE - len, "%9u ", | 94 | len += snprintf(buf + len, PAGE_SIZE - len, "%9u ", |
116 | stat->trans_table[i*stat->max_state+j]); | 95 | stats->trans_table[i*stats->max_state+j]); |
117 | } | 96 | } |
118 | if (len >= PAGE_SIZE) | 97 | if (len >= PAGE_SIZE) |
119 | break; | 98 | break; |
@@ -142,28 +121,29 @@ static struct attribute_group stats_attr_group = { | |||
142 | .name = "stats" | 121 | .name = "stats" |
143 | }; | 122 | }; |
144 | 123 | ||
145 | static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq) | 124 | static int freq_table_get_index(struct cpufreq_stats *stats, unsigned int freq) |
146 | { | 125 | { |
147 | int index; | 126 | int index; |
148 | for (index = 0; index < stat->max_state; index++) | 127 | for (index = 0; index < stats->max_state; index++) |
149 | if (stat->freq_table[index] == freq) | 128 | if (stats->freq_table[index] == freq) |
150 | return index; | 129 | return index; |
151 | return -1; | 130 | return -1; |
152 | } | 131 | } |
153 | 132 | ||
154 | static void __cpufreq_stats_free_table(struct cpufreq_policy *policy) | 133 | static void __cpufreq_stats_free_table(struct cpufreq_policy *policy) |
155 | { | 134 | { |
156 | struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu); | 135 | struct cpufreq_stats *stats = policy->stats; |
157 | 136 | ||
158 | if (!stat) | 137 | /* Already freed */ |
138 | if (!stats) | ||
159 | return; | 139 | return; |
160 | 140 | ||
161 | pr_debug("%s: Free stat table\n", __func__); | 141 | pr_debug("%s: Free stats table\n", __func__); |
162 | 142 | ||
163 | sysfs_remove_group(&policy->kobj, &stats_attr_group); | 143 | sysfs_remove_group(&policy->kobj, &stats_attr_group); |
164 | kfree(stat->time_in_state); | 144 | kfree(stats->time_in_state); |
165 | kfree(stat); | 145 | kfree(stats); |
166 | per_cpu(cpufreq_stats_table, policy->cpu) = NULL; | 146 | policy->stats = NULL; |
167 | } | 147 | } |
168 | 148 | ||
169 | static void cpufreq_stats_free_table(unsigned int cpu) | 149 | static void cpufreq_stats_free_table(unsigned int cpu) |
@@ -174,37 +154,33 @@ static void cpufreq_stats_free_table(unsigned int cpu) | |||
174 | if (!policy) | 154 | if (!policy) |
175 | return; | 155 | return; |
176 | 156 | ||
177 | if (cpufreq_frequency_get_table(policy->cpu)) | 157 | __cpufreq_stats_free_table(policy); |
178 | __cpufreq_stats_free_table(policy); | ||
179 | 158 | ||
180 | cpufreq_cpu_put(policy); | 159 | cpufreq_cpu_put(policy); |
181 | } | 160 | } |
182 | 161 | ||
183 | static int __cpufreq_stats_create_table(struct cpufreq_policy *policy) | 162 | static int __cpufreq_stats_create_table(struct cpufreq_policy *policy) |
184 | { | 163 | { |
185 | unsigned int i, count = 0, ret = 0; | 164 | unsigned int i = 0, count = 0, ret = -ENOMEM; |
186 | struct cpufreq_stats *stat; | 165 | struct cpufreq_stats *stats; |
187 | unsigned int alloc_size; | 166 | unsigned int alloc_size; |
188 | unsigned int cpu = policy->cpu; | 167 | unsigned int cpu = policy->cpu; |
189 | struct cpufreq_frequency_table *pos, *table; | 168 | struct cpufreq_frequency_table *pos, *table; |
190 | 169 | ||
170 | /* We need cpufreq table for creating stats table */ | ||
191 | table = cpufreq_frequency_get_table(cpu); | 171 | table = cpufreq_frequency_get_table(cpu); |
192 | if (unlikely(!table)) | 172 | if (unlikely(!table)) |
193 | return 0; | 173 | return 0; |
194 | 174 | ||
195 | if (per_cpu(cpufreq_stats_table, cpu)) | 175 | /* stats already initialized */ |
196 | return -EBUSY; | 176 | if (policy->stats) |
197 | stat = kzalloc(sizeof(*stat), GFP_KERNEL); | 177 | return -EEXIST; |
198 | if ((stat) == NULL) | ||
199 | return -ENOMEM; | ||
200 | |||
201 | ret = sysfs_create_group(&policy->kobj, &stats_attr_group); | ||
202 | if (ret) | ||
203 | goto error_out; | ||
204 | 178 | ||
205 | stat->cpu = cpu; | 179 | stats = kzalloc(sizeof(*stats), GFP_KERNEL); |
206 | per_cpu(cpufreq_stats_table, cpu) = stat; | 180 | if (!stats) |
181 | return -ENOMEM; | ||
207 | 182 | ||
183 | /* Find total allocation size */ | ||
208 | cpufreq_for_each_valid_entry(pos, table) | 184 | cpufreq_for_each_valid_entry(pos, table) |
209 | count++; | 185 | count++; |
210 | 186 | ||
@@ -213,32 +189,40 @@ static int __cpufreq_stats_create_table(struct cpufreq_policy *policy) | |||
213 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS | 189 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS |
214 | alloc_size += count * count * sizeof(int); | 190 | alloc_size += count * count * sizeof(int); |
215 | #endif | 191 | #endif |
216 | stat->max_state = count; | 192 | |
217 | stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL); | 193 | /* Allocate memory for time_in_state/freq_table/trans_table in one go */ |
218 | if (!stat->time_in_state) { | 194 | stats->time_in_state = kzalloc(alloc_size, GFP_KERNEL); |
219 | ret = -ENOMEM; | 195 | if (!stats->time_in_state) |
220 | goto error_alloc; | 196 | goto free_stat; |
221 | } | 197 | |
222 | stat->freq_table = (unsigned int *)(stat->time_in_state + count); | 198 | stats->freq_table = (unsigned int *)(stats->time_in_state + count); |
223 | 199 | ||
224 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS | 200 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS |
225 | stat->trans_table = stat->freq_table + count; | 201 | stats->trans_table = stats->freq_table + count; |
226 | #endif | 202 | #endif |
227 | i = 0; | 203 | |
204 | stats->max_state = count; | ||
205 | |||
206 | /* Find valid-unique entries */ | ||
228 | cpufreq_for_each_valid_entry(pos, table) | 207 | cpufreq_for_each_valid_entry(pos, table) |
229 | if (freq_table_get_index(stat, pos->frequency) == -1) | 208 | if (freq_table_get_index(stats, pos->frequency) == -1) |
230 | stat->freq_table[i++] = pos->frequency; | 209 | stats->freq_table[i++] = pos->frequency; |
231 | stat->state_num = i; | 210 | |
232 | spin_lock(&cpufreq_stats_lock); | 211 | stats->state_num = i; |
233 | stat->last_time = get_jiffies_64(); | 212 | stats->last_time = get_jiffies_64(); |
234 | stat->last_index = freq_table_get_index(stat, policy->cur); | 213 | stats->last_index = freq_table_get_index(stats, policy->cur); |
235 | spin_unlock(&cpufreq_stats_lock); | 214 | |
236 | return 0; | 215 | policy->stats = stats; |
237 | error_alloc: | 216 | ret = sysfs_create_group(&policy->kobj, &stats_attr_group); |
238 | sysfs_remove_group(&policy->kobj, &stats_attr_group); | 217 | if (!ret) |
239 | error_out: | 218 | return 0; |
240 | kfree(stat); | 219 | |
241 | per_cpu(cpufreq_stats_table, cpu) = NULL; | 220 | /* We failed, release resources */ |
221 | policy->stats = NULL; | ||
222 | kfree(stats->time_in_state); | ||
223 | free_stat: | ||
224 | kfree(stats); | ||
225 | |||
242 | return ret; | 226 | return ret; |
243 | } | 227 | } |
244 | 228 | ||
@@ -259,30 +243,12 @@ static void cpufreq_stats_create_table(unsigned int cpu) | |||
259 | cpufreq_cpu_put(policy); | 243 | cpufreq_cpu_put(policy); |
260 | } | 244 | } |
261 | 245 | ||
262 | static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy) | ||
263 | { | ||
264 | struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, | ||
265 | policy->last_cpu); | ||
266 | |||
267 | pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n", | ||
268 | policy->cpu, policy->last_cpu); | ||
269 | per_cpu(cpufreq_stats_table, policy->cpu) = per_cpu(cpufreq_stats_table, | ||
270 | policy->last_cpu); | ||
271 | per_cpu(cpufreq_stats_table, policy->last_cpu) = NULL; | ||
272 | stat->cpu = policy->cpu; | ||
273 | } | ||
274 | |||
275 | static int cpufreq_stat_notifier_policy(struct notifier_block *nb, | 246 | static int cpufreq_stat_notifier_policy(struct notifier_block *nb, |
276 | unsigned long val, void *data) | 247 | unsigned long val, void *data) |
277 | { | 248 | { |
278 | int ret = 0; | 249 | int ret = 0; |
279 | struct cpufreq_policy *policy = data; | 250 | struct cpufreq_policy *policy = data; |
280 | 251 | ||
281 | if (val == CPUFREQ_UPDATE_POLICY_CPU) { | ||
282 | cpufreq_stats_update_policy_cpu(policy); | ||
283 | return 0; | ||
284 | } | ||
285 | |||
286 | if (val == CPUFREQ_CREATE_POLICY) | 252 | if (val == CPUFREQ_CREATE_POLICY) |
287 | ret = __cpufreq_stats_create_table(policy); | 253 | ret = __cpufreq_stats_create_table(policy); |
288 | else if (val == CPUFREQ_REMOVE_POLICY) | 254 | else if (val == CPUFREQ_REMOVE_POLICY) |
@@ -295,35 +261,45 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb, | |||
295 | unsigned long val, void *data) | 261 | unsigned long val, void *data) |
296 | { | 262 | { |
297 | struct cpufreq_freqs *freq = data; | 263 | struct cpufreq_freqs *freq = data; |
298 | struct cpufreq_stats *stat; | 264 | struct cpufreq_policy *policy = cpufreq_cpu_get(freq->cpu); |
265 | struct cpufreq_stats *stats; | ||
299 | int old_index, new_index; | 266 | int old_index, new_index; |
300 | 267 | ||
301 | if (val != CPUFREQ_POSTCHANGE) | 268 | if (!policy) { |
269 | pr_err("%s: No policy found\n", __func__); | ||
302 | return 0; | 270 | return 0; |
271 | } | ||
303 | 272 | ||
304 | stat = per_cpu(cpufreq_stats_table, freq->cpu); | 273 | if (val != CPUFREQ_POSTCHANGE) |
305 | if (!stat) | 274 | goto put_policy; |
306 | return 0; | ||
307 | 275 | ||
308 | old_index = stat->last_index; | 276 | if (!policy->stats) { |
309 | new_index = freq_table_get_index(stat, freq->new); | 277 | pr_debug("%s: No stats found\n", __func__); |
278 | goto put_policy; | ||
279 | } | ||
310 | 280 | ||
311 | /* We can't do stat->time_in_state[-1]= .. */ | 281 | stats = policy->stats; |
312 | if (old_index == -1 || new_index == -1) | 282 | |
313 | return 0; | 283 | old_index = stats->last_index; |
284 | new_index = freq_table_get_index(stats, freq->new); | ||
314 | 285 | ||
315 | cpufreq_stats_update(freq->cpu); | 286 | /* We can't do stats->time_in_state[-1]= .. */ |
287 | if (old_index == -1 || new_index == -1) | ||
288 | goto put_policy; | ||
316 | 289 | ||
317 | if (old_index == new_index) | 290 | if (old_index == new_index) |
318 | return 0; | 291 | goto put_policy; |
319 | 292 | ||
320 | spin_lock(&cpufreq_stats_lock); | 293 | cpufreq_stats_update(stats); |
321 | stat->last_index = new_index; | 294 | |
295 | stats->last_index = new_index; | ||
322 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS | 296 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS |
323 | stat->trans_table[old_index * stat->max_state + new_index]++; | 297 | stats->trans_table[old_index * stats->max_state + new_index]++; |
324 | #endif | 298 | #endif |
325 | stat->total_trans++; | 299 | stats->total_trans++; |
326 | spin_unlock(&cpufreq_stats_lock); | 300 | |
301 | put_policy: | ||
302 | cpufreq_cpu_put(policy); | ||
327 | return 0; | 303 | return 0; |
328 | } | 304 | } |
329 | 305 | ||
@@ -374,8 +350,7 @@ static void __exit cpufreq_stats_exit(void) | |||
374 | } | 350 | } |
375 | 351 | ||
376 | MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>"); | 352 | MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>"); |
377 | MODULE_DESCRIPTION("'cpufreq_stats' - A driver to export cpufreq stats " | 353 | MODULE_DESCRIPTION("Export cpufreq stats via sysfs"); |
378 | "through sysfs filesystem"); | ||
379 | MODULE_LICENSE("GPL"); | 354 | MODULE_LICENSE("GPL"); |
380 | 355 | ||
381 | module_init(cpufreq_stats_init); | 356 | module_init(cpufreq_stats_init); |
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 742eefba12c2..872c5772c5d3 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
@@ -148,6 +148,8 @@ struct perf_limits { | |||
148 | int32_t min_perf; | 148 | int32_t min_perf; |
149 | int max_policy_pct; | 149 | int max_policy_pct; |
150 | int max_sysfs_pct; | 150 | int max_sysfs_pct; |
151 | int min_policy_pct; | ||
152 | int min_sysfs_pct; | ||
151 | }; | 153 | }; |
152 | 154 | ||
153 | static struct perf_limits limits = { | 155 | static struct perf_limits limits = { |
@@ -159,6 +161,8 @@ static struct perf_limits limits = { | |||
159 | .min_perf = 0, | 161 | .min_perf = 0, |
160 | .max_policy_pct = 100, | 162 | .max_policy_pct = 100, |
161 | .max_sysfs_pct = 100, | 163 | .max_sysfs_pct = 100, |
164 | .min_policy_pct = 0, | ||
165 | .min_sysfs_pct = 0, | ||
162 | }; | 166 | }; |
163 | 167 | ||
164 | static inline void pid_reset(struct _pid *pid, int setpoint, int busy, | 168 | static inline void pid_reset(struct _pid *pid, int setpoint, int busy, |
@@ -338,6 +342,33 @@ static void __init intel_pstate_debug_expose_params(void) | |||
338 | return sprintf(buf, "%u\n", limits.object); \ | 342 | return sprintf(buf, "%u\n", limits.object); \ |
339 | } | 343 | } |
340 | 344 | ||
345 | static ssize_t show_turbo_pct(struct kobject *kobj, | ||
346 | struct attribute *attr, char *buf) | ||
347 | { | ||
348 | struct cpudata *cpu; | ||
349 | int total, no_turbo, turbo_pct; | ||
350 | uint32_t turbo_fp; | ||
351 | |||
352 | cpu = all_cpu_data[0]; | ||
353 | |||
354 | total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; | ||
355 | no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1; | ||
356 | turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total)); | ||
357 | turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100))); | ||
358 | return sprintf(buf, "%u\n", turbo_pct); | ||
359 | } | ||
360 | |||
361 | static ssize_t show_num_pstates(struct kobject *kobj, | ||
362 | struct attribute *attr, char *buf) | ||
363 | { | ||
364 | struct cpudata *cpu; | ||
365 | int total; | ||
366 | |||
367 | cpu = all_cpu_data[0]; | ||
368 | total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; | ||
369 | return sprintf(buf, "%u\n", total); | ||
370 | } | ||
371 | |||
341 | static ssize_t show_no_turbo(struct kobject *kobj, | 372 | static ssize_t show_no_turbo(struct kobject *kobj, |
342 | struct attribute *attr, char *buf) | 373 | struct attribute *attr, char *buf) |
343 | { | 374 | { |
@@ -404,7 +435,9 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b, | |||
404 | ret = sscanf(buf, "%u", &input); | 435 | ret = sscanf(buf, "%u", &input); |
405 | if (ret != 1) | 436 | if (ret != 1) |
406 | return -EINVAL; | 437 | return -EINVAL; |
407 | limits.min_perf_pct = clamp_t(int, input, 0 , 100); | 438 | |
439 | limits.min_sysfs_pct = clamp_t(int, input, 0 , 100); | ||
440 | limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct); | ||
408 | limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); | 441 | limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); |
409 | 442 | ||
410 | if (hwp_active) | 443 | if (hwp_active) |
@@ -418,11 +451,15 @@ show_one(min_perf_pct, min_perf_pct); | |||
418 | define_one_global_rw(no_turbo); | 451 | define_one_global_rw(no_turbo); |
419 | define_one_global_rw(max_perf_pct); | 452 | define_one_global_rw(max_perf_pct); |
420 | define_one_global_rw(min_perf_pct); | 453 | define_one_global_rw(min_perf_pct); |
454 | define_one_global_ro(turbo_pct); | ||
455 | define_one_global_ro(num_pstates); | ||
421 | 456 | ||
422 | static struct attribute *intel_pstate_attributes[] = { | 457 | static struct attribute *intel_pstate_attributes[] = { |
423 | &no_turbo.attr, | 458 | &no_turbo.attr, |
424 | &max_perf_pct.attr, | 459 | &max_perf_pct.attr, |
425 | &min_perf_pct.attr, | 460 | &min_perf_pct.attr, |
461 | &turbo_pct.attr, | ||
462 | &num_pstates.attr, | ||
426 | NULL | 463 | NULL |
427 | }; | 464 | }; |
428 | 465 | ||
@@ -825,6 +862,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = { | |||
825 | ICPU(0x46, core_params), | 862 | ICPU(0x46, core_params), |
826 | ICPU(0x47, core_params), | 863 | ICPU(0x47, core_params), |
827 | ICPU(0x4c, byt_params), | 864 | ICPU(0x4c, byt_params), |
865 | ICPU(0x4e, core_params), | ||
828 | ICPU(0x4f, core_params), | 866 | ICPU(0x4f, core_params), |
829 | ICPU(0x56, core_params), | 867 | ICPU(0x56, core_params), |
830 | {} | 868 | {} |
@@ -887,7 +925,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) | |||
887 | if (!policy->cpuinfo.max_freq) | 925 | if (!policy->cpuinfo.max_freq) |
888 | return -ENODEV; | 926 | return -ENODEV; |
889 | 927 | ||
890 | if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { | 928 | if (policy->policy == CPUFREQ_POLICY_PERFORMANCE && |
929 | policy->max >= policy->cpuinfo.max_freq) { | ||
930 | limits.min_policy_pct = 100; | ||
891 | limits.min_perf_pct = 100; | 931 | limits.min_perf_pct = 100; |
892 | limits.min_perf = int_tofp(1); | 932 | limits.min_perf = int_tofp(1); |
893 | limits.max_policy_pct = 100; | 933 | limits.max_policy_pct = 100; |
@@ -897,8 +937,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) | |||
897 | return 0; | 937 | return 0; |
898 | } | 938 | } |
899 | 939 | ||
900 | limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq; | 940 | limits.min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq; |
901 | limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100); | 941 | limits.min_policy_pct = clamp_t(int, limits.min_policy_pct, 0 , 100); |
942 | limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct); | ||
902 | limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); | 943 | limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); |
903 | 944 | ||
904 | limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq; | 945 | limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq; |
@@ -978,6 +1019,7 @@ static struct cpufreq_driver intel_pstate_driver = { | |||
978 | 1019 | ||
979 | static int __initdata no_load; | 1020 | static int __initdata no_load; |
980 | static int __initdata no_hwp; | 1021 | static int __initdata no_hwp; |
1022 | static int __initdata hwp_only; | ||
981 | static unsigned int force_load; | 1023 | static unsigned int force_load; |
982 | 1024 | ||
983 | static int intel_pstate_msrs_not_valid(void) | 1025 | static int intel_pstate_msrs_not_valid(void) |
@@ -1175,6 +1217,9 @@ static int __init intel_pstate_init(void) | |||
1175 | if (cpu_has(c,X86_FEATURE_HWP) && !no_hwp) | 1217 | if (cpu_has(c,X86_FEATURE_HWP) && !no_hwp) |
1176 | intel_pstate_hwp_enable(); | 1218 | intel_pstate_hwp_enable(); |
1177 | 1219 | ||
1220 | if (!hwp_active && hwp_only) | ||
1221 | goto out; | ||
1222 | |||
1178 | rc = cpufreq_register_driver(&intel_pstate_driver); | 1223 | rc = cpufreq_register_driver(&intel_pstate_driver); |
1179 | if (rc) | 1224 | if (rc) |
1180 | goto out; | 1225 | goto out; |
@@ -1209,6 +1254,8 @@ static int __init intel_pstate_setup(char *str) | |||
1209 | no_hwp = 1; | 1254 | no_hwp = 1; |
1210 | if (!strcmp(str, "force")) | 1255 | if (!strcmp(str, "force")) |
1211 | force_load = 1; | 1256 | force_load = 1; |
1257 | if (!strcmp(str, "hwp_only")) | ||
1258 | hwp_only = 1; | ||
1212 | return 0; | 1259 | return 0; |
1213 | } | 1260 | } |
1214 | early_param("intel_pstate", intel_pstate_setup); | 1261 | early_param("intel_pstate", intel_pstate_setup); |
diff --git a/drivers/cpufreq/ls1x-cpufreq.c b/drivers/cpufreq/ls1x-cpufreq.c index 25fbd6a1374f..f0913eee2f50 100644 --- a/drivers/cpufreq/ls1x-cpufreq.c +++ b/drivers/cpufreq/ls1x-cpufreq.c | |||
@@ -210,7 +210,6 @@ out: | |||
210 | static struct platform_driver ls1x_cpufreq_platdrv = { | 210 | static struct platform_driver ls1x_cpufreq_platdrv = { |
211 | .driver = { | 211 | .driver = { |
212 | .name = "ls1x-cpufreq", | 212 | .name = "ls1x-cpufreq", |
213 | .owner = THIS_MODULE, | ||
214 | }, | 213 | }, |
215 | .probe = ls1x_cpufreq_probe, | 214 | .probe = ls1x_cpufreq_probe, |
216 | .remove = ls1x_cpufreq_remove, | 215 | .remove = ls1x_cpufreq_remove, |
diff --git a/drivers/cpufreq/sfi-cpufreq.c b/drivers/cpufreq/sfi-cpufreq.c new file mode 100644 index 000000000000..ffa3389e535b --- /dev/null +++ b/drivers/cpufreq/sfi-cpufreq.c | |||
@@ -0,0 +1,136 @@ | |||
1 | /* | ||
2 | * SFI Performance States Driver | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
11 | * General Public License for more details. | ||
12 | * | ||
13 | * Author: Vishwesh M Rudramuni <vishwesh.m.rudramuni@intel.com> | ||
14 | * Author: Srinidhi Kasagar <srinidhi.kasagar@intel.com> | ||
15 | */ | ||
16 | |||
17 | #include <linux/cpufreq.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/sfi.h> | ||
22 | #include <linux/slab.h> | ||
23 | #include <linux/smp.h> | ||
24 | |||
25 | #include <asm/msr.h> | ||
26 | |||
27 | struct cpufreq_frequency_table *freq_table; | ||
28 | static struct sfi_freq_table_entry *sfi_cpufreq_array; | ||
29 | static int num_freq_table_entries; | ||
30 | |||
31 | static int sfi_parse_freq(struct sfi_table_header *table) | ||
32 | { | ||
33 | struct sfi_table_simple *sb; | ||
34 | struct sfi_freq_table_entry *pentry; | ||
35 | int totallen; | ||
36 | |||
37 | sb = (struct sfi_table_simple *)table; | ||
38 | num_freq_table_entries = SFI_GET_NUM_ENTRIES(sb, | ||
39 | struct sfi_freq_table_entry); | ||
40 | if (num_freq_table_entries <= 1) { | ||
41 | pr_err("No p-states discovered\n"); | ||
42 | return -ENODEV; | ||
43 | } | ||
44 | |||
45 | pentry = (struct sfi_freq_table_entry *)sb->pentry; | ||
46 | totallen = num_freq_table_entries * sizeof(*pentry); | ||
47 | |||
48 | sfi_cpufreq_array = kzalloc(totallen, GFP_KERNEL); | ||
49 | if (!sfi_cpufreq_array) | ||
50 | return -ENOMEM; | ||
51 | |||
52 | memcpy(sfi_cpufreq_array, pentry, totallen); | ||
53 | |||
54 | return 0; | ||
55 | } | ||
56 | |||
57 | static int sfi_cpufreq_target(struct cpufreq_policy *policy, unsigned int index) | ||
58 | { | ||
59 | unsigned int next_perf_state = 0; /* Index into perf table */ | ||
60 | u32 lo, hi; | ||
61 | |||
62 | next_perf_state = policy->freq_table[index].driver_data; | ||
63 | |||
64 | rdmsr_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, &lo, &hi); | ||
65 | lo = (lo & ~INTEL_PERF_CTL_MASK) | | ||
66 | ((u32) sfi_cpufreq_array[next_perf_state].ctrl_val & | ||
67 | INTEL_PERF_CTL_MASK); | ||
68 | wrmsr_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, lo, hi); | ||
69 | |||
70 | return 0; | ||
71 | } | ||
72 | |||
73 | static int sfi_cpufreq_cpu_init(struct cpufreq_policy *policy) | ||
74 | { | ||
75 | policy->shared_type = CPUFREQ_SHARED_TYPE_HW; | ||
76 | policy->cpuinfo.transition_latency = 100000; /* 100us */ | ||
77 | |||
78 | return cpufreq_table_validate_and_show(policy, freq_table); | ||
79 | } | ||
80 | |||
81 | static struct cpufreq_driver sfi_cpufreq_driver = { | ||
82 | .flags = CPUFREQ_CONST_LOOPS, | ||
83 | .verify = cpufreq_generic_frequency_table_verify, | ||
84 | .target_index = sfi_cpufreq_target, | ||
85 | .init = sfi_cpufreq_cpu_init, | ||
86 | .name = "sfi-cpufreq", | ||
87 | .attr = cpufreq_generic_attr, | ||
88 | }; | ||
89 | |||
90 | static int __init sfi_cpufreq_init(void) | ||
91 | { | ||
92 | int ret, i; | ||
93 | |||
94 | /* parse the freq table from SFI */ | ||
95 | ret = sfi_table_parse(SFI_SIG_FREQ, NULL, NULL, sfi_parse_freq); | ||
96 | if (ret) | ||
97 | return ret; | ||
98 | |||
99 | freq_table = kzalloc(sizeof(*freq_table) * | ||
100 | (num_freq_table_entries + 1), GFP_KERNEL); | ||
101 | if (!freq_table) { | ||
102 | ret = -ENOMEM; | ||
103 | goto err_free_array; | ||
104 | } | ||
105 | |||
106 | for (i = 0; i < num_freq_table_entries; i++) { | ||
107 | freq_table[i].driver_data = i; | ||
108 | freq_table[i].frequency = sfi_cpufreq_array[i].freq_mhz * 1000; | ||
109 | } | ||
110 | freq_table[i].frequency = CPUFREQ_TABLE_END; | ||
111 | |||
112 | ret = cpufreq_register_driver(&sfi_cpufreq_driver); | ||
113 | if (ret) | ||
114 | goto err_free_tbl; | ||
115 | |||
116 | return ret; | ||
117 | |||
118 | err_free_tbl: | ||
119 | kfree(freq_table); | ||
120 | err_free_array: | ||
121 | kfree(sfi_cpufreq_array); | ||
122 | return ret; | ||
123 | } | ||
124 | late_initcall(sfi_cpufreq_init); | ||
125 | |||
126 | static void __exit sfi_cpufreq_exit(void) | ||
127 | { | ||
128 | cpufreq_unregister_driver(&sfi_cpufreq_driver); | ||
129 | kfree(freq_table); | ||
130 | kfree(sfi_cpufreq_array); | ||
131 | } | ||
132 | module_exit(sfi_cpufreq_exit); | ||
133 | |||
134 | MODULE_AUTHOR("Vishwesh M Rudramuni <vishwesh.m.rudramuni@intel.com>"); | ||
135 | MODULE_DESCRIPTION("SFI Performance-States Driver"); | ||
136 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c index 7047821a7f8a..4ab7a2156672 100644 --- a/drivers/cpufreq/speedstep-lib.c +++ b/drivers/cpufreq/speedstep-lib.c | |||
@@ -400,6 +400,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor, | |||
400 | 400 | ||
401 | pr_debug("previous speed is %u\n", prev_speed); | 401 | pr_debug("previous speed is %u\n", prev_speed); |
402 | 402 | ||
403 | preempt_disable(); | ||
403 | local_irq_save(flags); | 404 | local_irq_save(flags); |
404 | 405 | ||
405 | /* switch to low state */ | 406 | /* switch to low state */ |
@@ -464,6 +465,8 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor, | |||
464 | 465 | ||
465 | out: | 466 | out: |
466 | local_irq_restore(flags); | 467 | local_irq_restore(flags); |
468 | preempt_enable(); | ||
469 | |||
467 | return ret; | 470 | return ret; |
468 | } | 471 | } |
469 | EXPORT_SYMBOL_GPL(speedstep_get_freqs); | 472 | EXPORT_SYMBOL_GPL(speedstep_get_freqs); |
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c index 5fc96d5d656b..819229e824fb 100644 --- a/drivers/cpufreq/speedstep-smi.c +++ b/drivers/cpufreq/speedstep-smi.c | |||
@@ -156,6 +156,7 @@ static void speedstep_set_state(unsigned int state) | |||
156 | return; | 156 | return; |
157 | 157 | ||
158 | /* Disable IRQs */ | 158 | /* Disable IRQs */ |
159 | preempt_disable(); | ||
159 | local_irq_save(flags); | 160 | local_irq_save(flags); |
160 | 161 | ||
161 | command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); | 162 | command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); |
@@ -166,9 +167,19 @@ static void speedstep_set_state(unsigned int state) | |||
166 | 167 | ||
167 | do { | 168 | do { |
168 | if (retry) { | 169 | if (retry) { |
170 | /* | ||
171 | * We need to enable interrupts, otherwise the blockage | ||
172 | * won't resolve. | ||
173 | * | ||
174 | * We disable preemption so that other processes don't | ||
175 | * run. If other processes were running, they could | ||
176 | * submit more DMA requests, making the blockage worse. | ||
177 | */ | ||
169 | pr_debug("retry %u, previous result %u, waiting...\n", | 178 | pr_debug("retry %u, previous result %u, waiting...\n", |
170 | retry, result); | 179 | retry, result); |
180 | local_irq_enable(); | ||
171 | mdelay(retry * 50); | 181 | mdelay(retry * 50); |
182 | local_irq_disable(); | ||
172 | } | 183 | } |
173 | retry++; | 184 | retry++; |
174 | __asm__ __volatile__( | 185 | __asm__ __volatile__( |
@@ -185,6 +196,7 @@ static void speedstep_set_state(unsigned int state) | |||
185 | 196 | ||
186 | /* enable IRQs */ | 197 | /* enable IRQs */ |
187 | local_irq_restore(flags); | 198 | local_irq_restore(flags); |
199 | preempt_enable(); | ||
188 | 200 | ||
189 | if (new_state == state) | 201 | if (new_state == state) |
190 | pr_debug("change to %u MHz succeeded after %u tries " | 202 | pr_debug("change to %u MHz succeeded after %u tries " |
diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c index e3e225fe6b45..40c34faffe59 100644 --- a/drivers/cpuidle/cpuidle-big_little.c +++ b/drivers/cpuidle/cpuidle-big_little.c | |||
@@ -182,6 +182,10 @@ static int __init bl_idle_init(void) | |||
182 | */ | 182 | */ |
183 | if (!of_match_node(compatible_machine_match, root)) | 183 | if (!of_match_node(compatible_machine_match, root)) |
184 | return -ENODEV; | 184 | return -ENODEV; |
185 | |||
186 | if (!mcpm_is_available()) | ||
187 | return -EUNATCH; | ||
188 | |||
185 | /* | 189 | /* |
186 | * For now the differentiation between little and big cores | 190 | * For now the differentiation between little and big cores |
187 | * is based on the part number. A7 cores are considered little | 191 | * is based on the part number. A7 cores are considered little |
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig index 51dccb3620ea..64281bb2f650 100644 --- a/drivers/devfreq/Kconfig +++ b/drivers/devfreq/Kconfig | |||
@@ -1,5 +1,6 @@ | |||
1 | menuconfig PM_DEVFREQ | 1 | menuconfig PM_DEVFREQ |
2 | bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support" | 2 | bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support" |
3 | select SRCU | ||
3 | help | 4 | help |
4 | A device may have a list of frequencies and voltages available. | 5 | A device may have a list of frequencies and voltages available. |
5 | devfreq, a generic DVFS framework can be registered for a device | 6 | devfreq, a generic DVFS framework can be registered for a device |
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c index de361a156b34..5a635646e05c 100644 --- a/drivers/dma/acpi-dma.c +++ b/drivers/dma/acpi-dma.c | |||
@@ -43,7 +43,7 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp, | |||
43 | { | 43 | { |
44 | const struct acpi_csrt_shared_info *si; | 44 | const struct acpi_csrt_shared_info *si; |
45 | struct list_head resource_list; | 45 | struct list_head resource_list; |
46 | struct resource_list_entry *rentry; | 46 | struct resource_entry *rentry; |
47 | resource_size_t mem = 0, irq = 0; | 47 | resource_size_t mem = 0, irq = 0; |
48 | int ret; | 48 | int ret; |
49 | 49 | ||
@@ -56,10 +56,10 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp, | |||
56 | return 0; | 56 | return 0; |
57 | 57 | ||
58 | list_for_each_entry(rentry, &resource_list, node) { | 58 | list_for_each_entry(rentry, &resource_list, node) { |
59 | if (resource_type(&rentry->res) == IORESOURCE_MEM) | 59 | if (resource_type(rentry->res) == IORESOURCE_MEM) |
60 | mem = rentry->res.start; | 60 | mem = rentry->res->start; |
61 | else if (resource_type(&rentry->res) == IORESOURCE_IRQ) | 61 | else if (resource_type(rentry->res) == IORESOURCE_IRQ) |
62 | irq = rentry->res.start; | 62 | irq = rentry->res->start; |
63 | } | 63 | } |
64 | 64 | ||
65 | acpi_dev_free_resource_list(&resource_list); | 65 | acpi_dev_free_resource_list(&resource_list); |
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 49c265255a07..cb59619df23f 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig | |||
@@ -385,4 +385,11 @@ config EDAC_ALTERA_MC | |||
385 | preloader must initialize the SDRAM before loading | 385 | preloader must initialize the SDRAM before loading |
386 | the kernel. | 386 | the kernel. |
387 | 387 | ||
388 | config EDAC_SYNOPSYS | ||
389 | tristate "Synopsys DDR Memory Controller" | ||
390 | depends on EDAC_MM_EDAC && ARCH_ZYNQ | ||
391 | help | ||
392 | Support for error detection and correction on the Synopsys DDR | ||
393 | memory controller. | ||
394 | |||
388 | endif # EDAC | 395 | endif # EDAC |
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index d40c69a04df7..b255f362b1db 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile | |||
@@ -67,3 +67,4 @@ obj-$(CONFIG_EDAC_OCTEON_LMC) += octeon_edac-lmc.o | |||
67 | obj-$(CONFIG_EDAC_OCTEON_PCI) += octeon_edac-pci.o | 67 | obj-$(CONFIG_EDAC_OCTEON_PCI) += octeon_edac-pci.o |
68 | 68 | ||
69 | obj-$(CONFIG_EDAC_ALTERA_MC) += altera_edac.o | 69 | obj-$(CONFIG_EDAC_ALTERA_MC) += altera_edac.o |
70 | obj-$(CONFIG_EDAC_SYNOPSYS) += synopsys_edac.o | ||
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c index 670d2829c547..c84eecb191ef 100644 --- a/drivers/edac/edac_mc_sysfs.c +++ b/drivers/edac/edac_mc_sysfs.c | |||
@@ -157,7 +157,7 @@ struct dev_ch_attribute { | |||
157 | }; | 157 | }; |
158 | 158 | ||
159 | #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \ | 159 | #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \ |
160 | struct dev_ch_attribute dev_attr_legacy_##_name = \ | 160 | static struct dev_ch_attribute dev_attr_legacy_##_name = \ |
161 | { __ATTR(_name, _mode, _show, _store), (_var) } | 161 | { __ATTR(_name, _mode, _show, _store), (_var) } |
162 | 162 | ||
163 | #define to_channel(k) (container_of(k, struct dev_ch_attribute, attr)->channel) | 163 | #define to_channel(k) (container_of(k, struct dev_ch_attribute, attr)->channel) |
@@ -850,20 +850,20 @@ static const struct file_operations debug_fake_inject_fops = { | |||
850 | #endif | 850 | #endif |
851 | 851 | ||
852 | /* default Control file */ | 852 | /* default Control file */ |
853 | DEVICE_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store); | 853 | static DEVICE_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store); |
854 | 854 | ||
855 | /* default Attribute files */ | 855 | /* default Attribute files */ |
856 | DEVICE_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL); | 856 | static DEVICE_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL); |
857 | DEVICE_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL); | 857 | static DEVICE_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL); |
858 | DEVICE_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL); | 858 | static DEVICE_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL); |
859 | DEVICE_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL); | 859 | static DEVICE_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL); |
860 | DEVICE_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL); | 860 | static DEVICE_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL); |
861 | DEVICE_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL); | 861 | static DEVICE_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL); |
862 | DEVICE_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL); | 862 | static DEVICE_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL); |
863 | DEVICE_ATTR(max_location, S_IRUGO, mci_max_location_show, NULL); | 863 | static DEVICE_ATTR(max_location, S_IRUGO, mci_max_location_show, NULL); |
864 | 864 | ||
865 | /* memory scrubber attribute file */ | 865 | /* memory scrubber attribute file */ |
866 | DEVICE_ATTR(sdram_scrub_rate, 0, NULL, NULL); | 866 | static DEVICE_ATTR(sdram_scrub_rate, 0, NULL, NULL); |
867 | 867 | ||
868 | static struct attribute *mci_attrs[] = { | 868 | static struct attribute *mci_attrs[] = { |
869 | &dev_attr_reset_counters.attr, | 869 | &dev_attr_reset_counters.attr, |
@@ -989,7 +989,7 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) | |||
989 | 989 | ||
990 | err = bus_register(mci->bus); | 990 | err = bus_register(mci->bus); |
991 | if (err < 0) | 991 | if (err < 0) |
992 | return err; | 992 | goto fail_free_name; |
993 | 993 | ||
994 | /* get the /sys/devices/system/edac subsys reference */ | 994 | /* get the /sys/devices/system/edac subsys reference */ |
995 | mci->dev.type = &mci_attr_type; | 995 | mci->dev.type = &mci_attr_type; |
@@ -1005,9 +1005,7 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) | |||
1005 | err = device_add(&mci->dev); | 1005 | err = device_add(&mci->dev); |
1006 | if (err < 0) { | 1006 | if (err < 0) { |
1007 | edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev)); | 1007 | edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev)); |
1008 | bus_unregister(mci->bus); | 1008 | goto fail_unregister_bus; |
1009 | kfree(mci->bus->name); | ||
1010 | return err; | ||
1011 | } | 1009 | } |
1012 | 1010 | ||
1013 | if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) { | 1011 | if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) { |
@@ -1015,15 +1013,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) | |||
1015 | dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO; | 1013 | dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO; |
1016 | dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show; | 1014 | dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show; |
1017 | } | 1015 | } |
1016 | |||
1018 | if (mci->set_sdram_scrub_rate) { | 1017 | if (mci->set_sdram_scrub_rate) { |
1019 | dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR; | 1018 | dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR; |
1020 | dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store; | 1019 | dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store; |
1021 | } | 1020 | } |
1022 | err = device_create_file(&mci->dev, | 1021 | |
1023 | &dev_attr_sdram_scrub_rate); | 1022 | err = device_create_file(&mci->dev, &dev_attr_sdram_scrub_rate); |
1024 | if (err) { | 1023 | if (err) { |
1025 | edac_dbg(1, "failure: create sdram_scrub_rate\n"); | 1024 | edac_dbg(1, "failure: create sdram_scrub_rate\n"); |
1026 | goto fail2; | 1025 | goto fail_unregister_dev; |
1027 | } | 1026 | } |
1028 | } | 1027 | } |
1029 | /* | 1028 | /* |
@@ -1032,8 +1031,9 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) | |||
1032 | for (i = 0; i < mci->tot_dimms; i++) { | 1031 | for (i = 0; i < mci->tot_dimms; i++) { |
1033 | struct dimm_info *dimm = mci->dimms[i]; | 1032 | struct dimm_info *dimm = mci->dimms[i]; |
1034 | /* Only expose populated DIMMs */ | 1033 | /* Only expose populated DIMMs */ |
1035 | if (dimm->nr_pages == 0) | 1034 | if (!dimm->nr_pages) |
1036 | continue; | 1035 | continue; |
1036 | |||
1037 | #ifdef CONFIG_EDAC_DEBUG | 1037 | #ifdef CONFIG_EDAC_DEBUG |
1038 | edac_dbg(1, "creating dimm%d, located at ", i); | 1038 | edac_dbg(1, "creating dimm%d, located at ", i); |
1039 | if (edac_debug_level >= 1) { | 1039 | if (edac_debug_level >= 1) { |
@@ -1048,14 +1048,14 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) | |||
1048 | err = edac_create_dimm_object(mci, dimm, i); | 1048 | err = edac_create_dimm_object(mci, dimm, i); |
1049 | if (err) { | 1049 | if (err) { |
1050 | edac_dbg(1, "failure: create dimm %d obj\n", i); | 1050 | edac_dbg(1, "failure: create dimm %d obj\n", i); |
1051 | goto fail; | 1051 | goto fail_unregister_dimm; |
1052 | } | 1052 | } |
1053 | } | 1053 | } |
1054 | 1054 | ||
1055 | #ifdef CONFIG_EDAC_LEGACY_SYSFS | 1055 | #ifdef CONFIG_EDAC_LEGACY_SYSFS |
1056 | err = edac_create_csrow_objects(mci); | 1056 | err = edac_create_csrow_objects(mci); |
1057 | if (err < 0) | 1057 | if (err < 0) |
1058 | goto fail; | 1058 | goto fail_unregister_dimm; |
1059 | #endif | 1059 | #endif |
1060 | 1060 | ||
1061 | #ifdef CONFIG_EDAC_DEBUG | 1061 | #ifdef CONFIG_EDAC_DEBUG |
@@ -1063,16 +1063,19 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) | |||
1063 | #endif | 1063 | #endif |
1064 | return 0; | 1064 | return 0; |
1065 | 1065 | ||
1066 | fail: | 1066 | fail_unregister_dimm: |
1067 | for (i--; i >= 0; i--) { | 1067 | for (i--; i >= 0; i--) { |
1068 | struct dimm_info *dimm = mci->dimms[i]; | 1068 | struct dimm_info *dimm = mci->dimms[i]; |
1069 | if (dimm->nr_pages == 0) | 1069 | if (!dimm->nr_pages) |
1070 | continue; | 1070 | continue; |
1071 | |||
1071 | device_unregister(&dimm->dev); | 1072 | device_unregister(&dimm->dev); |
1072 | } | 1073 | } |
1073 | fail2: | 1074 | fail_unregister_dev: |
1074 | device_unregister(&mci->dev); | 1075 | device_unregister(&mci->dev); |
1076 | fail_unregister_bus: | ||
1075 | bus_unregister(mci->bus); | 1077 | bus_unregister(mci->bus); |
1078 | fail_free_name: | ||
1076 | kfree(mci->bus->name); | 1079 | kfree(mci->bus->name); |
1077 | return err; | 1080 | return err; |
1078 | } | 1081 | } |
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c index 6247d186177e..e9f8a393915a 100644 --- a/drivers/edac/i5100_edac.c +++ b/drivers/edac/i5100_edac.c | |||
@@ -279,11 +279,6 @@ static inline u32 i5100_recmema_rank(u32 a) | |||
279 | return i5100_nrecmema_rank(a); | 279 | return i5100_nrecmema_rank(a); |
280 | } | 280 | } |
281 | 281 | ||
282 | static inline u32 i5100_recmema_dm_buf_id(u32 a) | ||
283 | { | ||
284 | return i5100_nrecmema_dm_buf_id(a); | ||
285 | } | ||
286 | |||
287 | static inline u32 i5100_recmemb_cas(u32 a) | 282 | static inline u32 i5100_recmemb_cas(u32 a) |
288 | { | 283 | { |
289 | return i5100_nrecmemb_cas(a); | 284 | return i5100_nrecmemb_cas(a); |
diff --git a/drivers/edac/mce_amd_inj.c b/drivers/edac/mce_amd_inj.c index 0bd91a802c67..f7681b553fd5 100644 --- a/drivers/edac/mce_amd_inj.c +++ b/drivers/edac/mce_amd_inj.c | |||
@@ -197,7 +197,7 @@ static int inj_bank_get(void *data, u64 *val) | |||
197 | 197 | ||
198 | DEFINE_SIMPLE_ATTRIBUTE(bank_fops, inj_bank_get, inj_bank_set, "%llu\n"); | 198 | DEFINE_SIMPLE_ATTRIBUTE(bank_fops, inj_bank_get, inj_bank_set, "%llu\n"); |
199 | 199 | ||
200 | struct dfs_node { | 200 | static struct dfs_node { |
201 | char *name; | 201 | char *name; |
202 | struct dentry *d; | 202 | struct dentry *d; |
203 | const struct file_operations *fops; | 203 | const struct file_operations *fops; |
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c index ffb1a9a15ccd..1fa76a588af3 100644 --- a/drivers/edac/mpc85xx_edac.c +++ b/drivers/edac/mpc85xx_edac.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Freescale MPC85xx Memory Controller kenel module | 2 | * Freescale MPC85xx Memory Controller kernel module |
3 | * | 3 | * |
4 | * Parts Copyrighted (c) 2013 by Freescale Semiconductor, Inc. | 4 | * Parts Copyrighted (c) 2013 by Freescale Semiconductor, Inc. |
5 | * | 5 | * |
diff --git a/drivers/edac/mpc85xx_edac.h b/drivers/edac/mpc85xx_edac.h index 8c6256436227..4498baf9ce05 100644 --- a/drivers/edac/mpc85xx_edac.h +++ b/drivers/edac/mpc85xx_edac.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Freescale MPC85xx Memory Controller kenel module | 2 | * Freescale MPC85xx Memory Controller kernel module |
3 | * Author: Dave Jiang <djiang@mvista.com> | 3 | * Author: Dave Jiang <djiang@mvista.com> |
4 | * | 4 | * |
5 | * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under | 5 | * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under |
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c index 6366e880f978..0574e1bbe45c 100644 --- a/drivers/edac/mv64x60_edac.c +++ b/drivers/edac/mv64x60_edac.c | |||
@@ -789,7 +789,8 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev) | |||
789 | ctl = (ctl & 0xff00ffff) | 0x10000; | 789 | ctl = (ctl & 0xff00ffff) | 0x10000; |
790 | out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL, ctl); | 790 | out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL, ctl); |
791 | 791 | ||
792 | if (edac_mc_add_mc(mci)) { | 792 | res = edac_mc_add_mc(mci); |
793 | if (res) { | ||
793 | edac_dbg(3, "failed edac_mc_add_mc()\n"); | 794 | edac_dbg(3, "failed edac_mc_add_mc()\n"); |
794 | goto err; | 795 | goto err; |
795 | } | 796 | } |
diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c new file mode 100644 index 000000000000..1c9691535e13 --- /dev/null +++ b/drivers/edac/synopsys_edac.c | |||
@@ -0,0 +1,535 @@ | |||
1 | /* | ||
2 | * Synopsys DDR ECC Driver | ||
3 | * This driver is based on ppc4xx_edac.c drivers | ||
4 | * | ||
5 | * Copyright (C) 2012 - 2014 Xilinx, Inc. | ||
6 | * | ||
7 | * This program is free software: you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation, either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * This file is subject to the terms and conditions of the GNU General Public | ||
18 | * License. See the file "COPYING" in the main directory of this archive | ||
19 | * for more details | ||
20 | */ | ||
21 | |||
22 | #include <linux/edac.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/platform_device.h> | ||
25 | |||
26 | #include "edac_core.h" | ||
27 | |||
28 | /* Number of cs_rows needed per memory controller */ | ||
29 | #define SYNPS_EDAC_NR_CSROWS 1 | ||
30 | |||
31 | /* Number of channels per memory controller */ | ||
32 | #define SYNPS_EDAC_NR_CHANS 1 | ||
33 | |||
34 | /* Granularity of reported error in bytes */ | ||
35 | #define SYNPS_EDAC_ERR_GRAIN 1 | ||
36 | |||
37 | #define SYNPS_EDAC_MSG_SIZE 256 | ||
38 | |||
39 | #define SYNPS_EDAC_MOD_STRING "synps_edac" | ||
40 | #define SYNPS_EDAC_MOD_VER "1" | ||
41 | |||
42 | /* Synopsys DDR memory controller registers that are relevant to ECC */ | ||
43 | #define CTRL_OFST 0x0 | ||
44 | #define T_ZQ_OFST 0xA4 | ||
45 | |||
46 | /* ECC control register */ | ||
47 | #define ECC_CTRL_OFST 0xC4 | ||
48 | /* ECC log register */ | ||
49 | #define CE_LOG_OFST 0xC8 | ||
50 | /* ECC address register */ | ||
51 | #define CE_ADDR_OFST 0xCC | ||
52 | /* ECC data[31:0] register */ | ||
53 | #define CE_DATA_31_0_OFST 0xD0 | ||
54 | |||
55 | /* Uncorrectable error info registers */ | ||
56 | #define UE_LOG_OFST 0xDC | ||
57 | #define UE_ADDR_OFST 0xE0 | ||
58 | #define UE_DATA_31_0_OFST 0xE4 | ||
59 | |||
60 | #define STAT_OFST 0xF0 | ||
61 | #define SCRUB_OFST 0xF4 | ||
62 | |||
63 | /* Control register bit field definitions */ | ||
64 | #define CTRL_BW_MASK 0xC | ||
65 | #define CTRL_BW_SHIFT 2 | ||
66 | |||
67 | #define DDRCTL_WDTH_16 1 | ||
68 | #define DDRCTL_WDTH_32 0 | ||
69 | |||
70 | /* ZQ register bit field definitions */ | ||
71 | #define T_ZQ_DDRMODE_MASK 0x2 | ||
72 | |||
73 | /* ECC control register bit field definitions */ | ||
74 | #define ECC_CTRL_CLR_CE_ERR 0x2 | ||
75 | #define ECC_CTRL_CLR_UE_ERR 0x1 | ||
76 | |||
77 | /* ECC correctable/uncorrectable error log register definitions */ | ||
78 | #define LOG_VALID 0x1 | ||
79 | #define CE_LOG_BITPOS_MASK 0xFE | ||
80 | #define CE_LOG_BITPOS_SHIFT 1 | ||
81 | |||
82 | /* ECC correctable/uncorrectable error address register definitions */ | ||
83 | #define ADDR_COL_MASK 0xFFF | ||
84 | #define ADDR_ROW_MASK 0xFFFF000 | ||
85 | #define ADDR_ROW_SHIFT 12 | ||
86 | #define ADDR_BANK_MASK 0x70000000 | ||
87 | #define ADDR_BANK_SHIFT 28 | ||
88 | |||
89 | /* ECC statistic register definitions */ | ||
90 | #define STAT_UECNT_MASK 0xFF | ||
91 | #define STAT_CECNT_MASK 0xFF00 | ||
92 | #define STAT_CECNT_SHIFT 8 | ||
93 | |||
94 | /* ECC scrub register definitions */ | ||
95 | #define SCRUB_MODE_MASK 0x7 | ||
96 | #define SCRUB_MODE_SECDED 0x4 | ||
97 | |||
98 | /** | ||
99 | * struct ecc_error_info - ECC error log information | ||
100 | * @row: Row number | ||
101 | * @col: Column number | ||
102 | * @bank: Bank number | ||
103 | * @bitpos: Bit position | ||
104 | * @data: Data causing the error | ||
105 | */ | ||
106 | struct ecc_error_info { | ||
107 | u32 row; | ||
108 | u32 col; | ||
109 | u32 bank; | ||
110 | u32 bitpos; | ||
111 | u32 data; | ||
112 | }; | ||
113 | |||
114 | /** | ||
115 | * struct synps_ecc_status - ECC status information to report | ||
116 | * @ce_cnt: Correctable error count | ||
117 | * @ue_cnt: Uncorrectable error count | ||
118 | * @ceinfo: Correctable error log information | ||
119 | * @ueinfo: Uncorrectable error log information | ||
120 | */ | ||
121 | struct synps_ecc_status { | ||
122 | u32 ce_cnt; | ||
123 | u32 ue_cnt; | ||
124 | struct ecc_error_info ceinfo; | ||
125 | struct ecc_error_info ueinfo; | ||
126 | }; | ||
127 | |||
128 | /** | ||
129 | * struct synps_edac_priv - DDR memory controller private instance data | ||
130 | * @baseaddr: Base address of the DDR controller | ||
131 | * @message: Buffer for framing the event specific info | ||
132 | * @stat: ECC status information | ||
133 | * @ce_cnt: Correctable Error count | ||
134 | * @ue_cnt: Uncorrectable Error count | ||
135 | */ | ||
136 | struct synps_edac_priv { | ||
137 | void __iomem *baseaddr; | ||
138 | char message[SYNPS_EDAC_MSG_SIZE]; | ||
139 | struct synps_ecc_status stat; | ||
140 | u32 ce_cnt; | ||
141 | u32 ue_cnt; | ||
142 | }; | ||
143 | |||
144 | /** | ||
145 | * synps_edac_geterror_info - Get the current ecc error info | ||
146 | * @base: Pointer to the base address of the ddr memory controller | ||
147 | * @p: Pointer to the synopsys ecc status structure | ||
148 | * | ||
149 | * Determines there is any ecc error or not | ||
150 | * | ||
151 | * Return: one if there is no error otherwise returns zero | ||
152 | */ | ||
153 | static int synps_edac_geterror_info(void __iomem *base, | ||
154 | struct synps_ecc_status *p) | ||
155 | { | ||
156 | u32 regval, clearval = 0; | ||
157 | |||
158 | regval = readl(base + STAT_OFST); | ||
159 | if (!regval) | ||
160 | return 1; | ||
161 | |||
162 | p->ce_cnt = (regval & STAT_CECNT_MASK) >> STAT_CECNT_SHIFT; | ||
163 | p->ue_cnt = regval & STAT_UECNT_MASK; | ||
164 | |||
165 | regval = readl(base + CE_LOG_OFST); | ||
166 | if (!(p->ce_cnt && (regval & LOG_VALID))) | ||
167 | goto ue_err; | ||
168 | |||
169 | p->ceinfo.bitpos = (regval & CE_LOG_BITPOS_MASK) >> CE_LOG_BITPOS_SHIFT; | ||
170 | regval = readl(base + CE_ADDR_OFST); | ||
171 | p->ceinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT; | ||
172 | p->ceinfo.col = regval & ADDR_COL_MASK; | ||
173 | p->ceinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT; | ||
174 | p->ceinfo.data = readl(base + CE_DATA_31_0_OFST); | ||
175 | edac_dbg(3, "ce bit position: %d data: %d\n", p->ceinfo.bitpos, | ||
176 | p->ceinfo.data); | ||
177 | clearval = ECC_CTRL_CLR_CE_ERR; | ||
178 | |||
179 | ue_err: | ||
180 | regval = readl(base + UE_LOG_OFST); | ||
181 | if (!(p->ue_cnt && (regval & LOG_VALID))) | ||
182 | goto out; | ||
183 | |||
184 | regval = readl(base + UE_ADDR_OFST); | ||
185 | p->ueinfo.row = (regval & ADDR_ROW_MASK) >> ADDR_ROW_SHIFT; | ||
186 | p->ueinfo.col = regval & ADDR_COL_MASK; | ||
187 | p->ueinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT; | ||
188 | p->ueinfo.data = readl(base + UE_DATA_31_0_OFST); | ||
189 | clearval |= ECC_CTRL_CLR_UE_ERR; | ||
190 | |||
191 | out: | ||
192 | writel(clearval, base + ECC_CTRL_OFST); | ||
193 | writel(0x0, base + ECC_CTRL_OFST); | ||
194 | |||
195 | return 0; | ||
196 | } | ||
197 | |||
198 | /** | ||
199 | * synps_edac_handle_error - Handle controller error types CE and UE | ||
200 | * @mci: Pointer to the edac memory controller instance | ||
201 | * @p: Pointer to the synopsys ecc status structure | ||
202 | * | ||
203 | * Handles the controller ECC correctable and un correctable error. | ||
204 | */ | ||
205 | static void synps_edac_handle_error(struct mem_ctl_info *mci, | ||
206 | struct synps_ecc_status *p) | ||
207 | { | ||
208 | struct synps_edac_priv *priv = mci->pvt_info; | ||
209 | struct ecc_error_info *pinf; | ||
210 | |||
211 | if (p->ce_cnt) { | ||
212 | pinf = &p->ceinfo; | ||
213 | snprintf(priv->message, SYNPS_EDAC_MSG_SIZE, | ||
214 | "DDR ECC error type :%s Row %d Bank %d Col %d ", | ||
215 | "CE", pinf->row, pinf->bank, pinf->col); | ||
216 | edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, | ||
217 | p->ce_cnt, 0, 0, 0, 0, 0, -1, | ||
218 | priv->message, ""); | ||
219 | } | ||
220 | |||
221 | if (p->ue_cnt) { | ||
222 | pinf = &p->ueinfo; | ||
223 | snprintf(priv->message, SYNPS_EDAC_MSG_SIZE, | ||
224 | "DDR ECC error type :%s Row %d Bank %d Col %d ", | ||
225 | "UE", pinf->row, pinf->bank, pinf->col); | ||
226 | edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, | ||
227 | p->ue_cnt, 0, 0, 0, 0, 0, -1, | ||
228 | priv->message, ""); | ||
229 | } | ||
230 | |||
231 | memset(p, 0, sizeof(*p)); | ||
232 | } | ||
233 | |||
234 | /** | ||
235 | * synps_edac_check - Check controller for ECC errors | ||
236 | * @mci: Pointer to the edac memory controller instance | ||
237 | * | ||
238 | * Used to check and post ECC errors. Called by the polling thread | ||
239 | */ | ||
240 | static void synps_edac_check(struct mem_ctl_info *mci) | ||
241 | { | ||
242 | struct synps_edac_priv *priv = mci->pvt_info; | ||
243 | int status; | ||
244 | |||
245 | status = synps_edac_geterror_info(priv->baseaddr, &priv->stat); | ||
246 | if (status) | ||
247 | return; | ||
248 | |||
249 | priv->ce_cnt += priv->stat.ce_cnt; | ||
250 | priv->ue_cnt += priv->stat.ue_cnt; | ||
251 | synps_edac_handle_error(mci, &priv->stat); | ||
252 | |||
253 | edac_dbg(3, "Total error count ce %d ue %d\n", | ||
254 | priv->ce_cnt, priv->ue_cnt); | ||
255 | } | ||
256 | |||
257 | /** | ||
258 | * synps_edac_get_dtype - Return the controller memory width | ||
259 | * @base: Pointer to the ddr memory controller base address | ||
260 | * | ||
261 | * Get the EDAC device type width appropriate for the current controller | ||
262 | * configuration. | ||
263 | * | ||
264 | * Return: a device type width enumeration. | ||
265 | */ | ||
266 | static enum dev_type synps_edac_get_dtype(const void __iomem *base) | ||
267 | { | ||
268 | enum dev_type dt; | ||
269 | u32 width; | ||
270 | |||
271 | width = readl(base + CTRL_OFST); | ||
272 | width = (width & CTRL_BW_MASK) >> CTRL_BW_SHIFT; | ||
273 | |||
274 | switch (width) { | ||
275 | case DDRCTL_WDTH_16: | ||
276 | dt = DEV_X2; | ||
277 | break; | ||
278 | case DDRCTL_WDTH_32: | ||
279 | dt = DEV_X4; | ||
280 | break; | ||
281 | default: | ||
282 | dt = DEV_UNKNOWN; | ||
283 | } | ||
284 | |||
285 | return dt; | ||
286 | } | ||
287 | |||
288 | /** | ||
289 | * synps_edac_get_eccstate - Return the controller ecc enable/disable status | ||
290 | * @base: Pointer to the ddr memory controller base address | ||
291 | * | ||
292 | * Get the ECC enable/disable status for the controller | ||
293 | * | ||
294 | * Return: a ecc status boolean i.e true/false - enabled/disabled. | ||
295 | */ | ||
296 | static bool synps_edac_get_eccstate(void __iomem *base) | ||
297 | { | ||
298 | enum dev_type dt; | ||
299 | u32 ecctype; | ||
300 | bool state = false; | ||
301 | |||
302 | dt = synps_edac_get_dtype(base); | ||
303 | if (dt == DEV_UNKNOWN) | ||
304 | return state; | ||
305 | |||
306 | ecctype = readl(base + SCRUB_OFST) & SCRUB_MODE_MASK; | ||
307 | if ((ecctype == SCRUB_MODE_SECDED) && (dt == DEV_X2)) | ||
308 | state = true; | ||
309 | |||
310 | return state; | ||
311 | } | ||
312 | |||
313 | /** | ||
314 | * synps_edac_get_memsize - reads the size of the attached memory device | ||
315 | * | ||
316 | * Return: the memory size in bytes | ||
317 | */ | ||
318 | static u32 synps_edac_get_memsize(void) | ||
319 | { | ||
320 | struct sysinfo inf; | ||
321 | |||
322 | si_meminfo(&inf); | ||
323 | |||
324 | return inf.totalram * inf.mem_unit; | ||
325 | } | ||
326 | |||
327 | /** | ||
328 | * synps_edac_get_mtype - Returns controller memory type | ||
329 | * @base: pointer to the synopsys ecc status structure | ||
330 | * | ||
331 | * Get the EDAC memory type appropriate for the current controller | ||
332 | * configuration. | ||
333 | * | ||
334 | * Return: a memory type enumeration. | ||
335 | */ | ||
336 | static enum mem_type synps_edac_get_mtype(const void __iomem *base) | ||
337 | { | ||
338 | enum mem_type mt; | ||
339 | u32 memtype; | ||
340 | |||
341 | memtype = readl(base + T_ZQ_OFST); | ||
342 | |||
343 | if (memtype & T_ZQ_DDRMODE_MASK) | ||
344 | mt = MEM_DDR3; | ||
345 | else | ||
346 | mt = MEM_DDR2; | ||
347 | |||
348 | return mt; | ||
349 | } | ||
350 | |||
351 | /** | ||
352 | * synps_edac_init_csrows - Initialize the cs row data | ||
353 | * @mci: Pointer to the edac memory controller instance | ||
354 | * | ||
355 | * Initializes the chip select rows associated with the EDAC memory | ||
356 | * controller instance | ||
357 | * | ||
358 | * Return: Unconditionally 0. | ||
359 | */ | ||
360 | static int synps_edac_init_csrows(struct mem_ctl_info *mci) | ||
361 | { | ||
362 | struct csrow_info *csi; | ||
363 | struct dimm_info *dimm; | ||
364 | struct synps_edac_priv *priv = mci->pvt_info; | ||
365 | u32 size; | ||
366 | int row, j; | ||
367 | |||
368 | for (row = 0; row < mci->nr_csrows; row++) { | ||
369 | csi = mci->csrows[row]; | ||
370 | size = synps_edac_get_memsize(); | ||
371 | |||
372 | for (j = 0; j < csi->nr_channels; j++) { | ||
373 | dimm = csi->channels[j]->dimm; | ||
374 | dimm->edac_mode = EDAC_FLAG_SECDED; | ||
375 | dimm->mtype = synps_edac_get_mtype(priv->baseaddr); | ||
376 | dimm->nr_pages = (size >> PAGE_SHIFT) / csi->nr_channels; | ||
377 | dimm->grain = SYNPS_EDAC_ERR_GRAIN; | ||
378 | dimm->dtype = synps_edac_get_dtype(priv->baseaddr); | ||
379 | } | ||
380 | } | ||
381 | |||
382 | return 0; | ||
383 | } | ||
384 | |||
385 | /** | ||
386 | * synps_edac_mc_init - Initialize driver instance | ||
387 | * @mci: Pointer to the edac memory controller instance | ||
388 | * @pdev: Pointer to the platform_device struct | ||
389 | * | ||
390 | * Performs initialization of the EDAC memory controller instance and | ||
391 | * related driver-private data associated with the memory controller the | ||
392 | * instance is bound to. | ||
393 | * | ||
394 | * Return: Always zero. | ||
395 | */ | ||
396 | static int synps_edac_mc_init(struct mem_ctl_info *mci, | ||
397 | struct platform_device *pdev) | ||
398 | { | ||
399 | int status; | ||
400 | struct synps_edac_priv *priv; | ||
401 | |||
402 | mci->pdev = &pdev->dev; | ||
403 | priv = mci->pvt_info; | ||
404 | platform_set_drvdata(pdev, mci); | ||
405 | |||
406 | /* Initialize controller capabilities and configuration */ | ||
407 | mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR2; | ||
408 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; | ||
409 | mci->scrub_cap = SCRUB_HW_SRC; | ||
410 | mci->scrub_mode = SCRUB_NONE; | ||
411 | |||
412 | mci->edac_cap = EDAC_FLAG_SECDED; | ||
413 | mci->ctl_name = "synps_ddr_controller"; | ||
414 | mci->dev_name = SYNPS_EDAC_MOD_STRING; | ||
415 | mci->mod_name = SYNPS_EDAC_MOD_VER; | ||
416 | mci->mod_ver = "1"; | ||
417 | |||
418 | edac_op_state = EDAC_OPSTATE_POLL; | ||
419 | mci->edac_check = synps_edac_check; | ||
420 | mci->ctl_page_to_phys = NULL; | ||
421 | |||
422 | status = synps_edac_init_csrows(mci); | ||
423 | |||
424 | return status; | ||
425 | } | ||
426 | |||
427 | /** | ||
428 | * synps_edac_mc_probe - Check controller and bind driver | ||
429 | * @pdev: Pointer to the platform_device struct | ||
430 | * | ||
431 | * Probes a specific controller instance for binding with the driver. | ||
432 | * | ||
433 | * Return: 0 if the controller instance was successfully bound to the | ||
434 | * driver; otherwise, < 0 on error. | ||
435 | */ | ||
436 | static int synps_edac_mc_probe(struct platform_device *pdev) | ||
437 | { | ||
438 | struct mem_ctl_info *mci; | ||
439 | struct edac_mc_layer layers[2]; | ||
440 | struct synps_edac_priv *priv; | ||
441 | int rc; | ||
442 | struct resource *res; | ||
443 | void __iomem *baseaddr; | ||
444 | |||
445 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
446 | baseaddr = devm_ioremap_resource(&pdev->dev, res); | ||
447 | if (IS_ERR(baseaddr)) | ||
448 | return PTR_ERR(baseaddr); | ||
449 | |||
450 | if (!synps_edac_get_eccstate(baseaddr)) { | ||
451 | edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n"); | ||
452 | return -ENXIO; | ||
453 | } | ||
454 | |||
455 | layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; | ||
456 | layers[0].size = SYNPS_EDAC_NR_CSROWS; | ||
457 | layers[0].is_virt_csrow = true; | ||
458 | layers[1].type = EDAC_MC_LAYER_CHANNEL; | ||
459 | layers[1].size = SYNPS_EDAC_NR_CHANS; | ||
460 | layers[1].is_virt_csrow = false; | ||
461 | |||
462 | mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, | ||
463 | sizeof(struct synps_edac_priv)); | ||
464 | if (!mci) { | ||
465 | edac_printk(KERN_ERR, EDAC_MC, | ||
466 | "Failed memory allocation for mc instance\n"); | ||
467 | return -ENOMEM; | ||
468 | } | ||
469 | |||
470 | priv = mci->pvt_info; | ||
471 | priv->baseaddr = baseaddr; | ||
472 | rc = synps_edac_mc_init(mci, pdev); | ||
473 | if (rc) { | ||
474 | edac_printk(KERN_ERR, EDAC_MC, | ||
475 | "Failed to initialize instance\n"); | ||
476 | goto free_edac_mc; | ||
477 | } | ||
478 | |||
479 | rc = edac_mc_add_mc(mci); | ||
480 | if (rc) { | ||
481 | edac_printk(KERN_ERR, EDAC_MC, | ||
482 | "Failed to register with EDAC core\n"); | ||
483 | goto free_edac_mc; | ||
484 | } | ||
485 | |||
486 | /* | ||
487 | * Start capturing the correctable and uncorrectable errors. A write of | ||
488 | * 0 starts the counters. | ||
489 | */ | ||
490 | writel(0x0, baseaddr + ECC_CTRL_OFST); | ||
491 | return rc; | ||
492 | |||
493 | free_edac_mc: | ||
494 | edac_mc_free(mci); | ||
495 | |||
496 | return rc; | ||
497 | } | ||
498 | |||
499 | /** | ||
500 | * synps_edac_mc_remove - Unbind driver from controller | ||
501 | * @pdev: Pointer to the platform_device struct | ||
502 | * | ||
503 | * Return: Unconditionally 0 | ||
504 | */ | ||
505 | static int synps_edac_mc_remove(struct platform_device *pdev) | ||
506 | { | ||
507 | struct mem_ctl_info *mci = platform_get_drvdata(pdev); | ||
508 | |||
509 | edac_mc_del_mc(&pdev->dev); | ||
510 | edac_mc_free(mci); | ||
511 | |||
512 | return 0; | ||
513 | } | ||
514 | |||
515 | static struct of_device_id synps_edac_match[] = { | ||
516 | { .compatible = "xlnx,zynq-ddrc-a05", }, | ||
517 | { /* end of table */ } | ||
518 | }; | ||
519 | |||
520 | MODULE_DEVICE_TABLE(of, synps_edac_match); | ||
521 | |||
522 | static struct platform_driver synps_edac_mc_driver = { | ||
523 | .driver = { | ||
524 | .name = "synopsys-edac", | ||
525 | .of_match_table = synps_edac_match, | ||
526 | }, | ||
527 | .probe = synps_edac_mc_probe, | ||
528 | .remove = synps_edac_mc_remove, | ||
529 | }; | ||
530 | |||
531 | module_platform_driver(synps_edac_mc_driver); | ||
532 | |||
533 | MODULE_AUTHOR("Xilinx Inc"); | ||
534 | MODULE_DESCRIPTION("Synopsys DDR ECC driver"); | ||
535 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index f712d47f30d8..8de4da5c9ab6 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig | |||
@@ -12,11 +12,11 @@ config EFI_VARS | |||
12 | 12 | ||
13 | Note that using this driver in concert with efibootmgr requires | 13 | Note that using this driver in concert with efibootmgr requires |
14 | at least test release version 0.5.0-test3 or later, which is | 14 | at least test release version 0.5.0-test3 or later, which is |
15 | available from Matt Domsch's website located at: | 15 | available from: |
16 | <http://linux.dell.com/efibootmgr/testing/efibootmgr-0.5.0-test3.tar.gz> | 16 | <http://linux.dell.com/efibootmgr/testing/efibootmgr-0.5.0-test3.tar.gz> |
17 | 17 | ||
18 | Subsequent efibootmgr releases may be found at: | 18 | Subsequent efibootmgr releases may be found at: |
19 | <http://linux.dell.com/efibootmgr> | 19 | <http://github.com/vathpela/efibootmgr> |
20 | 20 | ||
21 | config EFI_VARS_PSTORE | 21 | config EFI_VARS_PSTORE |
22 | tristate "Register efivars backend for pstore" | 22 | tristate "Register efivars backend for pstore" |
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 9035c1b74d58..fccb464928c3 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c | |||
@@ -115,15 +115,24 @@ EFI_ATTR_SHOW(fw_vendor); | |||
115 | EFI_ATTR_SHOW(runtime); | 115 | EFI_ATTR_SHOW(runtime); |
116 | EFI_ATTR_SHOW(config_table); | 116 | EFI_ATTR_SHOW(config_table); |
117 | 117 | ||
118 | static ssize_t fw_platform_size_show(struct kobject *kobj, | ||
119 | struct kobj_attribute *attr, char *buf) | ||
120 | { | ||
121 | return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32); | ||
122 | } | ||
123 | |||
118 | static struct kobj_attribute efi_attr_fw_vendor = __ATTR_RO(fw_vendor); | 124 | static struct kobj_attribute efi_attr_fw_vendor = __ATTR_RO(fw_vendor); |
119 | static struct kobj_attribute efi_attr_runtime = __ATTR_RO(runtime); | 125 | static struct kobj_attribute efi_attr_runtime = __ATTR_RO(runtime); |
120 | static struct kobj_attribute efi_attr_config_table = __ATTR_RO(config_table); | 126 | static struct kobj_attribute efi_attr_config_table = __ATTR_RO(config_table); |
127 | static struct kobj_attribute efi_attr_fw_platform_size = | ||
128 | __ATTR_RO(fw_platform_size); | ||
121 | 129 | ||
122 | static struct attribute *efi_subsys_attrs[] = { | 130 | static struct attribute *efi_subsys_attrs[] = { |
123 | &efi_attr_systab.attr, | 131 | &efi_attr_systab.attr, |
124 | &efi_attr_fw_vendor.attr, | 132 | &efi_attr_fw_vendor.attr, |
125 | &efi_attr_runtime.attr, | 133 | &efi_attr_runtime.attr, |
126 | &efi_attr_config_table.attr, | 134 | &efi_attr_config_table.attr, |
135 | &efi_attr_fw_platform_size.attr, | ||
127 | NULL, | 136 | NULL, |
128 | }; | 137 | }; |
129 | 138 | ||
@@ -272,15 +281,10 @@ static __init int match_config_table(efi_guid_t *guid, | |||
272 | unsigned long table, | 281 | unsigned long table, |
273 | efi_config_table_type_t *table_types) | 282 | efi_config_table_type_t *table_types) |
274 | { | 283 | { |
275 | u8 str[EFI_VARIABLE_GUID_LEN + 1]; | ||
276 | int i; | 284 | int i; |
277 | 285 | ||
278 | if (table_types) { | 286 | if (table_types) { |
279 | efi_guid_unparse(guid, str); | ||
280 | |||
281 | for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) { | 287 | for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) { |
282 | efi_guid_unparse(&table_types[i].guid, str); | ||
283 | |||
284 | if (!efi_guidcmp(*guid, table_types[i].guid)) { | 288 | if (!efi_guidcmp(*guid, table_types[i].guid)) { |
285 | *(table_types[i].ptr) = table; | 289 | *(table_types[i].ptr) = table; |
286 | pr_cont(" %s=0x%lx ", | 290 | pr_cont(" %s=0x%lx ", |
@@ -403,8 +407,7 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname, | |||
403 | u64 val; | 407 | u64 val; |
404 | int i, len; | 408 | int i, len; |
405 | 409 | ||
406 | if (depth != 1 || | 410 | if (depth != 1 || strcmp(uname, "chosen") != 0) |
407 | (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0)) | ||
408 | return 0; | 411 | return 0; |
409 | 412 | ||
410 | for (i = 0; i < ARRAY_SIZE(dt_params); i++) { | 413 | for (i = 0; i < ARRAY_SIZE(dt_params); i++) { |
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c index f256ecd8a176..7b2e0496e0c0 100644 --- a/drivers/firmware/efi/efivars.c +++ b/drivers/firmware/efi/efivars.c | |||
@@ -39,7 +39,7 @@ | |||
39 | * fix locking per Peter Chubb's findings | 39 | * fix locking per Peter Chubb's findings |
40 | * | 40 | * |
41 | * 25 Mar 2002 - Matt Domsch <Matt_Domsch@dell.com> | 41 | * 25 Mar 2002 - Matt Domsch <Matt_Domsch@dell.com> |
42 | * move uuid_unparse() to include/asm-ia64/efi.h:efi_guid_unparse() | 42 | * move uuid_unparse() to include/asm-ia64/efi.h:efi_guid_to_str() |
43 | * | 43 | * |
44 | * 12 Feb 2002 - Matt Domsch <Matt_Domsch@dell.com> | 44 | * 12 Feb 2002 - Matt Domsch <Matt_Domsch@dell.com> |
45 | * use list_for_each_safe when deleting vars. | 45 | * use list_for_each_safe when deleting vars. |
@@ -128,7 +128,7 @@ efivar_guid_read(struct efivar_entry *entry, char *buf) | |||
128 | if (!entry || !buf) | 128 | if (!entry || !buf) |
129 | return 0; | 129 | return 0; |
130 | 130 | ||
131 | efi_guid_unparse(&var->VendorGuid, str); | 131 | efi_guid_to_str(&var->VendorGuid, str); |
132 | str += strlen(str); | 132 | str += strlen(str); |
133 | str += sprintf(str, "\n"); | 133 | str += sprintf(str, "\n"); |
134 | 134 | ||
@@ -569,7 +569,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var) | |||
569 | private variables from another's. */ | 569 | private variables from another's. */ |
570 | 570 | ||
571 | *(short_name + strlen(short_name)) = '-'; | 571 | *(short_name + strlen(short_name)) = '-'; |
572 | efi_guid_unparse(&new_var->var.VendorGuid, | 572 | efi_guid_to_str(&new_var->var.VendorGuid, |
573 | short_name + strlen(short_name)); | 573 | short_name + strlen(short_name)); |
574 | 574 | ||
575 | new_var->kobj.kset = efivars_kset; | 575 | new_var->kobj.kset = efivars_kset; |
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index b14bc2b9fb4d..8902f52e0998 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile | |||
@@ -24,3 +24,17 @@ lib-y := efi-stub-helper.o | |||
24 | lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o | 24 | lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o |
25 | 25 | ||
26 | CFLAGS_fdt.o += -I$(srctree)/scripts/dtc/libfdt/ | 26 | CFLAGS_fdt.o += -I$(srctree)/scripts/dtc/libfdt/ |
27 | |||
28 | # | ||
29 | # arm64 puts the stub in the kernel proper, which will unnecessarily retain all | ||
30 | # code indefinitely unless it is annotated as __init/__initdata/__initconst etc. | ||
31 | # So let's apply the __init annotations at the section level, by prefixing | ||
32 | # the section names directly. This will ensure that even all the inline string | ||
33 | # literals are covered. | ||
34 | # | ||
35 | extra-$(CONFIG_ARM64) := $(lib-y) | ||
36 | lib-$(CONFIG_ARM64) := $(patsubst %.o,%.init.o,$(lib-y)) | ||
37 | |||
38 | OBJCOPYFLAGS := --prefix-alloc-sections=.init | ||
39 | $(obj)/%.init.o: $(obj)/%.o FORCE | ||
40 | $(call if_changed,objcopy) | ||
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c index eb48a1a1a576..2b3814702dcf 100644 --- a/drivers/firmware/efi/libstub/arm-stub.c +++ b/drivers/firmware/efi/libstub/arm-stub.c | |||
@@ -17,10 +17,10 @@ | |||
17 | 17 | ||
18 | #include "efistub.h" | 18 | #include "efistub.h" |
19 | 19 | ||
20 | static int __init efi_secureboot_enabled(efi_system_table_t *sys_table_arg) | 20 | static int efi_secureboot_enabled(efi_system_table_t *sys_table_arg) |
21 | { | 21 | { |
22 | static efi_guid_t const var_guid __initconst = EFI_GLOBAL_VARIABLE_GUID; | 22 | static efi_guid_t const var_guid = EFI_GLOBAL_VARIABLE_GUID; |
23 | static efi_char16_t const var_name[] __initconst = { | 23 | static efi_char16_t const var_name[] = { |
24 | 'S', 'e', 'c', 'u', 'r', 'e', 'B', 'o', 'o', 't', 0 }; | 24 | 'S', 'e', 'c', 'u', 'r', 'e', 'B', 'o', 'o', 't', 0 }; |
25 | 25 | ||
26 | efi_get_variable_t *f_getvar = sys_table_arg->runtime->get_variable; | 26 | efi_get_variable_t *f_getvar = sys_table_arg->runtime->get_variable; |
@@ -164,7 +164,7 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table, | |||
164 | * for both archictectures, with the arch-specific code provided in the | 164 | * for both archictectures, with the arch-specific code provided in the |
165 | * handle_kernel_image() function. | 165 | * handle_kernel_image() function. |
166 | */ | 166 | */ |
167 | unsigned long __init efi_entry(void *handle, efi_system_table_t *sys_table, | 167 | unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, |
168 | unsigned long *image_addr) | 168 | unsigned long *image_addr) |
169 | { | 169 | { |
170 | efi_loaded_image_t *image; | 170 | efi_loaded_image_t *image; |
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c index a920fec8fe88..d073e3946383 100644 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c | |||
@@ -66,25 +66,29 @@ efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg, | |||
66 | unsigned long key; | 66 | unsigned long key; |
67 | u32 desc_version; | 67 | u32 desc_version; |
68 | 68 | ||
69 | *map_size = sizeof(*m) * 32; | 69 | *map_size = 0; |
70 | again: | 70 | *desc_size = 0; |
71 | key = 0; | ||
72 | status = efi_call_early(get_memory_map, map_size, NULL, | ||
73 | &key, desc_size, &desc_version); | ||
74 | if (status != EFI_BUFFER_TOO_SMALL) | ||
75 | return EFI_LOAD_ERROR; | ||
76 | |||
71 | /* | 77 | /* |
72 | * Add an additional efi_memory_desc_t because we're doing an | 78 | * Add an additional efi_memory_desc_t because we're doing an |
73 | * allocation which may be in a new descriptor region. | 79 | * allocation which may be in a new descriptor region. |
74 | */ | 80 | */ |
75 | *map_size += sizeof(*m); | 81 | *map_size += *desc_size; |
76 | status = efi_call_early(allocate_pool, EFI_LOADER_DATA, | 82 | status = efi_call_early(allocate_pool, EFI_LOADER_DATA, |
77 | *map_size, (void **)&m); | 83 | *map_size, (void **)&m); |
78 | if (status != EFI_SUCCESS) | 84 | if (status != EFI_SUCCESS) |
79 | goto fail; | 85 | goto fail; |
80 | 86 | ||
81 | *desc_size = 0; | ||
82 | key = 0; | ||
83 | status = efi_call_early(get_memory_map, map_size, m, | 87 | status = efi_call_early(get_memory_map, map_size, m, |
84 | &key, desc_size, &desc_version); | 88 | &key, desc_size, &desc_version); |
85 | if (status == EFI_BUFFER_TOO_SMALL) { | 89 | if (status == EFI_BUFFER_TOO_SMALL) { |
86 | efi_call_early(free_pool, m); | 90 | efi_call_early(free_pool, m); |
87 | goto again; | 91 | return EFI_LOAD_ERROR; |
88 | } | 92 | } |
89 | 93 | ||
90 | if (status != EFI_SUCCESS) | 94 | if (status != EFI_SUCCESS) |
@@ -101,7 +105,7 @@ fail: | |||
101 | } | 105 | } |
102 | 106 | ||
103 | 107 | ||
104 | unsigned long __init get_dram_base(efi_system_table_t *sys_table_arg) | 108 | unsigned long get_dram_base(efi_system_table_t *sys_table_arg) |
105 | { | 109 | { |
106 | efi_status_t status; | 110 | efi_status_t status; |
107 | unsigned long map_size; | 111 | unsigned long map_size; |
diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c index 018c29a26615..87b8e3b900d2 100644 --- a/drivers/firmware/efi/runtime-map.c +++ b/drivers/firmware/efi/runtime-map.c | |||
@@ -191,7 +191,7 @@ int __init efi_runtime_map_init(struct kobject *efi_kobj) | |||
191 | 191 | ||
192 | return 0; | 192 | return 0; |
193 | out_add_entry: | 193 | out_add_entry: |
194 | for (j = i - 1; j > 0; j--) { | 194 | for (j = i - 1; j >= 0; j--) { |
195 | entry = *(map_entries + j); | 195 | entry = *(map_entries + j); |
196 | kobject_put(&entry->kobj); | 196 | kobject_put(&entry->kobj); |
197 | } | 197 | } |
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c index da9c316059bc..eea5d7e578c9 100644 --- a/drivers/gpio/gpio-mcp23s08.c +++ b/drivers/gpio/gpio-mcp23s08.c | |||
@@ -801,9 +801,11 @@ static int mcp230xx_probe(struct i2c_client *client, | |||
801 | client->irq = irq_of_parse_and_map(client->dev.of_node, 0); | 801 | client->irq = irq_of_parse_and_map(client->dev.of_node, 0); |
802 | } else { | 802 | } else { |
803 | pdata = dev_get_platdata(&client->dev); | 803 | pdata = dev_get_platdata(&client->dev); |
804 | if (!pdata || !gpio_is_valid(pdata->base)) { | 804 | if (!pdata) { |
805 | dev_dbg(&client->dev, "invalid platform data\n"); | 805 | pdata = devm_kzalloc(&client->dev, |
806 | return -EINVAL; | 806 | sizeof(struct mcp23s08_platform_data), |
807 | GFP_KERNEL); | ||
808 | pdata->base = -1; | ||
807 | } | 809 | } |
808 | } | 810 | } |
809 | 811 | ||
@@ -924,10 +926,11 @@ static int mcp23s08_probe(struct spi_device *spi) | |||
924 | } else { | 926 | } else { |
925 | type = spi_get_device_id(spi)->driver_data; | 927 | type = spi_get_device_id(spi)->driver_data; |
926 | pdata = dev_get_platdata(&spi->dev); | 928 | pdata = dev_get_platdata(&spi->dev); |
927 | if (!pdata || !gpio_is_valid(pdata->base)) { | 929 | if (!pdata) { |
928 | dev_dbg(&spi->dev, | 930 | pdata = devm_kzalloc(&spi->dev, |
929 | "invalid or missing platform data\n"); | 931 | sizeof(struct mcp23s08_platform_data), |
930 | return -EINVAL; | 932 | GFP_KERNEL); |
933 | pdata->base = -1; | ||
931 | } | 934 | } |
932 | 935 | ||
933 | for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) { | 936 | for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) { |
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index 30646cfe0efa..f476ae2eb0b3 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c | |||
@@ -88,6 +88,8 @@ struct gpio_bank { | |||
88 | #define BANK_USED(bank) (bank->mod_usage || bank->irq_usage) | 88 | #define BANK_USED(bank) (bank->mod_usage || bank->irq_usage) |
89 | #define LINE_USED(line, offset) (line & (BIT(offset))) | 89 | #define LINE_USED(line, offset) (line & (BIT(offset))) |
90 | 90 | ||
91 | static void omap_gpio_unmask_irq(struct irq_data *d); | ||
92 | |||
91 | static int omap_irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq) | 93 | static int omap_irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq) |
92 | { | 94 | { |
93 | return bank->chip.base + gpio_irq; | 95 | return bank->chip.base + gpio_irq; |
@@ -477,6 +479,16 @@ static int omap_gpio_is_input(struct gpio_bank *bank, int mask) | |||
477 | return readl_relaxed(reg) & mask; | 479 | return readl_relaxed(reg) & mask; |
478 | } | 480 | } |
479 | 481 | ||
482 | static void omap_gpio_init_irq(struct gpio_bank *bank, unsigned gpio, | ||
483 | unsigned offset) | ||
484 | { | ||
485 | if (!LINE_USED(bank->mod_usage, offset)) { | ||
486 | omap_enable_gpio_module(bank, offset); | ||
487 | omap_set_gpio_direction(bank, offset, 1); | ||
488 | } | ||
489 | bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio)); | ||
490 | } | ||
491 | |||
480 | static int omap_gpio_irq_type(struct irq_data *d, unsigned type) | 492 | static int omap_gpio_irq_type(struct irq_data *d, unsigned type) |
481 | { | 493 | { |
482 | struct gpio_bank *bank = omap_irq_data_get_bank(d); | 494 | struct gpio_bank *bank = omap_irq_data_get_bank(d); |
@@ -506,15 +518,11 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type) | |||
506 | spin_lock_irqsave(&bank->lock, flags); | 518 | spin_lock_irqsave(&bank->lock, flags); |
507 | offset = GPIO_INDEX(bank, gpio); | 519 | offset = GPIO_INDEX(bank, gpio); |
508 | retval = omap_set_gpio_triggering(bank, offset, type); | 520 | retval = omap_set_gpio_triggering(bank, offset, type); |
509 | if (!LINE_USED(bank->mod_usage, offset)) { | 521 | omap_gpio_init_irq(bank, gpio, offset); |
510 | omap_enable_gpio_module(bank, offset); | 522 | if (!omap_gpio_is_input(bank, BIT(offset))) { |
511 | omap_set_gpio_direction(bank, offset, 1); | ||
512 | } else if (!omap_gpio_is_input(bank, BIT(offset))) { | ||
513 | spin_unlock_irqrestore(&bank->lock, flags); | 523 | spin_unlock_irqrestore(&bank->lock, flags); |
514 | return -EINVAL; | 524 | return -EINVAL; |
515 | } | 525 | } |
516 | |||
517 | bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio)); | ||
518 | spin_unlock_irqrestore(&bank->lock, flags); | 526 | spin_unlock_irqrestore(&bank->lock, flags); |
519 | 527 | ||
520 | if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) | 528 | if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) |
@@ -792,6 +800,24 @@ exit: | |||
792 | pm_runtime_put(bank->dev); | 800 | pm_runtime_put(bank->dev); |
793 | } | 801 | } |
794 | 802 | ||
803 | static unsigned int omap_gpio_irq_startup(struct irq_data *d) | ||
804 | { | ||
805 | struct gpio_bank *bank = omap_irq_data_get_bank(d); | ||
806 | unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq); | ||
807 | unsigned long flags; | ||
808 | unsigned offset = GPIO_INDEX(bank, gpio); | ||
809 | |||
810 | if (!BANK_USED(bank)) | ||
811 | pm_runtime_get_sync(bank->dev); | ||
812 | |||
813 | spin_lock_irqsave(&bank->lock, flags); | ||
814 | omap_gpio_init_irq(bank, gpio, offset); | ||
815 | spin_unlock_irqrestore(&bank->lock, flags); | ||
816 | omap_gpio_unmask_irq(d); | ||
817 | |||
818 | return 0; | ||
819 | } | ||
820 | |||
795 | static void omap_gpio_irq_shutdown(struct irq_data *d) | 821 | static void omap_gpio_irq_shutdown(struct irq_data *d) |
796 | { | 822 | { |
797 | struct gpio_bank *bank = omap_irq_data_get_bank(d); | 823 | struct gpio_bank *bank = omap_irq_data_get_bank(d); |
@@ -1181,6 +1207,7 @@ static int omap_gpio_probe(struct platform_device *pdev) | |||
1181 | if (!irqc) | 1207 | if (!irqc) |
1182 | return -ENOMEM; | 1208 | return -ENOMEM; |
1183 | 1209 | ||
1210 | irqc->irq_startup = omap_gpio_irq_startup, | ||
1184 | irqc->irq_shutdown = omap_gpio_irq_shutdown, | 1211 | irqc->irq_shutdown = omap_gpio_irq_shutdown, |
1185 | irqc->irq_ack = omap_gpio_ack_irq, | 1212 | irqc->irq_ack = omap_gpio_ack_irq, |
1186 | irqc->irq_mask = omap_gpio_mask_irq, | 1213 | irqc->irq_mask = omap_gpio_mask_irq, |
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c index f62aa115d79a..7722ed53bd65 100644 --- a/drivers/gpio/gpiolib-sysfs.c +++ b/drivers/gpio/gpiolib-sysfs.c | |||
@@ -648,6 +648,7 @@ int gpiod_export_link(struct device *dev, const char *name, | |||
648 | if (tdev != NULL) { | 648 | if (tdev != NULL) { |
649 | status = sysfs_create_link(&dev->kobj, &tdev->kobj, | 649 | status = sysfs_create_link(&dev->kobj, &tdev->kobj, |
650 | name); | 650 | name); |
651 | put_device(tdev); | ||
651 | } else { | 652 | } else { |
652 | status = -ENODEV; | 653 | status = -ENODEV; |
653 | } | 654 | } |
@@ -695,7 +696,7 @@ int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value) | |||
695 | } | 696 | } |
696 | 697 | ||
697 | status = sysfs_set_active_low(desc, dev, value); | 698 | status = sysfs_set_active_low(desc, dev, value); |
698 | 699 | put_device(dev); | |
699 | unlock: | 700 | unlock: |
700 | mutex_unlock(&sysfs_lock); | 701 | mutex_unlock(&sysfs_lock); |
701 | 702 | ||
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 633532a2e7ec..25bc47f3c1cf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
27 | #include "kfd_priv.h" | 27 | #include "kfd_priv.h" |
28 | #include "kfd_device_queue_manager.h" | 28 | #include "kfd_device_queue_manager.h" |
29 | #include "kfd_pm4_headers.h" | ||
29 | 30 | ||
30 | #define MQD_SIZE_ALIGNED 768 | 31 | #define MQD_SIZE_ALIGNED 768 |
31 | 32 | ||
@@ -169,9 +170,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, | |||
169 | kfd->shared_resources = *gpu_resources; | 170 | kfd->shared_resources = *gpu_resources; |
170 | 171 | ||
171 | /* calculate max size of mqds needed for queues */ | 172 | /* calculate max size of mqds needed for queues */ |
172 | size = max_num_of_processes * | 173 | size = max_num_of_queues_per_device * |
173 | max_num_of_queues_per_process * | 174 | kfd->device_info->mqd_size_aligned; |
174 | kfd->device_info->mqd_size_aligned; | ||
175 | 175 | ||
176 | /* add another 512KB for all other allocations on gart */ | 176 | /* add another 512KB for all other allocations on gart */ |
177 | size += 512 * 1024; | 177 | size += 512 * 1024; |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 30c8fda9622e..0fd592799d58 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | |||
@@ -183,6 +183,13 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm, | |||
183 | 183 | ||
184 | mutex_lock(&dqm->lock); | 184 | mutex_lock(&dqm->lock); |
185 | 185 | ||
186 | if (dqm->total_queue_count >= max_num_of_queues_per_device) { | ||
187 | pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n", | ||
188 | dqm->total_queue_count); | ||
189 | mutex_unlock(&dqm->lock); | ||
190 | return -EPERM; | ||
191 | } | ||
192 | |||
186 | if (list_empty(&qpd->queues_list)) { | 193 | if (list_empty(&qpd->queues_list)) { |
187 | retval = allocate_vmid(dqm, qpd, q); | 194 | retval = allocate_vmid(dqm, qpd, q); |
188 | if (retval != 0) { | 195 | if (retval != 0) { |
@@ -207,6 +214,14 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm, | |||
207 | list_add(&q->list, &qpd->queues_list); | 214 | list_add(&q->list, &qpd->queues_list); |
208 | dqm->queue_count++; | 215 | dqm->queue_count++; |
209 | 216 | ||
217 | /* | ||
218 | * Unconditionally increment this counter, regardless of the queue's | ||
219 | * type or whether the queue is active. | ||
220 | */ | ||
221 | dqm->total_queue_count++; | ||
222 | pr_debug("Total of %d queues are accountable so far\n", | ||
223 | dqm->total_queue_count); | ||
224 | |||
210 | mutex_unlock(&dqm->lock); | 225 | mutex_unlock(&dqm->lock); |
211 | return 0; | 226 | return 0; |
212 | } | 227 | } |
@@ -326,6 +341,15 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm, | |||
326 | if (list_empty(&qpd->queues_list)) | 341 | if (list_empty(&qpd->queues_list)) |
327 | deallocate_vmid(dqm, qpd, q); | 342 | deallocate_vmid(dqm, qpd, q); |
328 | dqm->queue_count--; | 343 | dqm->queue_count--; |
344 | |||
345 | /* | ||
346 | * Unconditionally decrement this counter, regardless of the queue's | ||
347 | * type | ||
348 | */ | ||
349 | dqm->total_queue_count--; | ||
350 | pr_debug("Total of %d queues are accountable so far\n", | ||
351 | dqm->total_queue_count); | ||
352 | |||
329 | out: | 353 | out: |
330 | mutex_unlock(&dqm->lock); | 354 | mutex_unlock(&dqm->lock); |
331 | return retval; | 355 | return retval; |
@@ -541,10 +565,14 @@ static int init_pipelines(struct device_queue_manager *dqm, | |||
541 | 565 | ||
542 | for (i = 0; i < pipes_num; i++) { | 566 | for (i = 0; i < pipes_num; i++) { |
543 | inx = i + first_pipe; | 567 | inx = i + first_pipe; |
568 | /* | ||
569 | * HPD buffer on GTT is allocated by amdkfd, no need to waste | ||
570 | * space in GTT for pipelines we don't initialize | ||
571 | */ | ||
544 | pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES; | 572 | pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES; |
545 | pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr); | 573 | pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr); |
546 | /* = log2(bytes/4)-1 */ | 574 | /* = log2(bytes/4)-1 */ |
547 | kfd2kgd->init_pipeline(dqm->dev->kgd, i, | 575 | kfd2kgd->init_pipeline(dqm->dev->kgd, inx, |
548 | CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr); | 576 | CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr); |
549 | } | 577 | } |
550 | 578 | ||
@@ -560,7 +588,7 @@ static int init_scheduler(struct device_queue_manager *dqm) | |||
560 | 588 | ||
561 | pr_debug("kfd: In %s\n", __func__); | 589 | pr_debug("kfd: In %s\n", __func__); |
562 | 590 | ||
563 | retval = init_pipelines(dqm, get_pipes_num(dqm), KFD_DQM_FIRST_PIPE); | 591 | retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm)); |
564 | if (retval != 0) | 592 | if (retval != 0) |
565 | return retval; | 593 | return retval; |
566 | 594 | ||
@@ -752,6 +780,21 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm, | |||
752 | pr_debug("kfd: In func %s\n", __func__); | 780 | pr_debug("kfd: In func %s\n", __func__); |
753 | 781 | ||
754 | mutex_lock(&dqm->lock); | 782 | mutex_lock(&dqm->lock); |
783 | if (dqm->total_queue_count >= max_num_of_queues_per_device) { | ||
784 | pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n", | ||
785 | dqm->total_queue_count); | ||
786 | mutex_unlock(&dqm->lock); | ||
787 | return -EPERM; | ||
788 | } | ||
789 | |||
790 | /* | ||
791 | * Unconditionally increment this counter, regardless of the queue's | ||
792 | * type or whether the queue is active. | ||
793 | */ | ||
794 | dqm->total_queue_count++; | ||
795 | pr_debug("Total of %d queues are accountable so far\n", | ||
796 | dqm->total_queue_count); | ||
797 | |||
755 | list_add(&kq->list, &qpd->priv_queue_list); | 798 | list_add(&kq->list, &qpd->priv_queue_list); |
756 | dqm->queue_count++; | 799 | dqm->queue_count++; |
757 | qpd->is_debug = true; | 800 | qpd->is_debug = true; |
@@ -775,6 +818,13 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm, | |||
775 | dqm->queue_count--; | 818 | dqm->queue_count--; |
776 | qpd->is_debug = false; | 819 | qpd->is_debug = false; |
777 | execute_queues_cpsch(dqm, false); | 820 | execute_queues_cpsch(dqm, false); |
821 | /* | ||
822 | * Unconditionally decrement this counter, regardless of the queue's | ||
823 | * type. | ||
824 | */ | ||
825 | dqm->total_queue_count--; | ||
826 | pr_debug("Total of %d queues are accountable so far\n", | ||
827 | dqm->total_queue_count); | ||
778 | mutex_unlock(&dqm->lock); | 828 | mutex_unlock(&dqm->lock); |
779 | } | 829 | } |
780 | 830 | ||
@@ -793,6 +843,13 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, | |||
793 | 843 | ||
794 | mutex_lock(&dqm->lock); | 844 | mutex_lock(&dqm->lock); |
795 | 845 | ||
846 | if (dqm->total_queue_count >= max_num_of_queues_per_device) { | ||
847 | pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n", | ||
848 | dqm->total_queue_count); | ||
849 | retval = -EPERM; | ||
850 | goto out; | ||
851 | } | ||
852 | |||
796 | mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP); | 853 | mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP); |
797 | if (mqd == NULL) { | 854 | if (mqd == NULL) { |
798 | mutex_unlock(&dqm->lock); | 855 | mutex_unlock(&dqm->lock); |
@@ -810,6 +867,15 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, | |||
810 | retval = execute_queues_cpsch(dqm, false); | 867 | retval = execute_queues_cpsch(dqm, false); |
811 | } | 868 | } |
812 | 869 | ||
870 | /* | ||
871 | * Unconditionally increment this counter, regardless of the queue's | ||
872 | * type or whether the queue is active. | ||
873 | */ | ||
874 | dqm->total_queue_count++; | ||
875 | |||
876 | pr_debug("Total of %d queues are accountable so far\n", | ||
877 | dqm->total_queue_count); | ||
878 | |||
813 | out: | 879 | out: |
814 | mutex_unlock(&dqm->lock); | 880 | mutex_unlock(&dqm->lock); |
815 | return retval; | 881 | return retval; |
@@ -930,6 +996,14 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, | |||
930 | 996 | ||
931 | mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); | 997 | mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); |
932 | 998 | ||
999 | /* | ||
1000 | * Unconditionally decrement this counter, regardless of the queue's | ||
1001 | * type | ||
1002 | */ | ||
1003 | dqm->total_queue_count--; | ||
1004 | pr_debug("Total of %d queues are accountable so far\n", | ||
1005 | dqm->total_queue_count); | ||
1006 | |||
933 | mutex_unlock(&dqm->lock); | 1007 | mutex_unlock(&dqm->lock); |
934 | 1008 | ||
935 | return 0; | 1009 | return 0; |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index c3f189e8ae35..52035bf0c1cb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | |||
@@ -130,6 +130,7 @@ struct device_queue_manager { | |||
130 | struct list_head queues; | 130 | struct list_head queues; |
131 | unsigned int processes_count; | 131 | unsigned int processes_count; |
132 | unsigned int queue_count; | 132 | unsigned int queue_count; |
133 | unsigned int total_queue_count; | ||
133 | unsigned int next_pipe_to_allocate; | 134 | unsigned int next_pipe_to_allocate; |
134 | unsigned int *allocated_queues; | 135 | unsigned int *allocated_queues; |
135 | unsigned int vmid_bitmap; | 136 | unsigned int vmid_bitmap; |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c index 95d5af138e6e..1c385c23dd0b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c | |||
@@ -50,15 +50,10 @@ module_param(sched_policy, int, 0444); | |||
50 | MODULE_PARM_DESC(sched_policy, | 50 | MODULE_PARM_DESC(sched_policy, |
51 | "Kernel cmdline parameter that defines the amdkfd scheduling policy"); | 51 | "Kernel cmdline parameter that defines the amdkfd scheduling policy"); |
52 | 52 | ||
53 | int max_num_of_processes = KFD_MAX_NUM_OF_PROCESSES_DEFAULT; | 53 | int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT; |
54 | module_param(max_num_of_processes, int, 0444); | 54 | module_param(max_num_of_queues_per_device, int, 0444); |
55 | MODULE_PARM_DESC(max_num_of_processes, | 55 | MODULE_PARM_DESC(max_num_of_queues_per_device, |
56 | "Kernel cmdline parameter that defines the amdkfd maximum number of supported processes"); | 56 | "Maximum number of supported queues per device (1 = Minimum, 4096 = default)"); |
57 | |||
58 | int max_num_of_queues_per_process = KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT; | ||
59 | module_param(max_num_of_queues_per_process, int, 0444); | ||
60 | MODULE_PARM_DESC(max_num_of_queues_per_process, | ||
61 | "Kernel cmdline parameter that defines the amdkfd maximum number of supported queues per process"); | ||
62 | 57 | ||
63 | bool kgd2kfd_init(unsigned interface_version, | 58 | bool kgd2kfd_init(unsigned interface_version, |
64 | const struct kfd2kgd_calls *f2g, | 59 | const struct kfd2kgd_calls *f2g, |
@@ -100,16 +95,10 @@ static int __init kfd_module_init(void) | |||
100 | } | 95 | } |
101 | 96 | ||
102 | /* Verify module parameters */ | 97 | /* Verify module parameters */ |
103 | if ((max_num_of_processes < 0) || | 98 | if ((max_num_of_queues_per_device < 1) || |
104 | (max_num_of_processes > KFD_MAX_NUM_OF_PROCESSES)) { | 99 | (max_num_of_queues_per_device > |
105 | pr_err("kfd: max_num_of_processes must be between 0 to KFD_MAX_NUM_OF_PROCESSES\n"); | 100 | KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) { |
106 | return -1; | 101 | pr_err("kfd: max_num_of_queues_per_device must be between 1 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n"); |
107 | } | ||
108 | |||
109 | if ((max_num_of_queues_per_process < 0) || | ||
110 | (max_num_of_queues_per_process > | ||
111 | KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)) { | ||
112 | pr_err("kfd: max_num_of_queues_per_process must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_PROCESS\n"); | ||
113 | return -1; | 102 | return -1; |
114 | } | 103 | } |
115 | 104 | ||
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c index 4c25ef504f79..6cfe7f1f18cf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c | |||
@@ -30,7 +30,7 @@ static DEFINE_MUTEX(pasid_mutex); | |||
30 | 30 | ||
31 | int kfd_pasid_init(void) | 31 | int kfd_pasid_init(void) |
32 | { | 32 | { |
33 | pasid_limit = max_num_of_processes; | 33 | pasid_limit = KFD_MAX_NUM_OF_PROCESSES; |
34 | 34 | ||
35 | pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL); | 35 | pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL); |
36 | if (!pasid_bitmap) | 36 | if (!pasid_bitmap) |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index b3dc13c83169..96dc10e8904a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h | |||
@@ -52,20 +52,19 @@ | |||
52 | #define kfd_alloc_struct(ptr_to_struct) \ | 52 | #define kfd_alloc_struct(ptr_to_struct) \ |
53 | ((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL)) | 53 | ((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL)) |
54 | 54 | ||
55 | /* Kernel module parameter to specify maximum number of supported processes */ | ||
56 | extern int max_num_of_processes; | ||
57 | |||
58 | #define KFD_MAX_NUM_OF_PROCESSES_DEFAULT 32 | ||
59 | #define KFD_MAX_NUM_OF_PROCESSES 512 | 55 | #define KFD_MAX_NUM_OF_PROCESSES 512 |
56 | #define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024 | ||
60 | 57 | ||
61 | /* | 58 | /* |
62 | * Kernel module parameter to specify maximum number of supported queues | 59 | * Kernel module parameter to specify maximum number of supported queues per |
63 | * per process | 60 | * device |
64 | */ | 61 | */ |
65 | extern int max_num_of_queues_per_process; | 62 | extern int max_num_of_queues_per_device; |
66 | 63 | ||
67 | #define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT 128 | 64 | #define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096 |
68 | #define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024 | 65 | #define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE \ |
66 | (KFD_MAX_NUM_OF_PROCESSES * \ | ||
67 | KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) | ||
69 | 68 | ||
70 | #define KFD_KERNEL_QUEUE_SIZE 2048 | 69 | #define KFD_KERNEL_QUEUE_SIZE 2048 |
71 | 70 | ||
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 47526780d736..2fda1927bff7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | |||
@@ -54,11 +54,11 @@ static int find_available_queue_slot(struct process_queue_manager *pqm, | |||
54 | pr_debug("kfd: in %s\n", __func__); | 54 | pr_debug("kfd: in %s\n", __func__); |
55 | 55 | ||
56 | found = find_first_zero_bit(pqm->queue_slot_bitmap, | 56 | found = find_first_zero_bit(pqm->queue_slot_bitmap, |
57 | max_num_of_queues_per_process); | 57 | KFD_MAX_NUM_OF_QUEUES_PER_PROCESS); |
58 | 58 | ||
59 | pr_debug("kfd: the new slot id %lu\n", found); | 59 | pr_debug("kfd: the new slot id %lu\n", found); |
60 | 60 | ||
61 | if (found >= max_num_of_queues_per_process) { | 61 | if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) { |
62 | pr_info("amdkfd: Can not open more queues for process with pasid %d\n", | 62 | pr_info("amdkfd: Can not open more queues for process with pasid %d\n", |
63 | pqm->process->pasid); | 63 | pqm->process->pasid); |
64 | return -ENOMEM; | 64 | return -ENOMEM; |
@@ -76,7 +76,7 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p) | |||
76 | 76 | ||
77 | INIT_LIST_HEAD(&pqm->queues); | 77 | INIT_LIST_HEAD(&pqm->queues); |
78 | pqm->queue_slot_bitmap = | 78 | pqm->queue_slot_bitmap = |
79 | kzalloc(DIV_ROUND_UP(max_num_of_queues_per_process, | 79 | kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, |
80 | BITS_PER_BYTE), GFP_KERNEL); | 80 | BITS_PER_BYTE), GFP_KERNEL); |
81 | if (pqm->queue_slot_bitmap == NULL) | 81 | if (pqm->queue_slot_bitmap == NULL) |
82 | return -ENOMEM; | 82 | return -ENOMEM; |
@@ -203,6 +203,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, | |||
203 | pqn->kq = NULL; | 203 | pqn->kq = NULL; |
204 | retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd, | 204 | retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd, |
205 | &q->properties.vmid); | 205 | &q->properties.vmid); |
206 | pr_debug("DQM returned %d for create_queue\n", retval); | ||
206 | print_queue(q); | 207 | print_queue(q); |
207 | break; | 208 | break; |
208 | case KFD_QUEUE_TYPE_DIQ: | 209 | case KFD_QUEUE_TYPE_DIQ: |
@@ -222,7 +223,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, | |||
222 | } | 223 | } |
223 | 224 | ||
224 | if (retval != 0) { | 225 | if (retval != 0) { |
225 | pr_err("kfd: error dqm create queue\n"); | 226 | pr_debug("Error dqm create queue\n"); |
226 | goto err_create_queue; | 227 | goto err_create_queue; |
227 | } | 228 | } |
228 | 229 | ||
@@ -241,7 +242,10 @@ int pqm_create_queue(struct process_queue_manager *pqm, | |||
241 | err_create_queue: | 242 | err_create_queue: |
242 | kfree(pqn); | 243 | kfree(pqn); |
243 | err_allocate_pqn: | 244 | err_allocate_pqn: |
245 | /* check if queues list is empty unregister process from device */ | ||
244 | clear_bit(*qid, pqm->queue_slot_bitmap); | 246 | clear_bit(*qid, pqm->queue_slot_bitmap); |
247 | if (list_empty(&pqm->queues)) | ||
248 | dev->dqm->unregister_process(dev->dqm, &pdd->qpd); | ||
245 | return retval; | 249 | return retval; |
246 | } | 250 | } |
247 | 251 | ||
@@ -311,7 +315,11 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid, | |||
311 | BUG_ON(!pqm); | 315 | BUG_ON(!pqm); |
312 | 316 | ||
313 | pqn = get_queue_by_qid(pqm, qid); | 317 | pqn = get_queue_by_qid(pqm, qid); |
314 | BUG_ON(!pqn); | 318 | if (!pqn) { |
319 | pr_debug("amdkfd: No queue %d exists for update operation\n", | ||
320 | qid); | ||
321 | return -EFAULT; | ||
322 | } | ||
315 | 323 | ||
316 | pqn->q->properties.queue_address = p->queue_address; | 324 | pqn->q->properties.queue_address = p->queue_address; |
317 | pqn->q->properties.queue_size = p->queue_size; | 325 | pqn->q->properties.queue_size = p->queue_size; |
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c index c2a1cba1e984..b9140032962d 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.c +++ b/drivers/gpu/drm/cirrus/cirrus_drv.c | |||
@@ -16,9 +16,12 @@ | |||
16 | #include "cirrus_drv.h" | 16 | #include "cirrus_drv.h" |
17 | 17 | ||
18 | int cirrus_modeset = -1; | 18 | int cirrus_modeset = -1; |
19 | int cirrus_bpp = 24; | ||
19 | 20 | ||
20 | MODULE_PARM_DESC(modeset, "Disable/Enable modesetting"); | 21 | MODULE_PARM_DESC(modeset, "Disable/Enable modesetting"); |
21 | module_param_named(modeset, cirrus_modeset, int, 0400); | 22 | module_param_named(modeset, cirrus_modeset, int, 0400); |
23 | MODULE_PARM_DESC(bpp, "Max bits-per-pixel (default:24)"); | ||
24 | module_param_named(bpp, cirrus_bpp, int, 0400); | ||
22 | 25 | ||
23 | /* | 26 | /* |
24 | * This is the generic driver code. This binds the driver to the drm core, | 27 | * This is the generic driver code. This binds the driver to the drm core, |
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h index 693a4565c4ff..705061537a27 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.h +++ b/drivers/gpu/drm/cirrus/cirrus_drv.h | |||
@@ -262,4 +262,7 @@ static inline void cirrus_bo_unreserve(struct cirrus_bo *bo) | |||
262 | 262 | ||
263 | int cirrus_bo_push_sysram(struct cirrus_bo *bo); | 263 | int cirrus_bo_push_sysram(struct cirrus_bo *bo); |
264 | int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr); | 264 | int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr); |
265 | |||
266 | extern int cirrus_bpp; | ||
267 | |||
265 | #endif /* __CIRRUS_DRV_H__ */ | 268 | #endif /* __CIRRUS_DRV_H__ */ |
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c index 4c2d68e9102d..e4b976658087 100644 --- a/drivers/gpu/drm/cirrus/cirrus_main.c +++ b/drivers/gpu/drm/cirrus/cirrus_main.c | |||
@@ -320,6 +320,8 @@ bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height, | |||
320 | const int max_pitch = 0x1FF << 3; /* (4096 - 1) & ~111b bytes */ | 320 | const int max_pitch = 0x1FF << 3; /* (4096 - 1) & ~111b bytes */ |
321 | const int max_size = cdev->mc.vram_size; | 321 | const int max_size = cdev->mc.vram_size; |
322 | 322 | ||
323 | if (bpp > cirrus_bpp) | ||
324 | return false; | ||
323 | if (bpp > 32) | 325 | if (bpp > 32) |
324 | return false; | 326 | return false; |
325 | 327 | ||
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c index 99d4a74ffeaf..61385f2298bf 100644 --- a/drivers/gpu/drm/cirrus/cirrus_mode.c +++ b/drivers/gpu/drm/cirrus/cirrus_mode.c | |||
@@ -501,8 +501,13 @@ static int cirrus_vga_get_modes(struct drm_connector *connector) | |||
501 | int count; | 501 | int count; |
502 | 502 | ||
503 | /* Just add a static list of modes */ | 503 | /* Just add a static list of modes */ |
504 | count = drm_add_modes_noedid(connector, 1280, 1024); | 504 | if (cirrus_bpp <= 24) { |
505 | drm_set_preferred_mode(connector, 1024, 768); | 505 | count = drm_add_modes_noedid(connector, 1280, 1024); |
506 | drm_set_preferred_mode(connector, 1024, 768); | ||
507 | } else { | ||
508 | count = drm_add_modes_noedid(connector, 800, 600); | ||
509 | drm_set_preferred_mode(connector, 800, 600); | ||
510 | } | ||
506 | return count; | 511 | return count; |
507 | } | 512 | } |
508 | 513 | ||
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index cf775a4449c1..dc386ebe5193 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
@@ -145,6 +145,31 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_ | |||
145 | } | 145 | } |
146 | EXPORT_SYMBOL(drm_fb_helper_add_one_connector); | 146 | EXPORT_SYMBOL(drm_fb_helper_add_one_connector); |
147 | 147 | ||
148 | static void remove_from_modeset(struct drm_mode_set *set, | ||
149 | struct drm_connector *connector) | ||
150 | { | ||
151 | int i, j; | ||
152 | |||
153 | for (i = 0; i < set->num_connectors; i++) { | ||
154 | if (set->connectors[i] == connector) | ||
155 | break; | ||
156 | } | ||
157 | |||
158 | if (i == set->num_connectors) | ||
159 | return; | ||
160 | |||
161 | for (j = i + 1; j < set->num_connectors; j++) { | ||
162 | set->connectors[j - 1] = set->connectors[j]; | ||
163 | } | ||
164 | set->num_connectors--; | ||
165 | |||
166 | /* because i915 is pissy about this.. | ||
167 | * TODO maybe need to makes sure we set it back to !=NULL somewhere? | ||
168 | */ | ||
169 | if (set->num_connectors == 0) | ||
170 | set->fb = NULL; | ||
171 | } | ||
172 | |||
148 | int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, | 173 | int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, |
149 | struct drm_connector *connector) | 174 | struct drm_connector *connector) |
150 | { | 175 | { |
@@ -167,6 +192,11 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, | |||
167 | } | 192 | } |
168 | fb_helper->connector_count--; | 193 | fb_helper->connector_count--; |
169 | kfree(fb_helper_connector); | 194 | kfree(fb_helper_connector); |
195 | |||
196 | /* also cleanup dangling references to the connector: */ | ||
197 | for (i = 0; i < fb_helper->crtc_count; i++) | ||
198 | remove_from_modeset(&fb_helper->crtc_info[i].mode_set, connector); | ||
199 | |||
170 | return 0; | 200 | return 0; |
171 | } | 201 | } |
172 | EXPORT_SYMBOL(drm_fb_helper_remove_one_connector); | 202 | EXPORT_SYMBOL(drm_fb_helper_remove_one_connector); |
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index d4762799351d..a9041d1a8ff0 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c | |||
@@ -32,6 +32,8 @@ | |||
32 | struct tda998x_priv { | 32 | struct tda998x_priv { |
33 | struct i2c_client *cec; | 33 | struct i2c_client *cec; |
34 | struct i2c_client *hdmi; | 34 | struct i2c_client *hdmi; |
35 | struct mutex mutex; | ||
36 | struct delayed_work dwork; | ||
35 | uint16_t rev; | 37 | uint16_t rev; |
36 | uint8_t current_page; | 38 | uint8_t current_page; |
37 | int dpms; | 39 | int dpms; |
@@ -402,9 +404,10 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt) | |||
402 | uint8_t addr = REG2ADDR(reg); | 404 | uint8_t addr = REG2ADDR(reg); |
403 | int ret; | 405 | int ret; |
404 | 406 | ||
407 | mutex_lock(&priv->mutex); | ||
405 | ret = set_page(priv, reg); | 408 | ret = set_page(priv, reg); |
406 | if (ret < 0) | 409 | if (ret < 0) |
407 | return ret; | 410 | goto out; |
408 | 411 | ||
409 | ret = i2c_master_send(client, &addr, sizeof(addr)); | 412 | ret = i2c_master_send(client, &addr, sizeof(addr)); |
410 | if (ret < 0) | 413 | if (ret < 0) |
@@ -414,10 +417,12 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt) | |||
414 | if (ret < 0) | 417 | if (ret < 0) |
415 | goto fail; | 418 | goto fail; |
416 | 419 | ||
417 | return ret; | 420 | goto out; |
418 | 421 | ||
419 | fail: | 422 | fail: |
420 | dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg); | 423 | dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg); |
424 | out: | ||
425 | mutex_unlock(&priv->mutex); | ||
421 | return ret; | 426 | return ret; |
422 | } | 427 | } |
423 | 428 | ||
@@ -431,13 +436,16 @@ reg_write_range(struct tda998x_priv *priv, uint16_t reg, uint8_t *p, int cnt) | |||
431 | buf[0] = REG2ADDR(reg); | 436 | buf[0] = REG2ADDR(reg); |
432 | memcpy(&buf[1], p, cnt); | 437 | memcpy(&buf[1], p, cnt); |
433 | 438 | ||
439 | mutex_lock(&priv->mutex); | ||
434 | ret = set_page(priv, reg); | 440 | ret = set_page(priv, reg); |
435 | if (ret < 0) | 441 | if (ret < 0) |
436 | return; | 442 | goto out; |
437 | 443 | ||
438 | ret = i2c_master_send(client, buf, cnt + 1); | 444 | ret = i2c_master_send(client, buf, cnt + 1); |
439 | if (ret < 0) | 445 | if (ret < 0) |
440 | dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); | 446 | dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); |
447 | out: | ||
448 | mutex_unlock(&priv->mutex); | ||
441 | } | 449 | } |
442 | 450 | ||
443 | static int | 451 | static int |
@@ -459,13 +467,16 @@ reg_write(struct tda998x_priv *priv, uint16_t reg, uint8_t val) | |||
459 | uint8_t buf[] = {REG2ADDR(reg), val}; | 467 | uint8_t buf[] = {REG2ADDR(reg), val}; |
460 | int ret; | 468 | int ret; |
461 | 469 | ||
470 | mutex_lock(&priv->mutex); | ||
462 | ret = set_page(priv, reg); | 471 | ret = set_page(priv, reg); |
463 | if (ret < 0) | 472 | if (ret < 0) |
464 | return; | 473 | goto out; |
465 | 474 | ||
466 | ret = i2c_master_send(client, buf, sizeof(buf)); | 475 | ret = i2c_master_send(client, buf, sizeof(buf)); |
467 | if (ret < 0) | 476 | if (ret < 0) |
468 | dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); | 477 | dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); |
478 | out: | ||
479 | mutex_unlock(&priv->mutex); | ||
469 | } | 480 | } |
470 | 481 | ||
471 | static void | 482 | static void |
@@ -475,13 +486,16 @@ reg_write16(struct tda998x_priv *priv, uint16_t reg, uint16_t val) | |||
475 | uint8_t buf[] = {REG2ADDR(reg), val >> 8, val}; | 486 | uint8_t buf[] = {REG2ADDR(reg), val >> 8, val}; |
476 | int ret; | 487 | int ret; |
477 | 488 | ||
489 | mutex_lock(&priv->mutex); | ||
478 | ret = set_page(priv, reg); | 490 | ret = set_page(priv, reg); |
479 | if (ret < 0) | 491 | if (ret < 0) |
480 | return; | 492 | goto out; |
481 | 493 | ||
482 | ret = i2c_master_send(client, buf, sizeof(buf)); | 494 | ret = i2c_master_send(client, buf, sizeof(buf)); |
483 | if (ret < 0) | 495 | if (ret < 0) |
484 | dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); | 496 | dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); |
497 | out: | ||
498 | mutex_unlock(&priv->mutex); | ||
485 | } | 499 | } |
486 | 500 | ||
487 | static void | 501 | static void |
@@ -536,6 +550,17 @@ tda998x_reset(struct tda998x_priv *priv) | |||
536 | reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24); | 550 | reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24); |
537 | } | 551 | } |
538 | 552 | ||
553 | /* handle HDMI connect/disconnect */ | ||
554 | static void tda998x_hpd(struct work_struct *work) | ||
555 | { | ||
556 | struct delayed_work *dwork = to_delayed_work(work); | ||
557 | struct tda998x_priv *priv = | ||
558 | container_of(dwork, struct tda998x_priv, dwork); | ||
559 | |||
560 | if (priv->encoder && priv->encoder->dev) | ||
561 | drm_kms_helper_hotplug_event(priv->encoder->dev); | ||
562 | } | ||
563 | |||
539 | /* | 564 | /* |
540 | * only 2 interrupts may occur: screen plug/unplug and EDID read | 565 | * only 2 interrupts may occur: screen plug/unplug and EDID read |
541 | */ | 566 | */ |
@@ -559,8 +584,7 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data) | |||
559 | priv->wq_edid_wait = 0; | 584 | priv->wq_edid_wait = 0; |
560 | wake_up(&priv->wq_edid); | 585 | wake_up(&priv->wq_edid); |
561 | } else if (cec != 0) { /* HPD change */ | 586 | } else if (cec != 0) { /* HPD change */ |
562 | if (priv->encoder && priv->encoder->dev) | 587 | schedule_delayed_work(&priv->dwork, HZ/10); |
563 | drm_helper_hpd_irq_event(priv->encoder->dev); | ||
564 | } | 588 | } |
565 | return IRQ_HANDLED; | 589 | return IRQ_HANDLED; |
566 | } | 590 | } |
@@ -1170,8 +1194,10 @@ static void tda998x_destroy(struct tda998x_priv *priv) | |||
1170 | /* disable all IRQs and free the IRQ handler */ | 1194 | /* disable all IRQs and free the IRQ handler */ |
1171 | cec_write(priv, REG_CEC_RXSHPDINTENA, 0); | 1195 | cec_write(priv, REG_CEC_RXSHPDINTENA, 0); |
1172 | reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD); | 1196 | reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD); |
1173 | if (priv->hdmi->irq) | 1197 | if (priv->hdmi->irq) { |
1174 | free_irq(priv->hdmi->irq, priv); | 1198 | free_irq(priv->hdmi->irq, priv); |
1199 | cancel_delayed_work_sync(&priv->dwork); | ||
1200 | } | ||
1175 | 1201 | ||
1176 | i2c_unregister_device(priv->cec); | 1202 | i2c_unregister_device(priv->cec); |
1177 | } | 1203 | } |
@@ -1255,6 +1281,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv) | |||
1255 | struct device_node *np = client->dev.of_node; | 1281 | struct device_node *np = client->dev.of_node; |
1256 | u32 video; | 1282 | u32 video; |
1257 | int rev_lo, rev_hi, ret; | 1283 | int rev_lo, rev_hi, ret; |
1284 | unsigned short cec_addr; | ||
1258 | 1285 | ||
1259 | priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3); | 1286 | priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3); |
1260 | priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1); | 1287 | priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1); |
@@ -1262,12 +1289,16 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv) | |||
1262 | 1289 | ||
1263 | priv->current_page = 0xff; | 1290 | priv->current_page = 0xff; |
1264 | priv->hdmi = client; | 1291 | priv->hdmi = client; |
1265 | priv->cec = i2c_new_dummy(client->adapter, 0x34); | 1292 | /* CEC I2C address bound to TDA998x I2C addr by configuration pins */ |
1293 | cec_addr = 0x34 + (client->addr & 0x03); | ||
1294 | priv->cec = i2c_new_dummy(client->adapter, cec_addr); | ||
1266 | if (!priv->cec) | 1295 | if (!priv->cec) |
1267 | return -ENODEV; | 1296 | return -ENODEV; |
1268 | 1297 | ||
1269 | priv->dpms = DRM_MODE_DPMS_OFF; | 1298 | priv->dpms = DRM_MODE_DPMS_OFF; |
1270 | 1299 | ||
1300 | mutex_init(&priv->mutex); /* protect the page access */ | ||
1301 | |||
1271 | /* wake up the device: */ | 1302 | /* wake up the device: */ |
1272 | cec_write(priv, REG_CEC_ENAMODS, | 1303 | cec_write(priv, REG_CEC_ENAMODS, |
1273 | CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI); | 1304 | CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI); |
@@ -1323,8 +1354,9 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv) | |||
1323 | if (client->irq) { | 1354 | if (client->irq) { |
1324 | int irqf_trigger; | 1355 | int irqf_trigger; |
1325 | 1356 | ||
1326 | /* init read EDID waitqueue */ | 1357 | /* init read EDID waitqueue and HDP work */ |
1327 | init_waitqueue_head(&priv->wq_edid); | 1358 | init_waitqueue_head(&priv->wq_edid); |
1359 | INIT_DELAYED_WORK(&priv->dwork, tda998x_hpd); | ||
1328 | 1360 | ||
1329 | /* clear pending interrupts */ | 1361 | /* clear pending interrupts */ |
1330 | reg_read(priv, REG_INT_FLAGS_0); | 1362 | reg_read(priv, REG_INT_FLAGS_0); |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 574057cd1d09..7643300828c3 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -462,19 +462,13 @@ void intel_detect_pch(struct drm_device *dev) | |||
462 | } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { | 462 | } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { |
463 | dev_priv->pch_type = PCH_LPT; | 463 | dev_priv->pch_type = PCH_LPT; |
464 | DRM_DEBUG_KMS("Found LynxPoint PCH\n"); | 464 | DRM_DEBUG_KMS("Found LynxPoint PCH\n"); |
465 | WARN_ON(!IS_HASWELL(dev)); | 465 | WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev)); |
466 | WARN_ON(IS_HSW_ULT(dev)); | 466 | WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev)); |
467 | } else if (IS_BROADWELL(dev)) { | ||
468 | dev_priv->pch_type = PCH_LPT; | ||
469 | dev_priv->pch_id = | ||
470 | INTEL_PCH_LPT_LP_DEVICE_ID_TYPE; | ||
471 | DRM_DEBUG_KMS("This is Broadwell, assuming " | ||
472 | "LynxPoint LP PCH\n"); | ||
473 | } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { | 467 | } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { |
474 | dev_priv->pch_type = PCH_LPT; | 468 | dev_priv->pch_type = PCH_LPT; |
475 | DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); | 469 | DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); |
476 | WARN_ON(!IS_HASWELL(dev)); | 470 | WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev)); |
477 | WARN_ON(!IS_HSW_ULT(dev)); | 471 | WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev)); |
478 | } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) { | 472 | } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) { |
479 | dev_priv->pch_type = PCH_SPT; | 473 | dev_priv->pch_type = PCH_SPT; |
480 | DRM_DEBUG_KMS("Found SunrisePoint PCH\n"); | 474 | DRM_DEBUG_KMS("Found SunrisePoint PCH\n"); |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e9f891c432f8..9d7a7155bf02 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -2159,8 +2159,7 @@ struct drm_i915_cmd_table { | |||
2159 | #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ | 2159 | #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ |
2160 | (INTEL_DEVID(dev) & 0xFF00) == 0x0C00) | 2160 | (INTEL_DEVID(dev) & 0xFF00) == 0x0C00) |
2161 | #define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ | 2161 | #define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ |
2162 | ((INTEL_DEVID(dev) & 0xf) == 0x2 || \ | 2162 | ((INTEL_DEVID(dev) & 0xf) == 0x6 || \ |
2163 | (INTEL_DEVID(dev) & 0xf) == 0x6 || \ | ||
2164 | (INTEL_DEVID(dev) & 0xf) == 0xe)) | 2163 | (INTEL_DEVID(dev) & 0xf) == 0xe)) |
2165 | #define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \ | 2164 | #define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \ |
2166 | (INTEL_DEVID(dev) & 0x00F0) == 0x0020) | 2165 | (INTEL_DEVID(dev) & 0x00F0) == 0x0020) |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 76354d3ba925..5f614828d365 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -3148,6 +3148,13 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg, | |||
3148 | u32 size = i915_gem_obj_ggtt_size(obj); | 3148 | u32 size = i915_gem_obj_ggtt_size(obj); |
3149 | uint64_t val; | 3149 | uint64_t val; |
3150 | 3150 | ||
3151 | /* Adjust fence size to match tiled area */ | ||
3152 | if (obj->tiling_mode != I915_TILING_NONE) { | ||
3153 | uint32_t row_size = obj->stride * | ||
3154 | (obj->tiling_mode == I915_TILING_Y ? 32 : 8); | ||
3155 | size = (size / row_size) * row_size; | ||
3156 | } | ||
3157 | |||
3151 | val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) & | 3158 | val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) & |
3152 | 0xfffff000) << 32; | 3159 | 0xfffff000) << 32; |
3153 | val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000; | 3160 | val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000; |
@@ -4884,25 +4891,18 @@ i915_gem_init_hw(struct drm_device *dev) | |||
4884 | for (i = 0; i < NUM_L3_SLICES(dev); i++) | 4891 | for (i = 0; i < NUM_L3_SLICES(dev); i++) |
4885 | i915_gem_l3_remap(&dev_priv->ring[RCS], i); | 4892 | i915_gem_l3_remap(&dev_priv->ring[RCS], i); |
4886 | 4893 | ||
4887 | /* | 4894 | ret = i915_ppgtt_init_hw(dev); |
4888 | * XXX: Contexts should only be initialized once. Doing a switch to the | ||
4889 | * default context switch however is something we'd like to do after | ||
4890 | * reset or thaw (the latter may not actually be necessary for HW, but | ||
4891 | * goes with our code better). Context switching requires rings (for | ||
4892 | * the do_switch), but before enabling PPGTT. So don't move this. | ||
4893 | */ | ||
4894 | ret = i915_gem_context_enable(dev_priv); | ||
4895 | if (ret && ret != -EIO) { | 4895 | if (ret && ret != -EIO) { |
4896 | DRM_ERROR("Context enable failed %d\n", ret); | 4896 | DRM_ERROR("PPGTT enable failed %d\n", ret); |
4897 | i915_gem_cleanup_ringbuffer(dev); | 4897 | i915_gem_cleanup_ringbuffer(dev); |
4898 | |||
4899 | return ret; | ||
4900 | } | 4898 | } |
4901 | 4899 | ||
4902 | ret = i915_ppgtt_init_hw(dev); | 4900 | ret = i915_gem_context_enable(dev_priv); |
4903 | if (ret && ret != -EIO) { | 4901 | if (ret && ret != -EIO) { |
4904 | DRM_ERROR("PPGTT enable failed %d\n", ret); | 4902 | DRM_ERROR("Context enable failed %d\n", ret); |
4905 | i915_gem_cleanup_ringbuffer(dev); | 4903 | i915_gem_cleanup_ringbuffer(dev); |
4904 | |||
4905 | return ret; | ||
4906 | } | 4906 | } |
4907 | 4907 | ||
4908 | return ret; | 4908 | return ret; |
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 4d63839bd9b4..dfb783a8f2c3 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -962,7 +962,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector) | |||
962 | 962 | ||
963 | WARN_ON(panel->backlight.max == 0); | 963 | WARN_ON(panel->backlight.max == 0); |
964 | 964 | ||
965 | if (panel->backlight.level == 0) { | 965 | if (panel->backlight.level <= panel->backlight.min) { |
966 | panel->backlight.level = panel->backlight.max; | 966 | panel->backlight.level = panel->backlight.max; |
967 | if (panel->backlight.device) | 967 | if (panel->backlight.device) |
968 | panel->backlight.device->props.brightness = | 968 | panel->backlight.device->props.brightness = |
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index a0133c74f4cf..42cd0cffe210 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c | |||
@@ -816,7 +816,6 @@ void cik_sdma_vm_write_pages(struct radeon_device *rdev, | |||
816 | for (; ndw > 0; ndw -= 2, --count, pe += 8) { | 816 | for (; ndw > 0; ndw -= 2, --count, pe += 8) { |
817 | if (flags & R600_PTE_SYSTEM) { | 817 | if (flags & R600_PTE_SYSTEM) { |
818 | value = radeon_vm_map_gart(rdev, addr); | 818 | value = radeon_vm_map_gart(rdev, addr); |
819 | value &= 0xFFFFFFFFFFFFF000ULL; | ||
820 | } else if (flags & R600_PTE_VALID) { | 819 | } else if (flags & R600_PTE_VALID) { |
821 | value = addr; | 820 | value = addr; |
822 | } else { | 821 | } else { |
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c index 4be2bb7cbef3..ce787a9f12c0 100644 --- a/drivers/gpu/drm/radeon/ni_dma.c +++ b/drivers/gpu/drm/radeon/ni_dma.c | |||
@@ -372,7 +372,6 @@ void cayman_dma_vm_write_pages(struct radeon_device *rdev, | |||
372 | for (; ndw > 0; ndw -= 2, --count, pe += 8) { | 372 | for (; ndw > 0; ndw -= 2, --count, pe += 8) { |
373 | if (flags & R600_PTE_SYSTEM) { | 373 | if (flags & R600_PTE_SYSTEM) { |
374 | value = radeon_vm_map_gart(rdev, addr); | 374 | value = radeon_vm_map_gart(rdev, addr); |
375 | value &= 0xFFFFFFFFFFFFF000ULL; | ||
376 | } else if (flags & R600_PTE_VALID) { | 375 | } else if (flags & R600_PTE_VALID) { |
377 | value = addr; | 376 | value = addr; |
378 | } else { | 377 | } else { |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 74f06d540591..279801ca5110 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -644,6 +644,7 @@ int r100_pci_gart_init(struct radeon_device *rdev) | |||
644 | return r; | 644 | return r; |
645 | rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; | 645 | rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; |
646 | rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; | 646 | rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; |
647 | rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry; | ||
647 | rdev->asic->gart.set_page = &r100_pci_gart_set_page; | 648 | rdev->asic->gart.set_page = &r100_pci_gart_set_page; |
648 | return radeon_gart_table_ram_alloc(rdev); | 649 | return radeon_gart_table_ram_alloc(rdev); |
649 | } | 650 | } |
@@ -681,11 +682,16 @@ void r100_pci_gart_disable(struct radeon_device *rdev) | |||
681 | WREG32(RADEON_AIC_HI_ADDR, 0); | 682 | WREG32(RADEON_AIC_HI_ADDR, 0); |
682 | } | 683 | } |
683 | 684 | ||
685 | uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags) | ||
686 | { | ||
687 | return addr; | ||
688 | } | ||
689 | |||
684 | void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, | 690 | void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, |
685 | uint64_t addr, uint32_t flags) | 691 | uint64_t entry) |
686 | { | 692 | { |
687 | u32 *gtt = rdev->gart.ptr; | 693 | u32 *gtt = rdev->gart.ptr; |
688 | gtt[i] = cpu_to_le32(lower_32_bits(addr)); | 694 | gtt[i] = cpu_to_le32(lower_32_bits(entry)); |
689 | } | 695 | } |
690 | 696 | ||
691 | void r100_pci_gart_fini(struct radeon_device *rdev) | 697 | void r100_pci_gart_fini(struct radeon_device *rdev) |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 064ad5569cca..08d68f3e13e9 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -73,11 +73,8 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev) | |||
73 | #define R300_PTE_WRITEABLE (1 << 2) | 73 | #define R300_PTE_WRITEABLE (1 << 2) |
74 | #define R300_PTE_READABLE (1 << 3) | 74 | #define R300_PTE_READABLE (1 << 3) |
75 | 75 | ||
76 | void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, | 76 | uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags) |
77 | uint64_t addr, uint32_t flags) | ||
78 | { | 77 | { |
79 | void __iomem *ptr = rdev->gart.ptr; | ||
80 | |||
81 | addr = (lower_32_bits(addr) >> 8) | | 78 | addr = (lower_32_bits(addr) >> 8) | |
82 | ((upper_32_bits(addr) & 0xff) << 24); | 79 | ((upper_32_bits(addr) & 0xff) << 24); |
83 | if (flags & RADEON_GART_PAGE_READ) | 80 | if (flags & RADEON_GART_PAGE_READ) |
@@ -86,10 +83,18 @@ void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, | |||
86 | addr |= R300_PTE_WRITEABLE; | 83 | addr |= R300_PTE_WRITEABLE; |
87 | if (!(flags & RADEON_GART_PAGE_SNOOP)) | 84 | if (!(flags & RADEON_GART_PAGE_SNOOP)) |
88 | addr |= R300_PTE_UNSNOOPED; | 85 | addr |= R300_PTE_UNSNOOPED; |
86 | return addr; | ||
87 | } | ||
88 | |||
89 | void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, | ||
90 | uint64_t entry) | ||
91 | { | ||
92 | void __iomem *ptr = rdev->gart.ptr; | ||
93 | |||
89 | /* on x86 we want this to be CPU endian, on powerpc | 94 | /* on x86 we want this to be CPU endian, on powerpc |
90 | * on powerpc without HW swappers, it'll get swapped on way | 95 | * on powerpc without HW swappers, it'll get swapped on way |
91 | * into VRAM - so no need for cpu_to_le32 on VRAM tables */ | 96 | * into VRAM - so no need for cpu_to_le32 on VRAM tables */ |
92 | writel(addr, ((void __iomem *)ptr) + (i * 4)); | 97 | writel(entry, ((void __iomem *)ptr) + (i * 4)); |
93 | } | 98 | } |
94 | 99 | ||
95 | int rv370_pcie_gart_init(struct radeon_device *rdev) | 100 | int rv370_pcie_gart_init(struct radeon_device *rdev) |
@@ -109,6 +114,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev) | |||
109 | DRM_ERROR("Failed to register debugfs file for PCIE gart !\n"); | 114 | DRM_ERROR("Failed to register debugfs file for PCIE gart !\n"); |
110 | rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; | 115 | rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; |
111 | rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; | 116 | rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; |
117 | rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry; | ||
112 | rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; | 118 | rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; |
113 | return radeon_gart_table_vram_alloc(rdev); | 119 | return radeon_gart_table_vram_alloc(rdev); |
114 | } | 120 | } |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 54529b837afa..3f2a8d3febca 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -242,6 +242,7 @@ bool radeon_get_bios(struct radeon_device *rdev); | |||
242 | * Dummy page | 242 | * Dummy page |
243 | */ | 243 | */ |
244 | struct radeon_dummy_page { | 244 | struct radeon_dummy_page { |
245 | uint64_t entry; | ||
245 | struct page *page; | 246 | struct page *page; |
246 | dma_addr_t addr; | 247 | dma_addr_t addr; |
247 | }; | 248 | }; |
@@ -645,7 +646,7 @@ struct radeon_gart { | |||
645 | unsigned num_cpu_pages; | 646 | unsigned num_cpu_pages; |
646 | unsigned table_size; | 647 | unsigned table_size; |
647 | struct page **pages; | 648 | struct page **pages; |
648 | dma_addr_t *pages_addr; | 649 | uint64_t *pages_entry; |
649 | bool ready; | 650 | bool ready; |
650 | }; | 651 | }; |
651 | 652 | ||
@@ -1847,8 +1848,9 @@ struct radeon_asic { | |||
1847 | /* gart */ | 1848 | /* gart */ |
1848 | struct { | 1849 | struct { |
1849 | void (*tlb_flush)(struct radeon_device *rdev); | 1850 | void (*tlb_flush)(struct radeon_device *rdev); |
1851 | uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags); | ||
1850 | void (*set_page)(struct radeon_device *rdev, unsigned i, | 1852 | void (*set_page)(struct radeon_device *rdev, unsigned i, |
1851 | uint64_t addr, uint32_t flags); | 1853 | uint64_t entry); |
1852 | } gart; | 1854 | } gart; |
1853 | struct { | 1855 | struct { |
1854 | int (*init)(struct radeon_device *rdev); | 1856 | int (*init)(struct radeon_device *rdev); |
@@ -2852,7 +2854,8 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v) | |||
2852 | #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) | 2854 | #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) |
2853 | #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) | 2855 | #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) |
2854 | #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev)) | 2856 | #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev)) |
2855 | #define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f)) | 2857 | #define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f)) |
2858 | #define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e)) | ||
2856 | #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev)) | 2859 | #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev)) |
2857 | #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev)) | 2860 | #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev)) |
2858 | #define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count))) | 2861 | #define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count))) |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 121aff6a3b41..ed0e10eee2dc 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
@@ -159,11 +159,13 @@ void radeon_agp_disable(struct radeon_device *rdev) | |||
159 | DRM_INFO("Forcing AGP to PCIE mode\n"); | 159 | DRM_INFO("Forcing AGP to PCIE mode\n"); |
160 | rdev->flags |= RADEON_IS_PCIE; | 160 | rdev->flags |= RADEON_IS_PCIE; |
161 | rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; | 161 | rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; |
162 | rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry; | ||
162 | rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; | 163 | rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; |
163 | } else { | 164 | } else { |
164 | DRM_INFO("Forcing AGP to PCI mode\n"); | 165 | DRM_INFO("Forcing AGP to PCI mode\n"); |
165 | rdev->flags |= RADEON_IS_PCI; | 166 | rdev->flags |= RADEON_IS_PCI; |
166 | rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; | 167 | rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; |
168 | rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry; | ||
167 | rdev->asic->gart.set_page = &r100_pci_gart_set_page; | 169 | rdev->asic->gart.set_page = &r100_pci_gart_set_page; |
168 | } | 170 | } |
169 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | 171 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; |
@@ -199,6 +201,7 @@ static struct radeon_asic r100_asic = { | |||
199 | .mc_wait_for_idle = &r100_mc_wait_for_idle, | 201 | .mc_wait_for_idle = &r100_mc_wait_for_idle, |
200 | .gart = { | 202 | .gart = { |
201 | .tlb_flush = &r100_pci_gart_tlb_flush, | 203 | .tlb_flush = &r100_pci_gart_tlb_flush, |
204 | .get_page_entry = &r100_pci_gart_get_page_entry, | ||
202 | .set_page = &r100_pci_gart_set_page, | 205 | .set_page = &r100_pci_gart_set_page, |
203 | }, | 206 | }, |
204 | .ring = { | 207 | .ring = { |
@@ -265,6 +268,7 @@ static struct radeon_asic r200_asic = { | |||
265 | .mc_wait_for_idle = &r100_mc_wait_for_idle, | 268 | .mc_wait_for_idle = &r100_mc_wait_for_idle, |
266 | .gart = { | 269 | .gart = { |
267 | .tlb_flush = &r100_pci_gart_tlb_flush, | 270 | .tlb_flush = &r100_pci_gart_tlb_flush, |
271 | .get_page_entry = &r100_pci_gart_get_page_entry, | ||
268 | .set_page = &r100_pci_gart_set_page, | 272 | .set_page = &r100_pci_gart_set_page, |
269 | }, | 273 | }, |
270 | .ring = { | 274 | .ring = { |
@@ -359,6 +363,7 @@ static struct radeon_asic r300_asic = { | |||
359 | .mc_wait_for_idle = &r300_mc_wait_for_idle, | 363 | .mc_wait_for_idle = &r300_mc_wait_for_idle, |
360 | .gart = { | 364 | .gart = { |
361 | .tlb_flush = &r100_pci_gart_tlb_flush, | 365 | .tlb_flush = &r100_pci_gart_tlb_flush, |
366 | .get_page_entry = &r100_pci_gart_get_page_entry, | ||
362 | .set_page = &r100_pci_gart_set_page, | 367 | .set_page = &r100_pci_gart_set_page, |
363 | }, | 368 | }, |
364 | .ring = { | 369 | .ring = { |
@@ -425,6 +430,7 @@ static struct radeon_asic r300_asic_pcie = { | |||
425 | .mc_wait_for_idle = &r300_mc_wait_for_idle, | 430 | .mc_wait_for_idle = &r300_mc_wait_for_idle, |
426 | .gart = { | 431 | .gart = { |
427 | .tlb_flush = &rv370_pcie_gart_tlb_flush, | 432 | .tlb_flush = &rv370_pcie_gart_tlb_flush, |
433 | .get_page_entry = &rv370_pcie_gart_get_page_entry, | ||
428 | .set_page = &rv370_pcie_gart_set_page, | 434 | .set_page = &rv370_pcie_gart_set_page, |
429 | }, | 435 | }, |
430 | .ring = { | 436 | .ring = { |
@@ -491,6 +497,7 @@ static struct radeon_asic r420_asic = { | |||
491 | .mc_wait_for_idle = &r300_mc_wait_for_idle, | 497 | .mc_wait_for_idle = &r300_mc_wait_for_idle, |
492 | .gart = { | 498 | .gart = { |
493 | .tlb_flush = &rv370_pcie_gart_tlb_flush, | 499 | .tlb_flush = &rv370_pcie_gart_tlb_flush, |
500 | .get_page_entry = &rv370_pcie_gart_get_page_entry, | ||
494 | .set_page = &rv370_pcie_gart_set_page, | 501 | .set_page = &rv370_pcie_gart_set_page, |
495 | }, | 502 | }, |
496 | .ring = { | 503 | .ring = { |
@@ -557,6 +564,7 @@ static struct radeon_asic rs400_asic = { | |||
557 | .mc_wait_for_idle = &rs400_mc_wait_for_idle, | 564 | .mc_wait_for_idle = &rs400_mc_wait_for_idle, |
558 | .gart = { | 565 | .gart = { |
559 | .tlb_flush = &rs400_gart_tlb_flush, | 566 | .tlb_flush = &rs400_gart_tlb_flush, |
567 | .get_page_entry = &rs400_gart_get_page_entry, | ||
560 | .set_page = &rs400_gart_set_page, | 568 | .set_page = &rs400_gart_set_page, |
561 | }, | 569 | }, |
562 | .ring = { | 570 | .ring = { |
@@ -623,6 +631,7 @@ static struct radeon_asic rs600_asic = { | |||
623 | .mc_wait_for_idle = &rs600_mc_wait_for_idle, | 631 | .mc_wait_for_idle = &rs600_mc_wait_for_idle, |
624 | .gart = { | 632 | .gart = { |
625 | .tlb_flush = &rs600_gart_tlb_flush, | 633 | .tlb_flush = &rs600_gart_tlb_flush, |
634 | .get_page_entry = &rs600_gart_get_page_entry, | ||
626 | .set_page = &rs600_gart_set_page, | 635 | .set_page = &rs600_gart_set_page, |
627 | }, | 636 | }, |
628 | .ring = { | 637 | .ring = { |
@@ -691,6 +700,7 @@ static struct radeon_asic rs690_asic = { | |||
691 | .mc_wait_for_idle = &rs690_mc_wait_for_idle, | 700 | .mc_wait_for_idle = &rs690_mc_wait_for_idle, |
692 | .gart = { | 701 | .gart = { |
693 | .tlb_flush = &rs400_gart_tlb_flush, | 702 | .tlb_flush = &rs400_gart_tlb_flush, |
703 | .get_page_entry = &rs400_gart_get_page_entry, | ||
694 | .set_page = &rs400_gart_set_page, | 704 | .set_page = &rs400_gart_set_page, |
695 | }, | 705 | }, |
696 | .ring = { | 706 | .ring = { |
@@ -759,6 +769,7 @@ static struct radeon_asic rv515_asic = { | |||
759 | .mc_wait_for_idle = &rv515_mc_wait_for_idle, | 769 | .mc_wait_for_idle = &rv515_mc_wait_for_idle, |
760 | .gart = { | 770 | .gart = { |
761 | .tlb_flush = &rv370_pcie_gart_tlb_flush, | 771 | .tlb_flush = &rv370_pcie_gart_tlb_flush, |
772 | .get_page_entry = &rv370_pcie_gart_get_page_entry, | ||
762 | .set_page = &rv370_pcie_gart_set_page, | 773 | .set_page = &rv370_pcie_gart_set_page, |
763 | }, | 774 | }, |
764 | .ring = { | 775 | .ring = { |
@@ -825,6 +836,7 @@ static struct radeon_asic r520_asic = { | |||
825 | .mc_wait_for_idle = &r520_mc_wait_for_idle, | 836 | .mc_wait_for_idle = &r520_mc_wait_for_idle, |
826 | .gart = { | 837 | .gart = { |
827 | .tlb_flush = &rv370_pcie_gart_tlb_flush, | 838 | .tlb_flush = &rv370_pcie_gart_tlb_flush, |
839 | .get_page_entry = &rv370_pcie_gart_get_page_entry, | ||
828 | .set_page = &rv370_pcie_gart_set_page, | 840 | .set_page = &rv370_pcie_gart_set_page, |
829 | }, | 841 | }, |
830 | .ring = { | 842 | .ring = { |
@@ -919,6 +931,7 @@ static struct radeon_asic r600_asic = { | |||
919 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, | 931 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
920 | .gart = { | 932 | .gart = { |
921 | .tlb_flush = &r600_pcie_gart_tlb_flush, | 933 | .tlb_flush = &r600_pcie_gart_tlb_flush, |
934 | .get_page_entry = &rs600_gart_get_page_entry, | ||
922 | .set_page = &rs600_gart_set_page, | 935 | .set_page = &rs600_gart_set_page, |
923 | }, | 936 | }, |
924 | .ring = { | 937 | .ring = { |
@@ -1004,6 +1017,7 @@ static struct radeon_asic rv6xx_asic = { | |||
1004 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, | 1017 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
1005 | .gart = { | 1018 | .gart = { |
1006 | .tlb_flush = &r600_pcie_gart_tlb_flush, | 1019 | .tlb_flush = &r600_pcie_gart_tlb_flush, |
1020 | .get_page_entry = &rs600_gart_get_page_entry, | ||
1007 | .set_page = &rs600_gart_set_page, | 1021 | .set_page = &rs600_gart_set_page, |
1008 | }, | 1022 | }, |
1009 | .ring = { | 1023 | .ring = { |
@@ -1095,6 +1109,7 @@ static struct radeon_asic rs780_asic = { | |||
1095 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, | 1109 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
1096 | .gart = { | 1110 | .gart = { |
1097 | .tlb_flush = &r600_pcie_gart_tlb_flush, | 1111 | .tlb_flush = &r600_pcie_gart_tlb_flush, |
1112 | .get_page_entry = &rs600_gart_get_page_entry, | ||
1098 | .set_page = &rs600_gart_set_page, | 1113 | .set_page = &rs600_gart_set_page, |
1099 | }, | 1114 | }, |
1100 | .ring = { | 1115 | .ring = { |
@@ -1199,6 +1214,7 @@ static struct radeon_asic rv770_asic = { | |||
1199 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, | 1214 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
1200 | .gart = { | 1215 | .gart = { |
1201 | .tlb_flush = &r600_pcie_gart_tlb_flush, | 1216 | .tlb_flush = &r600_pcie_gart_tlb_flush, |
1217 | .get_page_entry = &rs600_gart_get_page_entry, | ||
1202 | .set_page = &rs600_gart_set_page, | 1218 | .set_page = &rs600_gart_set_page, |
1203 | }, | 1219 | }, |
1204 | .ring = { | 1220 | .ring = { |
@@ -1317,6 +1333,7 @@ static struct radeon_asic evergreen_asic = { | |||
1317 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, | 1333 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
1318 | .gart = { | 1334 | .gart = { |
1319 | .tlb_flush = &evergreen_pcie_gart_tlb_flush, | 1335 | .tlb_flush = &evergreen_pcie_gart_tlb_flush, |
1336 | .get_page_entry = &rs600_gart_get_page_entry, | ||
1320 | .set_page = &rs600_gart_set_page, | 1337 | .set_page = &rs600_gart_set_page, |
1321 | }, | 1338 | }, |
1322 | .ring = { | 1339 | .ring = { |
@@ -1409,6 +1426,7 @@ static struct radeon_asic sumo_asic = { | |||
1409 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, | 1426 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
1410 | .gart = { | 1427 | .gart = { |
1411 | .tlb_flush = &evergreen_pcie_gart_tlb_flush, | 1428 | .tlb_flush = &evergreen_pcie_gart_tlb_flush, |
1429 | .get_page_entry = &rs600_gart_get_page_entry, | ||
1412 | .set_page = &rs600_gart_set_page, | 1430 | .set_page = &rs600_gart_set_page, |
1413 | }, | 1431 | }, |
1414 | .ring = { | 1432 | .ring = { |
@@ -1500,6 +1518,7 @@ static struct radeon_asic btc_asic = { | |||
1500 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, | 1518 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
1501 | .gart = { | 1519 | .gart = { |
1502 | .tlb_flush = &evergreen_pcie_gart_tlb_flush, | 1520 | .tlb_flush = &evergreen_pcie_gart_tlb_flush, |
1521 | .get_page_entry = &rs600_gart_get_page_entry, | ||
1503 | .set_page = &rs600_gart_set_page, | 1522 | .set_page = &rs600_gart_set_page, |
1504 | }, | 1523 | }, |
1505 | .ring = { | 1524 | .ring = { |
@@ -1635,6 +1654,7 @@ static struct radeon_asic cayman_asic = { | |||
1635 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, | 1654 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
1636 | .gart = { | 1655 | .gart = { |
1637 | .tlb_flush = &cayman_pcie_gart_tlb_flush, | 1656 | .tlb_flush = &cayman_pcie_gart_tlb_flush, |
1657 | .get_page_entry = &rs600_gart_get_page_entry, | ||
1638 | .set_page = &rs600_gart_set_page, | 1658 | .set_page = &rs600_gart_set_page, |
1639 | }, | 1659 | }, |
1640 | .vm = { | 1660 | .vm = { |
@@ -1738,6 +1758,7 @@ static struct radeon_asic trinity_asic = { | |||
1738 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, | 1758 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
1739 | .gart = { | 1759 | .gart = { |
1740 | .tlb_flush = &cayman_pcie_gart_tlb_flush, | 1760 | .tlb_flush = &cayman_pcie_gart_tlb_flush, |
1761 | .get_page_entry = &rs600_gart_get_page_entry, | ||
1741 | .set_page = &rs600_gart_set_page, | 1762 | .set_page = &rs600_gart_set_page, |
1742 | }, | 1763 | }, |
1743 | .vm = { | 1764 | .vm = { |
@@ -1871,6 +1892,7 @@ static struct radeon_asic si_asic = { | |||
1871 | .get_gpu_clock_counter = &si_get_gpu_clock_counter, | 1892 | .get_gpu_clock_counter = &si_get_gpu_clock_counter, |
1872 | .gart = { | 1893 | .gart = { |
1873 | .tlb_flush = &si_pcie_gart_tlb_flush, | 1894 | .tlb_flush = &si_pcie_gart_tlb_flush, |
1895 | .get_page_entry = &rs600_gart_get_page_entry, | ||
1874 | .set_page = &rs600_gart_set_page, | 1896 | .set_page = &rs600_gart_set_page, |
1875 | }, | 1897 | }, |
1876 | .vm = { | 1898 | .vm = { |
@@ -2032,6 +2054,7 @@ static struct radeon_asic ci_asic = { | |||
2032 | .get_gpu_clock_counter = &cik_get_gpu_clock_counter, | 2054 | .get_gpu_clock_counter = &cik_get_gpu_clock_counter, |
2033 | .gart = { | 2055 | .gart = { |
2034 | .tlb_flush = &cik_pcie_gart_tlb_flush, | 2056 | .tlb_flush = &cik_pcie_gart_tlb_flush, |
2057 | .get_page_entry = &rs600_gart_get_page_entry, | ||
2035 | .set_page = &rs600_gart_set_page, | 2058 | .set_page = &rs600_gart_set_page, |
2036 | }, | 2059 | }, |
2037 | .vm = { | 2060 | .vm = { |
@@ -2139,6 +2162,7 @@ static struct radeon_asic kv_asic = { | |||
2139 | .get_gpu_clock_counter = &cik_get_gpu_clock_counter, | 2162 | .get_gpu_clock_counter = &cik_get_gpu_clock_counter, |
2140 | .gart = { | 2163 | .gart = { |
2141 | .tlb_flush = &cik_pcie_gart_tlb_flush, | 2164 | .tlb_flush = &cik_pcie_gart_tlb_flush, |
2165 | .get_page_entry = &rs600_gart_get_page_entry, | ||
2142 | .set_page = &rs600_gart_set_page, | 2166 | .set_page = &rs600_gart_set_page, |
2143 | }, | 2167 | }, |
2144 | .vm = { | 2168 | .vm = { |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 2a45d548d5ec..8d787d115653 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -67,8 +67,9 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp); | |||
67 | int r100_asic_reset(struct radeon_device *rdev); | 67 | int r100_asic_reset(struct radeon_device *rdev); |
68 | u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); | 68 | u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); |
69 | void r100_pci_gart_tlb_flush(struct radeon_device *rdev); | 69 | void r100_pci_gart_tlb_flush(struct radeon_device *rdev); |
70 | uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags); | ||
70 | void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, | 71 | void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, |
71 | uint64_t addr, uint32_t flags); | 72 | uint64_t entry); |
72 | void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring); | 73 | void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring); |
73 | int r100_irq_set(struct radeon_device *rdev); | 74 | int r100_irq_set(struct radeon_device *rdev); |
74 | int r100_irq_process(struct radeon_device *rdev); | 75 | int r100_irq_process(struct radeon_device *rdev); |
@@ -172,8 +173,9 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev, | |||
172 | struct radeon_fence *fence); | 173 | struct radeon_fence *fence); |
173 | extern int r300_cs_parse(struct radeon_cs_parser *p); | 174 | extern int r300_cs_parse(struct radeon_cs_parser *p); |
174 | extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); | 175 | extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); |
176 | extern uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags); | ||
175 | extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, | 177 | extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, |
176 | uint64_t addr, uint32_t flags); | 178 | uint64_t entry); |
177 | extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); | 179 | extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); |
178 | extern int rv370_get_pcie_lanes(struct radeon_device *rdev); | 180 | extern int rv370_get_pcie_lanes(struct radeon_device *rdev); |
179 | extern void r300_set_reg_safe(struct radeon_device *rdev); | 181 | extern void r300_set_reg_safe(struct radeon_device *rdev); |
@@ -208,8 +210,9 @@ extern void rs400_fini(struct radeon_device *rdev); | |||
208 | extern int rs400_suspend(struct radeon_device *rdev); | 210 | extern int rs400_suspend(struct radeon_device *rdev); |
209 | extern int rs400_resume(struct radeon_device *rdev); | 211 | extern int rs400_resume(struct radeon_device *rdev); |
210 | void rs400_gart_tlb_flush(struct radeon_device *rdev); | 212 | void rs400_gart_tlb_flush(struct radeon_device *rdev); |
213 | uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags); | ||
211 | void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, | 214 | void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, |
212 | uint64_t addr, uint32_t flags); | 215 | uint64_t entry); |
213 | uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 216 | uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
214 | void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 217 | void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
215 | int rs400_gart_init(struct radeon_device *rdev); | 218 | int rs400_gart_init(struct radeon_device *rdev); |
@@ -232,8 +235,9 @@ int rs600_irq_process(struct radeon_device *rdev); | |||
232 | void rs600_irq_disable(struct radeon_device *rdev); | 235 | void rs600_irq_disable(struct radeon_device *rdev); |
233 | u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); | 236 | u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); |
234 | void rs600_gart_tlb_flush(struct radeon_device *rdev); | 237 | void rs600_gart_tlb_flush(struct radeon_device *rdev); |
238 | uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags); | ||
235 | void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, | 239 | void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, |
236 | uint64_t addr, uint32_t flags); | 240 | uint64_t entry); |
237 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 241 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
238 | void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 242 | void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
239 | void rs600_bandwidth_update(struct radeon_device *rdev); | 243 | void rs600_bandwidth_update(struct radeon_device *rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index 9e7f23dd14bd..87d5fb21cb61 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c | |||
@@ -34,7 +34,8 @@ | |||
34 | 34 | ||
35 | static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size, | 35 | static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size, |
36 | uint64_t saddr, uint64_t daddr, | 36 | uint64_t saddr, uint64_t daddr, |
37 | int flag, int n) | 37 | int flag, int n, |
38 | struct reservation_object *resv) | ||
38 | { | 39 | { |
39 | unsigned long start_jiffies; | 40 | unsigned long start_jiffies; |
40 | unsigned long end_jiffies; | 41 | unsigned long end_jiffies; |
@@ -47,12 +48,12 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size, | |||
47 | case RADEON_BENCHMARK_COPY_DMA: | 48 | case RADEON_BENCHMARK_COPY_DMA: |
48 | fence = radeon_copy_dma(rdev, saddr, daddr, | 49 | fence = radeon_copy_dma(rdev, saddr, daddr, |
49 | size / RADEON_GPU_PAGE_SIZE, | 50 | size / RADEON_GPU_PAGE_SIZE, |
50 | NULL); | 51 | resv); |
51 | break; | 52 | break; |
52 | case RADEON_BENCHMARK_COPY_BLIT: | 53 | case RADEON_BENCHMARK_COPY_BLIT: |
53 | fence = radeon_copy_blit(rdev, saddr, daddr, | 54 | fence = radeon_copy_blit(rdev, saddr, daddr, |
54 | size / RADEON_GPU_PAGE_SIZE, | 55 | size / RADEON_GPU_PAGE_SIZE, |
55 | NULL); | 56 | resv); |
56 | break; | 57 | break; |
57 | default: | 58 | default: |
58 | DRM_ERROR("Unknown copy method\n"); | 59 | DRM_ERROR("Unknown copy method\n"); |
@@ -120,7 +121,8 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size, | |||
120 | 121 | ||
121 | if (rdev->asic->copy.dma) { | 122 | if (rdev->asic->copy.dma) { |
122 | time = radeon_benchmark_do_move(rdev, size, saddr, daddr, | 123 | time = radeon_benchmark_do_move(rdev, size, saddr, daddr, |
123 | RADEON_BENCHMARK_COPY_DMA, n); | 124 | RADEON_BENCHMARK_COPY_DMA, n, |
125 | dobj->tbo.resv); | ||
124 | if (time < 0) | 126 | if (time < 0) |
125 | goto out_cleanup; | 127 | goto out_cleanup; |
126 | if (time > 0) | 128 | if (time > 0) |
@@ -130,7 +132,8 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size, | |||
130 | 132 | ||
131 | if (rdev->asic->copy.blit) { | 133 | if (rdev->asic->copy.blit) { |
132 | time = radeon_benchmark_do_move(rdev, size, saddr, daddr, | 134 | time = radeon_benchmark_do_move(rdev, size, saddr, daddr, |
133 | RADEON_BENCHMARK_COPY_BLIT, n); | 135 | RADEON_BENCHMARK_COPY_BLIT, n, |
136 | dobj->tbo.resv); | ||
134 | if (time < 0) | 137 | if (time < 0) |
135 | goto out_cleanup; | 138 | goto out_cleanup; |
136 | if (time > 0) | 139 | if (time > 0) |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 0ec65168f331..bd7519fdd3f4 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -774,6 +774,8 @@ int radeon_dummy_page_init(struct radeon_device *rdev) | |||
774 | rdev->dummy_page.page = NULL; | 774 | rdev->dummy_page.page = NULL; |
775 | return -ENOMEM; | 775 | return -ENOMEM; |
776 | } | 776 | } |
777 | rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr, | ||
778 | RADEON_GART_PAGE_DUMMY); | ||
777 | return 0; | 779 | return 0; |
778 | } | 780 | } |
779 | 781 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 102116902a07..913fafa597ad 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -960,6 +960,9 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll, | |||
960 | if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && | 960 | if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && |
961 | pll->flags & RADEON_PLL_USE_REF_DIV) | 961 | pll->flags & RADEON_PLL_USE_REF_DIV) |
962 | ref_div_max = pll->reference_div; | 962 | ref_div_max = pll->reference_div; |
963 | else if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) | ||
964 | /* fix for problems on RS880 */ | ||
965 | ref_div_max = min(pll->max_ref_div, 7u); | ||
963 | else | 966 | else |
964 | ref_div_max = pll->max_ref_div; | 967 | ref_div_max = pll->max_ref_div; |
965 | 968 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 84146d5901aa..5450fa95a47e 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c | |||
@@ -165,6 +165,19 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev) | |||
165 | radeon_bo_unpin(rdev->gart.robj); | 165 | radeon_bo_unpin(rdev->gart.robj); |
166 | radeon_bo_unreserve(rdev->gart.robj); | 166 | radeon_bo_unreserve(rdev->gart.robj); |
167 | rdev->gart.table_addr = gpu_addr; | 167 | rdev->gart.table_addr = gpu_addr; |
168 | |||
169 | if (!r) { | ||
170 | int i; | ||
171 | |||
172 | /* We might have dropped some GART table updates while it wasn't | ||
173 | * mapped, restore all entries | ||
174 | */ | ||
175 | for (i = 0; i < rdev->gart.num_gpu_pages; i++) | ||
176 | radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]); | ||
177 | mb(); | ||
178 | radeon_gart_tlb_flush(rdev); | ||
179 | } | ||
180 | |||
168 | return r; | 181 | return r; |
169 | } | 182 | } |
170 | 183 | ||
@@ -228,7 +241,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, | |||
228 | unsigned t; | 241 | unsigned t; |
229 | unsigned p; | 242 | unsigned p; |
230 | int i, j; | 243 | int i, j; |
231 | u64 page_base; | ||
232 | 244 | ||
233 | if (!rdev->gart.ready) { | 245 | if (!rdev->gart.ready) { |
234 | WARN(1, "trying to unbind memory from uninitialized GART !\n"); | 246 | WARN(1, "trying to unbind memory from uninitialized GART !\n"); |
@@ -239,14 +251,12 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, | |||
239 | for (i = 0; i < pages; i++, p++) { | 251 | for (i = 0; i < pages; i++, p++) { |
240 | if (rdev->gart.pages[p]) { | 252 | if (rdev->gart.pages[p]) { |
241 | rdev->gart.pages[p] = NULL; | 253 | rdev->gart.pages[p] = NULL; |
242 | rdev->gart.pages_addr[p] = rdev->dummy_page.addr; | ||
243 | page_base = rdev->gart.pages_addr[p]; | ||
244 | for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { | 254 | for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { |
255 | rdev->gart.pages_entry[t] = rdev->dummy_page.entry; | ||
245 | if (rdev->gart.ptr) { | 256 | if (rdev->gart.ptr) { |
246 | radeon_gart_set_page(rdev, t, page_base, | 257 | radeon_gart_set_page(rdev, t, |
247 | RADEON_GART_PAGE_DUMMY); | 258 | rdev->dummy_page.entry); |
248 | } | 259 | } |
249 | page_base += RADEON_GPU_PAGE_SIZE; | ||
250 | } | 260 | } |
251 | } | 261 | } |
252 | } | 262 | } |
@@ -274,7 +284,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, | |||
274 | { | 284 | { |
275 | unsigned t; | 285 | unsigned t; |
276 | unsigned p; | 286 | unsigned p; |
277 | uint64_t page_base; | 287 | uint64_t page_base, page_entry; |
278 | int i, j; | 288 | int i, j; |
279 | 289 | ||
280 | if (!rdev->gart.ready) { | 290 | if (!rdev->gart.ready) { |
@@ -285,14 +295,15 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, | |||
285 | p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); | 295 | p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); |
286 | 296 | ||
287 | for (i = 0; i < pages; i++, p++) { | 297 | for (i = 0; i < pages; i++, p++) { |
288 | rdev->gart.pages_addr[p] = dma_addr[i]; | ||
289 | rdev->gart.pages[p] = pagelist[i]; | 298 | rdev->gart.pages[p] = pagelist[i]; |
290 | if (rdev->gart.ptr) { | 299 | page_base = dma_addr[i]; |
291 | page_base = rdev->gart.pages_addr[p]; | 300 | for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { |
292 | for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { | 301 | page_entry = radeon_gart_get_page_entry(page_base, flags); |
293 | radeon_gart_set_page(rdev, t, page_base, flags); | 302 | rdev->gart.pages_entry[t] = page_entry; |
294 | page_base += RADEON_GPU_PAGE_SIZE; | 303 | if (rdev->gart.ptr) { |
304 | radeon_gart_set_page(rdev, t, page_entry); | ||
295 | } | 305 | } |
306 | page_base += RADEON_GPU_PAGE_SIZE; | ||
296 | } | 307 | } |
297 | } | 308 | } |
298 | mb(); | 309 | mb(); |
@@ -334,16 +345,15 @@ int radeon_gart_init(struct radeon_device *rdev) | |||
334 | radeon_gart_fini(rdev); | 345 | radeon_gart_fini(rdev); |
335 | return -ENOMEM; | 346 | return -ENOMEM; |
336 | } | 347 | } |
337 | rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) * | 348 | rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) * |
338 | rdev->gart.num_cpu_pages); | 349 | rdev->gart.num_gpu_pages); |
339 | if (rdev->gart.pages_addr == NULL) { | 350 | if (rdev->gart.pages_entry == NULL) { |
340 | radeon_gart_fini(rdev); | 351 | radeon_gart_fini(rdev); |
341 | return -ENOMEM; | 352 | return -ENOMEM; |
342 | } | 353 | } |
343 | /* set GART entry to point to the dummy page by default */ | 354 | /* set GART entry to point to the dummy page by default */ |
344 | for (i = 0; i < rdev->gart.num_cpu_pages; i++) { | 355 | for (i = 0; i < rdev->gart.num_gpu_pages; i++) |
345 | rdev->gart.pages_addr[i] = rdev->dummy_page.addr; | 356 | rdev->gart.pages_entry[i] = rdev->dummy_page.entry; |
346 | } | ||
347 | return 0; | 357 | return 0; |
348 | } | 358 | } |
349 | 359 | ||
@@ -356,15 +366,15 @@ int radeon_gart_init(struct radeon_device *rdev) | |||
356 | */ | 366 | */ |
357 | void radeon_gart_fini(struct radeon_device *rdev) | 367 | void radeon_gart_fini(struct radeon_device *rdev) |
358 | { | 368 | { |
359 | if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) { | 369 | if (rdev->gart.ready) { |
360 | /* unbind pages */ | 370 | /* unbind pages */ |
361 | radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages); | 371 | radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages); |
362 | } | 372 | } |
363 | rdev->gart.ready = false; | 373 | rdev->gart.ready = false; |
364 | vfree(rdev->gart.pages); | 374 | vfree(rdev->gart.pages); |
365 | vfree(rdev->gart.pages_addr); | 375 | vfree(rdev->gart.pages_entry); |
366 | rdev->gart.pages = NULL; | 376 | rdev->gart.pages = NULL; |
367 | rdev->gart.pages_addr = NULL; | 377 | rdev->gart.pages_entry = NULL; |
368 | 378 | ||
369 | radeon_dummy_page_fini(rdev); | 379 | radeon_dummy_page_fini(rdev); |
370 | } | 380 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index d0b4f7d1140d..ac3c1310b953 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
@@ -146,7 +146,8 @@ int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri | |||
146 | struct radeon_bo_va *bo_va; | 146 | struct radeon_bo_va *bo_va; |
147 | int r; | 147 | int r; |
148 | 148 | ||
149 | if (rdev->family < CHIP_CAYMAN) { | 149 | if ((rdev->family < CHIP_CAYMAN) || |
150 | (!rdev->accel_working)) { | ||
150 | return 0; | 151 | return 0; |
151 | } | 152 | } |
152 | 153 | ||
@@ -176,7 +177,8 @@ void radeon_gem_object_close(struct drm_gem_object *obj, | |||
176 | struct radeon_bo_va *bo_va; | 177 | struct radeon_bo_va *bo_va; |
177 | int r; | 178 | int r; |
178 | 179 | ||
179 | if (rdev->family < CHIP_CAYMAN) { | 180 | if ((rdev->family < CHIP_CAYMAN) || |
181 | (!rdev->accel_working)) { | ||
180 | return; | 182 | return; |
181 | } | 183 | } |
182 | 184 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c index 8bf87f1203cc..bef9a0953284 100644 --- a/drivers/gpu/drm/radeon/radeon_kfd.c +++ b/drivers/gpu/drm/radeon/radeon_kfd.c | |||
@@ -436,7 +436,7 @@ static int kgd_init_memory(struct kgd_dev *kgd) | |||
436 | static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id, | 436 | static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id, |
437 | uint32_t hpd_size, uint64_t hpd_gpu_addr) | 437 | uint32_t hpd_size, uint64_t hpd_gpu_addr) |
438 | { | 438 | { |
439 | uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1; | 439 | uint32_t mec = (pipe_id / CIK_PIPE_PER_MEC) + 1; |
440 | uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC); | 440 | uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC); |
441 | 441 | ||
442 | lock_srbm(kgd, mec, pipe, 0, 0); | 442 | lock_srbm(kgd, mec, pipe, 0, 0); |
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 3cf9c1fa6475..686411e4e4f6 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
@@ -605,14 +605,14 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) | |||
605 | return -ENOMEM; | 605 | return -ENOMEM; |
606 | } | 606 | } |
607 | 607 | ||
608 | vm = &fpriv->vm; | ||
609 | r = radeon_vm_init(rdev, vm); | ||
610 | if (r) { | ||
611 | kfree(fpriv); | ||
612 | return r; | ||
613 | } | ||
614 | |||
615 | if (rdev->accel_working) { | 608 | if (rdev->accel_working) { |
609 | vm = &fpriv->vm; | ||
610 | r = radeon_vm_init(rdev, vm); | ||
611 | if (r) { | ||
612 | kfree(fpriv); | ||
613 | return r; | ||
614 | } | ||
615 | |||
616 | r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); | 616 | r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); |
617 | if (r) { | 617 | if (r) { |
618 | radeon_vm_fini(rdev, vm); | 618 | radeon_vm_fini(rdev, vm); |
@@ -668,9 +668,9 @@ void radeon_driver_postclose_kms(struct drm_device *dev, | |||
668 | radeon_vm_bo_rmv(rdev, vm->ib_bo_va); | 668 | radeon_vm_bo_rmv(rdev, vm->ib_bo_va); |
669 | radeon_bo_unreserve(rdev->ring_tmp_bo.bo); | 669 | radeon_bo_unreserve(rdev->ring_tmp_bo.bo); |
670 | } | 670 | } |
671 | radeon_vm_fini(rdev, vm); | ||
671 | } | 672 | } |
672 | 673 | ||
673 | radeon_vm_fini(rdev, vm); | ||
674 | kfree(fpriv); | 674 | kfree(fpriv); |
675 | file_priv->driver_priv = NULL; | 675 | file_priv->driver_priv = NULL; |
676 | } | 676 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index 07b506b41008..791818165c76 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c | |||
@@ -119,11 +119,11 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag) | |||
119 | if (ring == R600_RING_TYPE_DMA_INDEX) | 119 | if (ring == R600_RING_TYPE_DMA_INDEX) |
120 | fence = radeon_copy_dma(rdev, gtt_addr, vram_addr, | 120 | fence = radeon_copy_dma(rdev, gtt_addr, vram_addr, |
121 | size / RADEON_GPU_PAGE_SIZE, | 121 | size / RADEON_GPU_PAGE_SIZE, |
122 | NULL); | 122 | vram_obj->tbo.resv); |
123 | else | 123 | else |
124 | fence = radeon_copy_blit(rdev, gtt_addr, vram_addr, | 124 | fence = radeon_copy_blit(rdev, gtt_addr, vram_addr, |
125 | size / RADEON_GPU_PAGE_SIZE, | 125 | size / RADEON_GPU_PAGE_SIZE, |
126 | NULL); | 126 | vram_obj->tbo.resv); |
127 | if (IS_ERR(fence)) { | 127 | if (IS_ERR(fence)) { |
128 | DRM_ERROR("Failed GTT->VRAM copy %d\n", i); | 128 | DRM_ERROR("Failed GTT->VRAM copy %d\n", i); |
129 | r = PTR_ERR(fence); | 129 | r = PTR_ERR(fence); |
@@ -170,11 +170,11 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag) | |||
170 | if (ring == R600_RING_TYPE_DMA_INDEX) | 170 | if (ring == R600_RING_TYPE_DMA_INDEX) |
171 | fence = radeon_copy_dma(rdev, vram_addr, gtt_addr, | 171 | fence = radeon_copy_dma(rdev, vram_addr, gtt_addr, |
172 | size / RADEON_GPU_PAGE_SIZE, | 172 | size / RADEON_GPU_PAGE_SIZE, |
173 | NULL); | 173 | vram_obj->tbo.resv); |
174 | else | 174 | else |
175 | fence = radeon_copy_blit(rdev, vram_addr, gtt_addr, | 175 | fence = radeon_copy_blit(rdev, vram_addr, gtt_addr, |
176 | size / RADEON_GPU_PAGE_SIZE, | 176 | size / RADEON_GPU_PAGE_SIZE, |
177 | NULL); | 177 | vram_obj->tbo.resv); |
178 | if (IS_ERR(fence)) { | 178 | if (IS_ERR(fence)) { |
179 | DRM_ERROR("Failed VRAM->GTT copy %d\n", i); | 179 | DRM_ERROR("Failed VRAM->GTT copy %d\n", i); |
180 | r = PTR_ERR(fence); | 180 | r = PTR_ERR(fence); |
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index cde48c42b30a..2a5a4a9e772d 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c | |||
@@ -587,10 +587,8 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr) | |||
587 | uint64_t result; | 587 | uint64_t result; |
588 | 588 | ||
589 | /* page table offset */ | 589 | /* page table offset */ |
590 | result = rdev->gart.pages_addr[addr >> PAGE_SHIFT]; | 590 | result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT]; |
591 | 591 | result &= ~RADEON_GPU_PAGE_MASK; | |
592 | /* in case cpu page size != gpu page size*/ | ||
593 | result |= addr & (~PAGE_MASK); | ||
594 | 592 | ||
595 | return result; | 593 | return result; |
596 | } | 594 | } |
@@ -745,9 +743,11 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev, | |||
745 | */ | 743 | */ |
746 | 744 | ||
747 | /* NI is optimized for 256KB fragments, SI and newer for 64KB */ | 745 | /* NI is optimized for 256KB fragments, SI and newer for 64KB */ |
748 | uint64_t frag_flags = rdev->family == CHIP_CAYMAN ? | 746 | uint64_t frag_flags = ((rdev->family == CHIP_CAYMAN) || |
747 | (rdev->family == CHIP_ARUBA)) ? | ||
749 | R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB; | 748 | R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB; |
750 | uint64_t frag_align = rdev->family == CHIP_CAYMAN ? 0x200 : 0x80; | 749 | uint64_t frag_align = ((rdev->family == CHIP_CAYMAN) || |
750 | (rdev->family == CHIP_ARUBA)) ? 0x200 : 0x80; | ||
751 | 751 | ||
752 | uint64_t frag_start = ALIGN(pe_start, frag_align); | 752 | uint64_t frag_start = ALIGN(pe_start, frag_align); |
753 | uint64_t frag_end = pe_end & ~(frag_align - 1); | 753 | uint64_t frag_end = pe_end & ~(frag_align - 1); |
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index c5799f16aa4b..34e3235f41d2 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
@@ -212,11 +212,9 @@ void rs400_gart_fini(struct radeon_device *rdev) | |||
212 | #define RS400_PTE_WRITEABLE (1 << 2) | 212 | #define RS400_PTE_WRITEABLE (1 << 2) |
213 | #define RS400_PTE_READABLE (1 << 3) | 213 | #define RS400_PTE_READABLE (1 << 3) |
214 | 214 | ||
215 | void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, | 215 | uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags) |
216 | uint64_t addr, uint32_t flags) | ||
217 | { | 216 | { |
218 | uint32_t entry; | 217 | uint32_t entry; |
219 | u32 *gtt = rdev->gart.ptr; | ||
220 | 218 | ||
221 | entry = (lower_32_bits(addr) & PAGE_MASK) | | 219 | entry = (lower_32_bits(addr) & PAGE_MASK) | |
222 | ((upper_32_bits(addr) & 0xff) << 4); | 220 | ((upper_32_bits(addr) & 0xff) << 4); |
@@ -226,8 +224,14 @@ void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, | |||
226 | entry |= RS400_PTE_WRITEABLE; | 224 | entry |= RS400_PTE_WRITEABLE; |
227 | if (!(flags & RADEON_GART_PAGE_SNOOP)) | 225 | if (!(flags & RADEON_GART_PAGE_SNOOP)) |
228 | entry |= RS400_PTE_UNSNOOPED; | 226 | entry |= RS400_PTE_UNSNOOPED; |
229 | entry = cpu_to_le32(entry); | 227 | return entry; |
230 | gtt[i] = entry; | 228 | } |
229 | |||
230 | void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, | ||
231 | uint64_t entry) | ||
232 | { | ||
233 | u32 *gtt = rdev->gart.ptr; | ||
234 | gtt[i] = cpu_to_le32(lower_32_bits(entry)); | ||
231 | } | 235 | } |
232 | 236 | ||
233 | int rs400_mc_wait_for_idle(struct radeon_device *rdev) | 237 | int rs400_mc_wait_for_idle(struct radeon_device *rdev) |
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 9acb1c3c005b..74bce91aecc1 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -625,11 +625,8 @@ static void rs600_gart_fini(struct radeon_device *rdev) | |||
625 | radeon_gart_table_vram_free(rdev); | 625 | radeon_gart_table_vram_free(rdev); |
626 | } | 626 | } |
627 | 627 | ||
628 | void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, | 628 | uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags) |
629 | uint64_t addr, uint32_t flags) | ||
630 | { | 629 | { |
631 | void __iomem *ptr = (void *)rdev->gart.ptr; | ||
632 | |||
633 | addr = addr & 0xFFFFFFFFFFFFF000ULL; | 630 | addr = addr & 0xFFFFFFFFFFFFF000ULL; |
634 | addr |= R600_PTE_SYSTEM; | 631 | addr |= R600_PTE_SYSTEM; |
635 | if (flags & RADEON_GART_PAGE_VALID) | 632 | if (flags & RADEON_GART_PAGE_VALID) |
@@ -640,7 +637,14 @@ void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, | |||
640 | addr |= R600_PTE_WRITEABLE; | 637 | addr |= R600_PTE_WRITEABLE; |
641 | if (flags & RADEON_GART_PAGE_SNOOP) | 638 | if (flags & RADEON_GART_PAGE_SNOOP) |
642 | addr |= R600_PTE_SNOOPED; | 639 | addr |= R600_PTE_SNOOPED; |
643 | writeq(addr, ptr + (i * 8)); | 640 | return addr; |
641 | } | ||
642 | |||
643 | void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, | ||
644 | uint64_t entry) | ||
645 | { | ||
646 | void __iomem *ptr = (void *)rdev->gart.ptr; | ||
647 | writeq(entry, ptr + (i * 8)); | ||
644 | } | 648 | } |
645 | 649 | ||
646 | int rs600_irq_set(struct radeon_device *rdev) | 650 | int rs600_irq_set(struct radeon_device *rdev) |
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c index aa7b872b2c43..83207929fc62 100644 --- a/drivers/gpu/drm/radeon/si_dma.c +++ b/drivers/gpu/drm/radeon/si_dma.c | |||
@@ -123,7 +123,6 @@ void si_dma_vm_write_pages(struct radeon_device *rdev, | |||
123 | for (; ndw > 0; ndw -= 2, --count, pe += 8) { | 123 | for (; ndw > 0; ndw -= 2, --count, pe += 8) { |
124 | if (flags & R600_PTE_SYSTEM) { | 124 | if (flags & R600_PTE_SYSTEM) { |
125 | value = radeon_vm_map_gart(rdev, addr); | 125 | value = radeon_vm_map_gart(rdev, addr); |
126 | value &= 0xFFFFFFFFFFFFF000ULL; | ||
127 | } else if (flags & R600_PTE_VALID) { | 126 | } else if (flags & R600_PTE_VALID) { |
128 | value = addr; | 127 | value = addr; |
129 | } else { | 128 | } else { |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 7b5d22110f25..6c6b655defcf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -406,11 +406,9 @@ int vmw_3d_resource_inc(struct vmw_private *dev_priv, | |||
406 | if (unlikely(ret != 0)) | 406 | if (unlikely(ret != 0)) |
407 | --dev_priv->num_3d_resources; | 407 | --dev_priv->num_3d_resources; |
408 | } else if (unhide_svga) { | 408 | } else if (unhide_svga) { |
409 | mutex_lock(&dev_priv->hw_mutex); | ||
410 | vmw_write(dev_priv, SVGA_REG_ENABLE, | 409 | vmw_write(dev_priv, SVGA_REG_ENABLE, |
411 | vmw_read(dev_priv, SVGA_REG_ENABLE) & | 410 | vmw_read(dev_priv, SVGA_REG_ENABLE) & |
412 | ~SVGA_REG_ENABLE_HIDE); | 411 | ~SVGA_REG_ENABLE_HIDE); |
413 | mutex_unlock(&dev_priv->hw_mutex); | ||
414 | } | 412 | } |
415 | 413 | ||
416 | mutex_unlock(&dev_priv->release_mutex); | 414 | mutex_unlock(&dev_priv->release_mutex); |
@@ -433,13 +431,10 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv, | |||
433 | mutex_lock(&dev_priv->release_mutex); | 431 | mutex_lock(&dev_priv->release_mutex); |
434 | if (unlikely(--dev_priv->num_3d_resources == 0)) | 432 | if (unlikely(--dev_priv->num_3d_resources == 0)) |
435 | vmw_release_device(dev_priv); | 433 | vmw_release_device(dev_priv); |
436 | else if (hide_svga) { | 434 | else if (hide_svga) |
437 | mutex_lock(&dev_priv->hw_mutex); | ||
438 | vmw_write(dev_priv, SVGA_REG_ENABLE, | 435 | vmw_write(dev_priv, SVGA_REG_ENABLE, |
439 | vmw_read(dev_priv, SVGA_REG_ENABLE) | | 436 | vmw_read(dev_priv, SVGA_REG_ENABLE) | |
440 | SVGA_REG_ENABLE_HIDE); | 437 | SVGA_REG_ENABLE_HIDE); |
441 | mutex_unlock(&dev_priv->hw_mutex); | ||
442 | } | ||
443 | 438 | ||
444 | n3d = (int32_t) dev_priv->num_3d_resources; | 439 | n3d = (int32_t) dev_priv->num_3d_resources; |
445 | mutex_unlock(&dev_priv->release_mutex); | 440 | mutex_unlock(&dev_priv->release_mutex); |
@@ -600,12 +595,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
600 | dev_priv->dev = dev; | 595 | dev_priv->dev = dev; |
601 | dev_priv->vmw_chipset = chipset; | 596 | dev_priv->vmw_chipset = chipset; |
602 | dev_priv->last_read_seqno = (uint32_t) -100; | 597 | dev_priv->last_read_seqno = (uint32_t) -100; |
603 | mutex_init(&dev_priv->hw_mutex); | ||
604 | mutex_init(&dev_priv->cmdbuf_mutex); | 598 | mutex_init(&dev_priv->cmdbuf_mutex); |
605 | mutex_init(&dev_priv->release_mutex); | 599 | mutex_init(&dev_priv->release_mutex); |
606 | mutex_init(&dev_priv->binding_mutex); | 600 | mutex_init(&dev_priv->binding_mutex); |
607 | rwlock_init(&dev_priv->resource_lock); | 601 | rwlock_init(&dev_priv->resource_lock); |
608 | ttm_lock_init(&dev_priv->reservation_sem); | 602 | ttm_lock_init(&dev_priv->reservation_sem); |
603 | spin_lock_init(&dev_priv->hw_lock); | ||
604 | spin_lock_init(&dev_priv->waiter_lock); | ||
605 | spin_lock_init(&dev_priv->cap_lock); | ||
609 | 606 | ||
610 | for (i = vmw_res_context; i < vmw_res_max; ++i) { | 607 | for (i = vmw_res_context; i < vmw_res_max; ++i) { |
611 | idr_init(&dev_priv->res_idr[i]); | 608 | idr_init(&dev_priv->res_idr[i]); |
@@ -626,14 +623,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
626 | 623 | ||
627 | dev_priv->enable_fb = enable_fbdev; | 624 | dev_priv->enable_fb = enable_fbdev; |
628 | 625 | ||
629 | mutex_lock(&dev_priv->hw_mutex); | ||
630 | |||
631 | vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); | 626 | vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); |
632 | svga_id = vmw_read(dev_priv, SVGA_REG_ID); | 627 | svga_id = vmw_read(dev_priv, SVGA_REG_ID); |
633 | if (svga_id != SVGA_ID_2) { | 628 | if (svga_id != SVGA_ID_2) { |
634 | ret = -ENOSYS; | 629 | ret = -ENOSYS; |
635 | DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id); | 630 | DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id); |
636 | mutex_unlock(&dev_priv->hw_mutex); | ||
637 | goto out_err0; | 631 | goto out_err0; |
638 | } | 632 | } |
639 | 633 | ||
@@ -683,10 +677,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
683 | dev_priv->prim_bb_mem = dev_priv->vram_size; | 677 | dev_priv->prim_bb_mem = dev_priv->vram_size; |
684 | 678 | ||
685 | ret = vmw_dma_masks(dev_priv); | 679 | ret = vmw_dma_masks(dev_priv); |
686 | if (unlikely(ret != 0)) { | 680 | if (unlikely(ret != 0)) |
687 | mutex_unlock(&dev_priv->hw_mutex); | ||
688 | goto out_err0; | 681 | goto out_err0; |
689 | } | ||
690 | 682 | ||
691 | /* | 683 | /* |
692 | * Limit back buffer size to VRAM size. Remove this once | 684 | * Limit back buffer size to VRAM size. Remove this once |
@@ -695,8 +687,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
695 | if (dev_priv->prim_bb_mem > dev_priv->vram_size) | 687 | if (dev_priv->prim_bb_mem > dev_priv->vram_size) |
696 | dev_priv->prim_bb_mem = dev_priv->vram_size; | 688 | dev_priv->prim_bb_mem = dev_priv->vram_size; |
697 | 689 | ||
698 | mutex_unlock(&dev_priv->hw_mutex); | ||
699 | |||
700 | vmw_print_capabilities(dev_priv->capabilities); | 690 | vmw_print_capabilities(dev_priv->capabilities); |
701 | 691 | ||
702 | if (dev_priv->capabilities & SVGA_CAP_GMR2) { | 692 | if (dev_priv->capabilities & SVGA_CAP_GMR2) { |
@@ -1160,9 +1150,7 @@ static int vmw_master_set(struct drm_device *dev, | |||
1160 | if (unlikely(ret != 0)) | 1150 | if (unlikely(ret != 0)) |
1161 | return ret; | 1151 | return ret; |
1162 | vmw_kms_save_vga(dev_priv); | 1152 | vmw_kms_save_vga(dev_priv); |
1163 | mutex_lock(&dev_priv->hw_mutex); | ||
1164 | vmw_write(dev_priv, SVGA_REG_TRACES, 0); | 1153 | vmw_write(dev_priv, SVGA_REG_TRACES, 0); |
1165 | mutex_unlock(&dev_priv->hw_mutex); | ||
1166 | } | 1154 | } |
1167 | 1155 | ||
1168 | if (active) { | 1156 | if (active) { |
@@ -1196,9 +1184,7 @@ out_no_active_lock: | |||
1196 | if (!dev_priv->enable_fb) { | 1184 | if (!dev_priv->enable_fb) { |
1197 | vmw_kms_restore_vga(dev_priv); | 1185 | vmw_kms_restore_vga(dev_priv); |
1198 | vmw_3d_resource_dec(dev_priv, true); | 1186 | vmw_3d_resource_dec(dev_priv, true); |
1199 | mutex_lock(&dev_priv->hw_mutex); | ||
1200 | vmw_write(dev_priv, SVGA_REG_TRACES, 1); | 1187 | vmw_write(dev_priv, SVGA_REG_TRACES, 1); |
1201 | mutex_unlock(&dev_priv->hw_mutex); | ||
1202 | } | 1188 | } |
1203 | return ret; | 1189 | return ret; |
1204 | } | 1190 | } |
@@ -1233,9 +1219,7 @@ static void vmw_master_drop(struct drm_device *dev, | |||
1233 | DRM_ERROR("Unable to clean VRAM on master drop.\n"); | 1219 | DRM_ERROR("Unable to clean VRAM on master drop.\n"); |
1234 | vmw_kms_restore_vga(dev_priv); | 1220 | vmw_kms_restore_vga(dev_priv); |
1235 | vmw_3d_resource_dec(dev_priv, true); | 1221 | vmw_3d_resource_dec(dev_priv, true); |
1236 | mutex_lock(&dev_priv->hw_mutex); | ||
1237 | vmw_write(dev_priv, SVGA_REG_TRACES, 1); | 1222 | vmw_write(dev_priv, SVGA_REG_TRACES, 1); |
1238 | mutex_unlock(&dev_priv->hw_mutex); | ||
1239 | } | 1223 | } |
1240 | 1224 | ||
1241 | dev_priv->active_master = &dev_priv->fbdev_master; | 1225 | dev_priv->active_master = &dev_priv->fbdev_master; |
@@ -1367,10 +1351,8 @@ static void vmw_pm_complete(struct device *kdev) | |||
1367 | struct drm_device *dev = pci_get_drvdata(pdev); | 1351 | struct drm_device *dev = pci_get_drvdata(pdev); |
1368 | struct vmw_private *dev_priv = vmw_priv(dev); | 1352 | struct vmw_private *dev_priv = vmw_priv(dev); |
1369 | 1353 | ||
1370 | mutex_lock(&dev_priv->hw_mutex); | ||
1371 | vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); | 1354 | vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); |
1372 | (void) vmw_read(dev_priv, SVGA_REG_ID); | 1355 | (void) vmw_read(dev_priv, SVGA_REG_ID); |
1373 | mutex_unlock(&dev_priv->hw_mutex); | ||
1374 | 1356 | ||
1375 | /** | 1357 | /** |
1376 | * Reclaim 3d reference held by fbdev and potentially | 1358 | * Reclaim 3d reference held by fbdev and potentially |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 4ee799b43d5d..d26a6daa9719 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
@@ -399,7 +399,8 @@ struct vmw_private { | |||
399 | uint32_t memory_size; | 399 | uint32_t memory_size; |
400 | bool has_gmr; | 400 | bool has_gmr; |
401 | bool has_mob; | 401 | bool has_mob; |
402 | struct mutex hw_mutex; | 402 | spinlock_t hw_lock; |
403 | spinlock_t cap_lock; | ||
403 | 404 | ||
404 | /* | 405 | /* |
405 | * VGA registers. | 406 | * VGA registers. |
@@ -449,8 +450,9 @@ struct vmw_private { | |||
449 | atomic_t marker_seq; | 450 | atomic_t marker_seq; |
450 | wait_queue_head_t fence_queue; | 451 | wait_queue_head_t fence_queue; |
451 | wait_queue_head_t fifo_queue; | 452 | wait_queue_head_t fifo_queue; |
452 | int fence_queue_waiters; /* Protected by hw_mutex */ | 453 | spinlock_t waiter_lock; |
453 | int goal_queue_waiters; /* Protected by hw_mutex */ | 454 | int fence_queue_waiters; /* Protected by waiter_lock */ |
455 | int goal_queue_waiters; /* Protected by waiter_lock */ | ||
454 | atomic_t fifo_queue_waiters; | 456 | atomic_t fifo_queue_waiters; |
455 | uint32_t last_read_seqno; | 457 | uint32_t last_read_seqno; |
456 | spinlock_t irq_lock; | 458 | spinlock_t irq_lock; |
@@ -553,20 +555,35 @@ static inline struct vmw_master *vmw_master(struct drm_master *master) | |||
553 | return (struct vmw_master *) master->driver_priv; | 555 | return (struct vmw_master *) master->driver_priv; |
554 | } | 556 | } |
555 | 557 | ||
558 | /* | ||
559 | * The locking here is fine-grained, so that it is performed once | ||
560 | * for every read- and write operation. This is of course costly, but we | ||
561 | * don't perform much register access in the timing critical paths anyway. | ||
562 | * Instead we have the extra benefit of being sure that we don't forget | ||
563 | * the hw lock around register accesses. | ||
564 | */ | ||
556 | static inline void vmw_write(struct vmw_private *dev_priv, | 565 | static inline void vmw_write(struct vmw_private *dev_priv, |
557 | unsigned int offset, uint32_t value) | 566 | unsigned int offset, uint32_t value) |
558 | { | 567 | { |
568 | unsigned long irq_flags; | ||
569 | |||
570 | spin_lock_irqsave(&dev_priv->hw_lock, irq_flags); | ||
559 | outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); | 571 | outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); |
560 | outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT); | 572 | outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT); |
573 | spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags); | ||
561 | } | 574 | } |
562 | 575 | ||
563 | static inline uint32_t vmw_read(struct vmw_private *dev_priv, | 576 | static inline uint32_t vmw_read(struct vmw_private *dev_priv, |
564 | unsigned int offset) | 577 | unsigned int offset) |
565 | { | 578 | { |
566 | uint32_t val; | 579 | unsigned long irq_flags; |
580 | u32 val; | ||
567 | 581 | ||
582 | spin_lock_irqsave(&dev_priv->hw_lock, irq_flags); | ||
568 | outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); | 583 | outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); |
569 | val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT); | 584 | val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT); |
585 | spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags); | ||
586 | |||
570 | return val; | 587 | return val; |
571 | } | 588 | } |
572 | 589 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index b7594cb758af..945f1e0dad92 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | |||
@@ -35,7 +35,7 @@ struct vmw_fence_manager { | |||
35 | struct vmw_private *dev_priv; | 35 | struct vmw_private *dev_priv; |
36 | spinlock_t lock; | 36 | spinlock_t lock; |
37 | struct list_head fence_list; | 37 | struct list_head fence_list; |
38 | struct work_struct work, ping_work; | 38 | struct work_struct work; |
39 | u32 user_fence_size; | 39 | u32 user_fence_size; |
40 | u32 fence_size; | 40 | u32 fence_size; |
41 | u32 event_fence_action_size; | 41 | u32 event_fence_action_size; |
@@ -134,14 +134,6 @@ static const char *vmw_fence_get_timeline_name(struct fence *f) | |||
134 | return "svga"; | 134 | return "svga"; |
135 | } | 135 | } |
136 | 136 | ||
137 | static void vmw_fence_ping_func(struct work_struct *work) | ||
138 | { | ||
139 | struct vmw_fence_manager *fman = | ||
140 | container_of(work, struct vmw_fence_manager, ping_work); | ||
141 | |||
142 | vmw_fifo_ping_host(fman->dev_priv, SVGA_SYNC_GENERIC); | ||
143 | } | ||
144 | |||
145 | static bool vmw_fence_enable_signaling(struct fence *f) | 137 | static bool vmw_fence_enable_signaling(struct fence *f) |
146 | { | 138 | { |
147 | struct vmw_fence_obj *fence = | 139 | struct vmw_fence_obj *fence = |
@@ -155,11 +147,7 @@ static bool vmw_fence_enable_signaling(struct fence *f) | |||
155 | if (seqno - fence->base.seqno < VMW_FENCE_WRAP) | 147 | if (seqno - fence->base.seqno < VMW_FENCE_WRAP) |
156 | return false; | 148 | return false; |
157 | 149 | ||
158 | if (mutex_trylock(&dev_priv->hw_mutex)) { | 150 | vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); |
159 | vmw_fifo_ping_host_locked(dev_priv, SVGA_SYNC_GENERIC); | ||
160 | mutex_unlock(&dev_priv->hw_mutex); | ||
161 | } else | ||
162 | schedule_work(&fman->ping_work); | ||
163 | 151 | ||
164 | return true; | 152 | return true; |
165 | } | 153 | } |
@@ -305,7 +293,6 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv) | |||
305 | INIT_LIST_HEAD(&fman->fence_list); | 293 | INIT_LIST_HEAD(&fman->fence_list); |
306 | INIT_LIST_HEAD(&fman->cleanup_list); | 294 | INIT_LIST_HEAD(&fman->cleanup_list); |
307 | INIT_WORK(&fman->work, &vmw_fence_work_func); | 295 | INIT_WORK(&fman->work, &vmw_fence_work_func); |
308 | INIT_WORK(&fman->ping_work, &vmw_fence_ping_func); | ||
309 | fman->fifo_down = true; | 296 | fman->fifo_down = true; |
310 | fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)); | 297 | fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)); |
311 | fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj)); | 298 | fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj)); |
@@ -323,7 +310,6 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman) | |||
323 | bool lists_empty; | 310 | bool lists_empty; |
324 | 311 | ||
325 | (void) cancel_work_sync(&fman->work); | 312 | (void) cancel_work_sync(&fman->work); |
326 | (void) cancel_work_sync(&fman->ping_work); | ||
327 | 313 | ||
328 | spin_lock_irqsave(&fman->lock, irq_flags); | 314 | spin_lock_irqsave(&fman->lock, irq_flags); |
329 | lists_empty = list_empty(&fman->fence_list) && | 315 | lists_empty = list_empty(&fman->fence_list) && |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index 09e10aefcd8e..39f2b03888e7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | |||
@@ -44,10 +44,10 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv) | |||
44 | if (!dev_priv->has_mob) | 44 | if (!dev_priv->has_mob) |
45 | return false; | 45 | return false; |
46 | 46 | ||
47 | mutex_lock(&dev_priv->hw_mutex); | 47 | spin_lock(&dev_priv->cap_lock); |
48 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D); | 48 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D); |
49 | result = vmw_read(dev_priv, SVGA_REG_DEV_CAP); | 49 | result = vmw_read(dev_priv, SVGA_REG_DEV_CAP); |
50 | mutex_unlock(&dev_priv->hw_mutex); | 50 | spin_unlock(&dev_priv->cap_lock); |
51 | 51 | ||
52 | return (result != 0); | 52 | return (result != 0); |
53 | } | 53 | } |
@@ -120,7 +120,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
120 | DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT)); | 120 | DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT)); |
121 | DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL)); | 121 | DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL)); |
122 | 122 | ||
123 | mutex_lock(&dev_priv->hw_mutex); | ||
124 | dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); | 123 | dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); |
125 | dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); | 124 | dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); |
126 | dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); | 125 | dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); |
@@ -143,7 +142,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
143 | mb(); | 142 | mb(); |
144 | 143 | ||
145 | vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1); | 144 | vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1); |
146 | mutex_unlock(&dev_priv->hw_mutex); | ||
147 | 145 | ||
148 | max = ioread32(fifo_mem + SVGA_FIFO_MAX); | 146 | max = ioread32(fifo_mem + SVGA_FIFO_MAX); |
149 | min = ioread32(fifo_mem + SVGA_FIFO_MIN); | 147 | min = ioread32(fifo_mem + SVGA_FIFO_MIN); |
@@ -160,31 +158,28 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
160 | return vmw_fifo_send_fence(dev_priv, &dummy); | 158 | return vmw_fifo_send_fence(dev_priv, &dummy); |
161 | } | 159 | } |
162 | 160 | ||
163 | void vmw_fifo_ping_host_locked(struct vmw_private *dev_priv, uint32_t reason) | 161 | void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) |
164 | { | 162 | { |
165 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; | 163 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; |
164 | static DEFINE_SPINLOCK(ping_lock); | ||
165 | unsigned long irq_flags; | ||
166 | 166 | ||
167 | /* | ||
168 | * The ping_lock is needed because we don't have an atomic | ||
169 | * test-and-set of the SVGA_FIFO_BUSY register. | ||
170 | */ | ||
171 | spin_lock_irqsave(&ping_lock, irq_flags); | ||
167 | if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) { | 172 | if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) { |
168 | iowrite32(1, fifo_mem + SVGA_FIFO_BUSY); | 173 | iowrite32(1, fifo_mem + SVGA_FIFO_BUSY); |
169 | vmw_write(dev_priv, SVGA_REG_SYNC, reason); | 174 | vmw_write(dev_priv, SVGA_REG_SYNC, reason); |
170 | } | 175 | } |
171 | } | 176 | spin_unlock_irqrestore(&ping_lock, irq_flags); |
172 | |||
173 | void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) | ||
174 | { | ||
175 | mutex_lock(&dev_priv->hw_mutex); | ||
176 | |||
177 | vmw_fifo_ping_host_locked(dev_priv, reason); | ||
178 | |||
179 | mutex_unlock(&dev_priv->hw_mutex); | ||
180 | } | 177 | } |
181 | 178 | ||
182 | void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | 179 | void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) |
183 | { | 180 | { |
184 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; | 181 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; |
185 | 182 | ||
186 | mutex_lock(&dev_priv->hw_mutex); | ||
187 | |||
188 | vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); | 183 | vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); |
189 | while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0) | 184 | while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0) |
190 | ; | 185 | ; |
@@ -198,7 +193,6 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
198 | vmw_write(dev_priv, SVGA_REG_TRACES, | 193 | vmw_write(dev_priv, SVGA_REG_TRACES, |
199 | dev_priv->traces_state); | 194 | dev_priv->traces_state); |
200 | 195 | ||
201 | mutex_unlock(&dev_priv->hw_mutex); | ||
202 | vmw_marker_queue_takedown(&fifo->marker_queue); | 196 | vmw_marker_queue_takedown(&fifo->marker_queue); |
203 | 197 | ||
204 | if (likely(fifo->static_buffer != NULL)) { | 198 | if (likely(fifo->static_buffer != NULL)) { |
@@ -271,7 +265,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, | |||
271 | return vmw_fifo_wait_noirq(dev_priv, bytes, | 265 | return vmw_fifo_wait_noirq(dev_priv, bytes, |
272 | interruptible, timeout); | 266 | interruptible, timeout); |
273 | 267 | ||
274 | mutex_lock(&dev_priv->hw_mutex); | 268 | spin_lock(&dev_priv->waiter_lock); |
275 | if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) { | 269 | if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) { |
276 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); | 270 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); |
277 | outl(SVGA_IRQFLAG_FIFO_PROGRESS, | 271 | outl(SVGA_IRQFLAG_FIFO_PROGRESS, |
@@ -280,7 +274,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, | |||
280 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); | 274 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); |
281 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | 275 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); |
282 | } | 276 | } |
283 | mutex_unlock(&dev_priv->hw_mutex); | 277 | spin_unlock(&dev_priv->waiter_lock); |
284 | 278 | ||
285 | if (interruptible) | 279 | if (interruptible) |
286 | ret = wait_event_interruptible_timeout | 280 | ret = wait_event_interruptible_timeout |
@@ -296,14 +290,14 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, | |||
296 | else if (likely(ret > 0)) | 290 | else if (likely(ret > 0)) |
297 | ret = 0; | 291 | ret = 0; |
298 | 292 | ||
299 | mutex_lock(&dev_priv->hw_mutex); | 293 | spin_lock(&dev_priv->waiter_lock); |
300 | if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) { | 294 | if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) { |
301 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); | 295 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); |
302 | dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS; | 296 | dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS; |
303 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); | 297 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); |
304 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | 298 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); |
305 | } | 299 | } |
306 | mutex_unlock(&dev_priv->hw_mutex); | 300 | spin_unlock(&dev_priv->waiter_lock); |
307 | 301 | ||
308 | return ret; | 302 | return ret; |
309 | } | 303 | } |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index 37881ecf5d7a..69c8ce23123c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | |||
@@ -135,13 +135,13 @@ static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce, | |||
135 | (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32); | 135 | (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32); |
136 | compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS; | 136 | compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS; |
137 | 137 | ||
138 | mutex_lock(&dev_priv->hw_mutex); | 138 | spin_lock(&dev_priv->cap_lock); |
139 | for (i = 0; i < max_size; ++i) { | 139 | for (i = 0; i < max_size; ++i) { |
140 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); | 140 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); |
141 | compat_cap->pairs[i][0] = i; | 141 | compat_cap->pairs[i][0] = i; |
142 | compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP); | 142 | compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP); |
143 | } | 143 | } |
144 | mutex_unlock(&dev_priv->hw_mutex); | 144 | spin_unlock(&dev_priv->cap_lock); |
145 | 145 | ||
146 | return 0; | 146 | return 0; |
147 | } | 147 | } |
@@ -191,12 +191,12 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, | |||
191 | if (num > SVGA3D_DEVCAP_MAX) | 191 | if (num > SVGA3D_DEVCAP_MAX) |
192 | num = SVGA3D_DEVCAP_MAX; | 192 | num = SVGA3D_DEVCAP_MAX; |
193 | 193 | ||
194 | mutex_lock(&dev_priv->hw_mutex); | 194 | spin_lock(&dev_priv->cap_lock); |
195 | for (i = 0; i < num; ++i) { | 195 | for (i = 0; i < num; ++i) { |
196 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); | 196 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); |
197 | *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP); | 197 | *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP); |
198 | } | 198 | } |
199 | mutex_unlock(&dev_priv->hw_mutex); | 199 | spin_unlock(&dev_priv->cap_lock); |
200 | } else if (gb_objects) { | 200 | } else if (gb_objects) { |
201 | ret = vmw_fill_compat_cap(dev_priv, bounce, size); | 201 | ret = vmw_fill_compat_cap(dev_priv, bounce, size); |
202 | if (unlikely(ret != 0)) | 202 | if (unlikely(ret != 0)) |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c index 0c423766c441..9fe9827ee499 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | |||
@@ -62,13 +62,8 @@ irqreturn_t vmw_irq_handler(int irq, void *arg) | |||
62 | 62 | ||
63 | static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno) | 63 | static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno) |
64 | { | 64 | { |
65 | uint32_t busy; | ||
66 | 65 | ||
67 | mutex_lock(&dev_priv->hw_mutex); | 66 | return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0); |
68 | busy = vmw_read(dev_priv, SVGA_REG_BUSY); | ||
69 | mutex_unlock(&dev_priv->hw_mutex); | ||
70 | |||
71 | return (busy == 0); | ||
72 | } | 67 | } |
73 | 68 | ||
74 | void vmw_update_seqno(struct vmw_private *dev_priv, | 69 | void vmw_update_seqno(struct vmw_private *dev_priv, |
@@ -184,7 +179,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv, | |||
184 | 179 | ||
185 | void vmw_seqno_waiter_add(struct vmw_private *dev_priv) | 180 | void vmw_seqno_waiter_add(struct vmw_private *dev_priv) |
186 | { | 181 | { |
187 | mutex_lock(&dev_priv->hw_mutex); | 182 | spin_lock(&dev_priv->waiter_lock); |
188 | if (dev_priv->fence_queue_waiters++ == 0) { | 183 | if (dev_priv->fence_queue_waiters++ == 0) { |
189 | unsigned long irq_flags; | 184 | unsigned long irq_flags; |
190 | 185 | ||
@@ -195,12 +190,12 @@ void vmw_seqno_waiter_add(struct vmw_private *dev_priv) | |||
195 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); | 190 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); |
196 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | 191 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); |
197 | } | 192 | } |
198 | mutex_unlock(&dev_priv->hw_mutex); | 193 | spin_unlock(&dev_priv->waiter_lock); |
199 | } | 194 | } |
200 | 195 | ||
201 | void vmw_seqno_waiter_remove(struct vmw_private *dev_priv) | 196 | void vmw_seqno_waiter_remove(struct vmw_private *dev_priv) |
202 | { | 197 | { |
203 | mutex_lock(&dev_priv->hw_mutex); | 198 | spin_lock(&dev_priv->waiter_lock); |
204 | if (--dev_priv->fence_queue_waiters == 0) { | 199 | if (--dev_priv->fence_queue_waiters == 0) { |
205 | unsigned long irq_flags; | 200 | unsigned long irq_flags; |
206 | 201 | ||
@@ -209,13 +204,13 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv) | |||
209 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); | 204 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); |
210 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | 205 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); |
211 | } | 206 | } |
212 | mutex_unlock(&dev_priv->hw_mutex); | 207 | spin_unlock(&dev_priv->waiter_lock); |
213 | } | 208 | } |
214 | 209 | ||
215 | 210 | ||
216 | void vmw_goal_waiter_add(struct vmw_private *dev_priv) | 211 | void vmw_goal_waiter_add(struct vmw_private *dev_priv) |
217 | { | 212 | { |
218 | mutex_lock(&dev_priv->hw_mutex); | 213 | spin_lock(&dev_priv->waiter_lock); |
219 | if (dev_priv->goal_queue_waiters++ == 0) { | 214 | if (dev_priv->goal_queue_waiters++ == 0) { |
220 | unsigned long irq_flags; | 215 | unsigned long irq_flags; |
221 | 216 | ||
@@ -226,12 +221,12 @@ void vmw_goal_waiter_add(struct vmw_private *dev_priv) | |||
226 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); | 221 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); |
227 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | 222 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); |
228 | } | 223 | } |
229 | mutex_unlock(&dev_priv->hw_mutex); | 224 | spin_unlock(&dev_priv->waiter_lock); |
230 | } | 225 | } |
231 | 226 | ||
232 | void vmw_goal_waiter_remove(struct vmw_private *dev_priv) | 227 | void vmw_goal_waiter_remove(struct vmw_private *dev_priv) |
233 | { | 228 | { |
234 | mutex_lock(&dev_priv->hw_mutex); | 229 | spin_lock(&dev_priv->waiter_lock); |
235 | if (--dev_priv->goal_queue_waiters == 0) { | 230 | if (--dev_priv->goal_queue_waiters == 0) { |
236 | unsigned long irq_flags; | 231 | unsigned long irq_flags; |
237 | 232 | ||
@@ -240,7 +235,7 @@ void vmw_goal_waiter_remove(struct vmw_private *dev_priv) | |||
240 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); | 235 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); |
241 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | 236 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); |
242 | } | 237 | } |
243 | mutex_unlock(&dev_priv->hw_mutex); | 238 | spin_unlock(&dev_priv->waiter_lock); |
244 | } | 239 | } |
245 | 240 | ||
246 | int vmw_wait_seqno(struct vmw_private *dev_priv, | 241 | int vmw_wait_seqno(struct vmw_private *dev_priv, |
@@ -315,9 +310,7 @@ void vmw_irq_uninstall(struct drm_device *dev) | |||
315 | if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) | 310 | if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) |
316 | return; | 311 | return; |
317 | 312 | ||
318 | mutex_lock(&dev_priv->hw_mutex); | ||
319 | vmw_write(dev_priv, SVGA_REG_IRQMASK, 0); | 313 | vmw_write(dev_priv, SVGA_REG_IRQMASK, 0); |
320 | mutex_unlock(&dev_priv->hw_mutex); | ||
321 | 314 | ||
322 | status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | 315 | status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); |
323 | outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | 316 | outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 3725b521d931..8725b79e7847 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -1828,9 +1828,7 @@ vmw_du_connector_detect(struct drm_connector *connector, bool force) | |||
1828 | struct vmw_private *dev_priv = vmw_priv(dev); | 1828 | struct vmw_private *dev_priv = vmw_priv(dev); |
1829 | struct vmw_display_unit *du = vmw_connector_to_du(connector); | 1829 | struct vmw_display_unit *du = vmw_connector_to_du(connector); |
1830 | 1830 | ||
1831 | mutex_lock(&dev_priv->hw_mutex); | ||
1832 | num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS); | 1831 | num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS); |
1833 | mutex_unlock(&dev_priv->hw_mutex); | ||
1834 | 1832 | ||
1835 | return ((vmw_connector_to_du(connector)->unit < num_displays && | 1833 | return ((vmw_connector_to_du(connector)->unit < num_displays && |
1836 | du->pref_active) ? | 1834 | du->pref_active) ? |
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 4d6b26979fbd..bb3725b672cf 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c | |||
@@ -861,8 +861,8 @@ static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx) | |||
861 | break; | 861 | break; |
862 | 862 | ||
863 | case ACPI_RESOURCE_TYPE_ADDRESS64: | 863 | case ACPI_RESOURCE_TYPE_ADDRESS64: |
864 | hyperv_mmio.start = res->data.address64.minimum; | 864 | hyperv_mmio.start = res->data.address64.address.minimum; |
865 | hyperv_mmio.end = res->data.address64.maximum; | 865 | hyperv_mmio.end = res->data.address64.address.maximum; |
866 | break; | 866 | break; |
867 | } | 867 | } |
868 | 868 | ||
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index a7de26d1ac80..d931cbbed240 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig | |||
@@ -1389,6 +1389,7 @@ config SENSORS_ADS1015 | |||
1389 | config SENSORS_ADS7828 | 1389 | config SENSORS_ADS7828 |
1390 | tristate "Texas Instruments ADS7828 and compatibles" | 1390 | tristate "Texas Instruments ADS7828 and compatibles" |
1391 | depends on I2C | 1391 | depends on I2C |
1392 | select REGMAP_I2C | ||
1392 | help | 1393 | help |
1393 | If you say yes here you get support for Texas Instruments ADS7828 and | 1394 | If you say yes here you get support for Texas Instruments ADS7828 and |
1394 | ADS7830 8-channel A/D converters. ADS7828 resolution is 12-bit, while | 1395 | ADS7830 8-channel A/D converters. ADS7828 resolution is 12-bit, while |
@@ -1430,8 +1431,8 @@ config SENSORS_INA2XX | |||
1430 | tristate "Texas Instruments INA219 and compatibles" | 1431 | tristate "Texas Instruments INA219 and compatibles" |
1431 | depends on I2C | 1432 | depends on I2C |
1432 | help | 1433 | help |
1433 | If you say yes here you get support for INA219, INA220, INA226, and | 1434 | If you say yes here you get support for INA219, INA220, INA226, |
1434 | INA230 power monitor chips. | 1435 | INA230, and INA231 power monitor chips. |
1435 | 1436 | ||
1436 | The INA2xx driver is configured for the default configuration of | 1437 | The INA2xx driver is configured for the default configuration of |
1437 | the part as described in the datasheet. | 1438 | the part as described in the datasheet. |
diff --git a/drivers/hwmon/abx500.c b/drivers/hwmon/abx500.c index 13875968c844..6cb89c0ebab6 100644 --- a/drivers/hwmon/abx500.c +++ b/drivers/hwmon/abx500.c | |||
@@ -221,7 +221,7 @@ static ssize_t show_min(struct device *dev, | |||
221 | struct abx500_temp *data = dev_get_drvdata(dev); | 221 | struct abx500_temp *data = dev_get_drvdata(dev); |
222 | struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); | 222 | struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); |
223 | 223 | ||
224 | return sprintf(buf, "%ld\n", data->min[attr->index]); | 224 | return sprintf(buf, "%lu\n", data->min[attr->index]); |
225 | } | 225 | } |
226 | 226 | ||
227 | static ssize_t show_max(struct device *dev, | 227 | static ssize_t show_max(struct device *dev, |
@@ -230,7 +230,7 @@ static ssize_t show_max(struct device *dev, | |||
230 | struct abx500_temp *data = dev_get_drvdata(dev); | 230 | struct abx500_temp *data = dev_get_drvdata(dev); |
231 | struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); | 231 | struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); |
232 | 232 | ||
233 | return sprintf(buf, "%ld\n", data->max[attr->index]); | 233 | return sprintf(buf, "%lu\n", data->max[attr->index]); |
234 | } | 234 | } |
235 | 235 | ||
236 | static ssize_t show_max_hyst(struct device *dev, | 236 | static ssize_t show_max_hyst(struct device *dev, |
@@ -239,7 +239,7 @@ static ssize_t show_max_hyst(struct device *dev, | |||
239 | struct abx500_temp *data = dev_get_drvdata(dev); | 239 | struct abx500_temp *data = dev_get_drvdata(dev); |
240 | struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); | 240 | struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); |
241 | 241 | ||
242 | return sprintf(buf, "%ld\n", data->max_hyst[attr->index]); | 242 | return sprintf(buf, "%lu\n", data->max_hyst[attr->index]); |
243 | } | 243 | } |
244 | 244 | ||
245 | static ssize_t show_min_alarm(struct device *dev, | 245 | static ssize_t show_min_alarm(struct device *dev, |
diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c index f4f9b219bf16..11955467fc0f 100644 --- a/drivers/hwmon/ad7314.c +++ b/drivers/hwmon/ad7314.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
17 | #include <linux/hwmon.h> | 17 | #include <linux/hwmon.h> |
18 | #include <linux/hwmon-sysfs.h> | 18 | #include <linux/hwmon-sysfs.h> |
19 | #include <linux/bitops.h> | ||
19 | 20 | ||
20 | /* | 21 | /* |
21 | * AD7314 temperature masks | 22 | * AD7314 temperature masks |
@@ -67,7 +68,7 @@ static ssize_t ad7314_show_temperature(struct device *dev, | |||
67 | switch (spi_get_device_id(chip->spi_dev)->driver_data) { | 68 | switch (spi_get_device_id(chip->spi_dev)->driver_data) { |
68 | case ad7314: | 69 | case ad7314: |
69 | data = (ret & AD7314_TEMP_MASK) >> AD7314_TEMP_SHIFT; | 70 | data = (ret & AD7314_TEMP_MASK) >> AD7314_TEMP_SHIFT; |
70 | data = (data << 6) >> 6; | 71 | data = sign_extend32(data, 9); |
71 | 72 | ||
72 | return sprintf(buf, "%d\n", 250 * data); | 73 | return sprintf(buf, "%d\n", 250 * data); |
73 | case adt7301: | 74 | case adt7301: |
@@ -78,7 +79,7 @@ static ssize_t ad7314_show_temperature(struct device *dev, | |||
78 | * register. 1lsb - 31.25 milli degrees centigrade | 79 | * register. 1lsb - 31.25 milli degrees centigrade |
79 | */ | 80 | */ |
80 | data = ret & ADT7301_TEMP_MASK; | 81 | data = ret & ADT7301_TEMP_MASK; |
81 | data = (data << 2) >> 2; | 82 | data = sign_extend32(data, 13); |
82 | 83 | ||
83 | return sprintf(buf, "%d\n", | 84 | return sprintf(buf, "%d\n", |
84 | DIV_ROUND_CLOSEST(data * 3125, 100)); | 85 | DIV_ROUND_CLOSEST(data * 3125, 100)); |
diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c index 0625e50d7a6e..ad2b47e40345 100644 --- a/drivers/hwmon/adc128d818.c +++ b/drivers/hwmon/adc128d818.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/err.h> | 27 | #include <linux/err.h> |
28 | #include <linux/regulator/consumer.h> | 28 | #include <linux/regulator/consumer.h> |
29 | #include <linux/mutex.h> | 29 | #include <linux/mutex.h> |
30 | #include <linux/bitops.h> | ||
30 | 31 | ||
31 | /* Addresses to scan | 32 | /* Addresses to scan |
32 | * The chip also supports addresses 0x35..0x37. Don't scan those addresses | 33 | * The chip also supports addresses 0x35..0x37. Don't scan those addresses |
@@ -189,7 +190,7 @@ static ssize_t adc128_show_temp(struct device *dev, | |||
189 | if (IS_ERR(data)) | 190 | if (IS_ERR(data)) |
190 | return PTR_ERR(data); | 191 | return PTR_ERR(data); |
191 | 192 | ||
192 | temp = (data->temp[index] << 7) >> 7; /* sign extend */ | 193 | temp = sign_extend32(data->temp[index], 8); |
193 | return sprintf(buf, "%d\n", temp * 500);/* 0.5 degrees C resolution */ | 194 | return sprintf(buf, "%d\n", temp * 500);/* 0.5 degrees C resolution */ |
194 | } | 195 | } |
195 | 196 | ||
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c index a622d40eec17..bce4e9ff21bf 100644 --- a/drivers/hwmon/ads7828.c +++ b/drivers/hwmon/ads7828.c | |||
@@ -30,14 +30,12 @@ | |||
30 | #include <linux/hwmon-sysfs.h> | 30 | #include <linux/hwmon-sysfs.h> |
31 | #include <linux/i2c.h> | 31 | #include <linux/i2c.h> |
32 | #include <linux/init.h> | 32 | #include <linux/init.h> |
33 | #include <linux/jiffies.h> | ||
34 | #include <linux/module.h> | 33 | #include <linux/module.h> |
35 | #include <linux/mutex.h> | ||
36 | #include <linux/platform_data/ads7828.h> | 34 | #include <linux/platform_data/ads7828.h> |
35 | #include <linux/regmap.h> | ||
37 | #include <linux/slab.h> | 36 | #include <linux/slab.h> |
38 | 37 | ||
39 | /* The ADS7828 registers */ | 38 | /* The ADS7828 registers */ |
40 | #define ADS7828_NCH 8 /* 8 channels supported */ | ||
41 | #define ADS7828_CMD_SD_SE 0x80 /* Single ended inputs */ | 39 | #define ADS7828_CMD_SD_SE 0x80 /* Single ended inputs */ |
42 | #define ADS7828_CMD_PD1 0x04 /* Internal vref OFF && A/D ON */ | 40 | #define ADS7828_CMD_PD1 0x04 /* Internal vref OFF && A/D ON */ |
43 | #define ADS7828_CMD_PD3 0x0C /* Internal vref ON && A/D ON */ | 41 | #define ADS7828_CMD_PD3 0x0C /* Internal vref ON && A/D ON */ |
@@ -50,17 +48,9 @@ enum ads7828_chips { ads7828, ads7830 }; | |||
50 | 48 | ||
51 | /* Client specific data */ | 49 | /* Client specific data */ |
52 | struct ads7828_data { | 50 | struct ads7828_data { |
53 | struct i2c_client *client; | 51 | struct regmap *regmap; |
54 | struct mutex update_lock; /* Mutex protecting updates */ | ||
55 | unsigned long last_updated; /* Last updated time (in jiffies) */ | ||
56 | u16 adc_input[ADS7828_NCH]; /* ADS7828_NCH samples */ | ||
57 | bool valid; /* Validity flag */ | ||
58 | bool diff_input; /* Differential input */ | ||
59 | bool ext_vref; /* External voltage reference */ | ||
60 | unsigned int vref_mv; /* voltage reference value */ | ||
61 | u8 cmd_byte; /* Command byte without channel bits */ | 52 | u8 cmd_byte; /* Command byte without channel bits */ |
62 | unsigned int lsb_resol; /* Resolution of the ADC sample LSB */ | 53 | unsigned int lsb_resol; /* Resolution of the ADC sample LSB */ |
63 | s32 (*read_channel)(const struct i2c_client *client, u8 command); | ||
64 | }; | 54 | }; |
65 | 55 | ||
66 | /* Command byte C2,C1,C0 - see datasheet */ | 56 | /* Command byte C2,C1,C0 - see datasheet */ |
@@ -69,42 +59,22 @@ static inline u8 ads7828_cmd_byte(u8 cmd, int ch) | |||
69 | return cmd | (((ch >> 1) | (ch & 0x01) << 2) << 4); | 59 | return cmd | (((ch >> 1) | (ch & 0x01) << 2) << 4); |
70 | } | 60 | } |
71 | 61 | ||
72 | /* Update data for the device (all 8 channels) */ | ||
73 | static struct ads7828_data *ads7828_update_device(struct device *dev) | ||
74 | { | ||
75 | struct ads7828_data *data = dev_get_drvdata(dev); | ||
76 | struct i2c_client *client = data->client; | ||
77 | |||
78 | mutex_lock(&data->update_lock); | ||
79 | |||
80 | if (time_after(jiffies, data->last_updated + HZ + HZ / 2) | ||
81 | || !data->valid) { | ||
82 | unsigned int ch; | ||
83 | dev_dbg(&client->dev, "Starting ads7828 update\n"); | ||
84 | |||
85 | for (ch = 0; ch < ADS7828_NCH; ch++) { | ||
86 | u8 cmd = ads7828_cmd_byte(data->cmd_byte, ch); | ||
87 | data->adc_input[ch] = data->read_channel(client, cmd); | ||
88 | } | ||
89 | data->last_updated = jiffies; | ||
90 | data->valid = true; | ||
91 | } | ||
92 | |||
93 | mutex_unlock(&data->update_lock); | ||
94 | |||
95 | return data; | ||
96 | } | ||
97 | |||
98 | /* sysfs callback function */ | 62 | /* sysfs callback function */ |
99 | static ssize_t ads7828_show_in(struct device *dev, struct device_attribute *da, | 63 | static ssize_t ads7828_show_in(struct device *dev, struct device_attribute *da, |
100 | char *buf) | 64 | char *buf) |
101 | { | 65 | { |
102 | struct sensor_device_attribute *attr = to_sensor_dev_attr(da); | 66 | struct sensor_device_attribute *attr = to_sensor_dev_attr(da); |
103 | struct ads7828_data *data = ads7828_update_device(dev); | 67 | struct ads7828_data *data = dev_get_drvdata(dev); |
104 | unsigned int value = DIV_ROUND_CLOSEST(data->adc_input[attr->index] * | 68 | u8 cmd = ads7828_cmd_byte(data->cmd_byte, attr->index); |
105 | data->lsb_resol, 1000); | 69 | unsigned int regval; |
70 | int err; | ||
106 | 71 | ||
107 | return sprintf(buf, "%d\n", value); | 72 | err = regmap_read(data->regmap, cmd, ®val); |
73 | if (err < 0) | ||
74 | return err; | ||
75 | |||
76 | return sprintf(buf, "%d\n", | ||
77 | DIV_ROUND_CLOSEST(regval * data->lsb_resol, 1000)); | ||
108 | } | 78 | } |
109 | 79 | ||
110 | static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ads7828_show_in, NULL, 0); | 80 | static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ads7828_show_in, NULL, 0); |
@@ -130,6 +100,16 @@ static struct attribute *ads7828_attrs[] = { | |||
130 | 100 | ||
131 | ATTRIBUTE_GROUPS(ads7828); | 101 | ATTRIBUTE_GROUPS(ads7828); |
132 | 102 | ||
103 | static const struct regmap_config ads2828_regmap_config = { | ||
104 | .reg_bits = 8, | ||
105 | .val_bits = 16, | ||
106 | }; | ||
107 | |||
108 | static const struct regmap_config ads2830_regmap_config = { | ||
109 | .reg_bits = 8, | ||
110 | .val_bits = 8, | ||
111 | }; | ||
112 | |||
133 | static int ads7828_probe(struct i2c_client *client, | 113 | static int ads7828_probe(struct i2c_client *client, |
134 | const struct i2c_device_id *id) | 114 | const struct i2c_device_id *id) |
135 | { | 115 | { |
@@ -137,42 +117,40 @@ static int ads7828_probe(struct i2c_client *client, | |||
137 | struct ads7828_platform_data *pdata = dev_get_platdata(dev); | 117 | struct ads7828_platform_data *pdata = dev_get_platdata(dev); |
138 | struct ads7828_data *data; | 118 | struct ads7828_data *data; |
139 | struct device *hwmon_dev; | 119 | struct device *hwmon_dev; |
120 | unsigned int vref_mv = ADS7828_INT_VREF_MV; | ||
121 | bool diff_input = false; | ||
122 | bool ext_vref = false; | ||
140 | 123 | ||
141 | data = devm_kzalloc(dev, sizeof(struct ads7828_data), GFP_KERNEL); | 124 | data = devm_kzalloc(dev, sizeof(struct ads7828_data), GFP_KERNEL); |
142 | if (!data) | 125 | if (!data) |
143 | return -ENOMEM; | 126 | return -ENOMEM; |
144 | 127 | ||
145 | if (pdata) { | 128 | if (pdata) { |
146 | data->diff_input = pdata->diff_input; | 129 | diff_input = pdata->diff_input; |
147 | data->ext_vref = pdata->ext_vref; | 130 | ext_vref = pdata->ext_vref; |
148 | if (data->ext_vref) | 131 | if (ext_vref && pdata->vref_mv) |
149 | data->vref_mv = pdata->vref_mv; | 132 | vref_mv = pdata->vref_mv; |
150 | } | 133 | } |
151 | 134 | ||
152 | /* Bound Vref with min/max values if it was provided */ | 135 | /* Bound Vref with min/max values */ |
153 | if (data->vref_mv) | 136 | vref_mv = clamp_val(vref_mv, ADS7828_EXT_VREF_MV_MIN, |
154 | data->vref_mv = clamp_val(data->vref_mv, | 137 | ADS7828_EXT_VREF_MV_MAX); |
155 | ADS7828_EXT_VREF_MV_MIN, | ||
156 | ADS7828_EXT_VREF_MV_MAX); | ||
157 | else | ||
158 | data->vref_mv = ADS7828_INT_VREF_MV; | ||
159 | 138 | ||
160 | /* ADS7828 uses 12-bit samples, while ADS7830 is 8-bit */ | 139 | /* ADS7828 uses 12-bit samples, while ADS7830 is 8-bit */ |
161 | if (id->driver_data == ads7828) { | 140 | if (id->driver_data == ads7828) { |
162 | data->lsb_resol = DIV_ROUND_CLOSEST(data->vref_mv * 1000, 4096); | 141 | data->lsb_resol = DIV_ROUND_CLOSEST(vref_mv * 1000, 4096); |
163 | data->read_channel = i2c_smbus_read_word_swapped; | 142 | data->regmap = devm_regmap_init_i2c(client, |
143 | &ads2828_regmap_config); | ||
164 | } else { | 144 | } else { |
165 | data->lsb_resol = DIV_ROUND_CLOSEST(data->vref_mv * 1000, 256); | 145 | data->lsb_resol = DIV_ROUND_CLOSEST(vref_mv * 1000, 256); |
166 | data->read_channel = i2c_smbus_read_byte_data; | 146 | data->regmap = devm_regmap_init_i2c(client, |
147 | &ads2830_regmap_config); | ||
167 | } | 148 | } |
168 | 149 | ||
169 | data->cmd_byte = data->ext_vref ? ADS7828_CMD_PD1 : ADS7828_CMD_PD3; | 150 | data->cmd_byte = ext_vref ? ADS7828_CMD_PD1 : ADS7828_CMD_PD3; |
170 | if (!data->diff_input) | 151 | if (!diff_input) |
171 | data->cmd_byte |= ADS7828_CMD_SD_SE; | 152 | data->cmd_byte |= ADS7828_CMD_SD_SE; |
172 | 153 | ||
173 | data->client = client; | ||
174 | mutex_init(&data->update_lock); | ||
175 | |||
176 | hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, | 154 | hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, |
177 | data, | 155 | data, |
178 | ads7828_groups); | 156 | ads7828_groups); |
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c index e01feba909c3..d1542b7d4bc3 100644 --- a/drivers/hwmon/ina2xx.c +++ b/drivers/hwmon/ina2xx.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/hwmon-sysfs.h> | 35 | #include <linux/hwmon-sysfs.h> |
36 | #include <linux/jiffies.h> | 36 | #include <linux/jiffies.h> |
37 | #include <linux/of.h> | 37 | #include <linux/of.h> |
38 | #include <linux/delay.h> | ||
38 | 39 | ||
39 | #include <linux/platform_data/ina2xx.h> | 40 | #include <linux/platform_data/ina2xx.h> |
40 | 41 | ||
@@ -51,7 +52,6 @@ | |||
51 | #define INA226_ALERT_LIMIT 0x07 | 52 | #define INA226_ALERT_LIMIT 0x07 |
52 | #define INA226_DIE_ID 0xFF | 53 | #define INA226_DIE_ID 0xFF |
53 | 54 | ||
54 | |||
55 | /* register count */ | 55 | /* register count */ |
56 | #define INA219_REGISTERS 6 | 56 | #define INA219_REGISTERS 6 |
57 | #define INA226_REGISTERS 8 | 57 | #define INA226_REGISTERS 8 |
@@ -64,6 +64,24 @@ | |||
64 | 64 | ||
65 | /* worst case is 68.10 ms (~14.6Hz, ina219) */ | 65 | /* worst case is 68.10 ms (~14.6Hz, ina219) */ |
66 | #define INA2XX_CONVERSION_RATE 15 | 66 | #define INA2XX_CONVERSION_RATE 15 |
67 | #define INA2XX_MAX_DELAY 69 /* worst case delay in ms */ | ||
68 | |||
69 | #define INA2XX_RSHUNT_DEFAULT 10000 | ||
70 | |||
71 | /* bit mask for reading the averaging setting in the configuration register */ | ||
72 | #define INA226_AVG_RD_MASK 0x0E00 | ||
73 | |||
74 | #define INA226_READ_AVG(reg) (((reg) & INA226_AVG_RD_MASK) >> 9) | ||
75 | #define INA226_SHIFT_AVG(val) ((val) << 9) | ||
76 | |||
77 | /* common attrs, ina226 attrs and NULL */ | ||
78 | #define INA2XX_MAX_ATTRIBUTE_GROUPS 3 | ||
79 | |||
80 | /* | ||
81 | * Both bus voltage and shunt voltage conversion times for ina226 are set | ||
82 | * to 0b0100 on POR, which translates to 2200 microseconds in total. | ||
83 | */ | ||
84 | #define INA226_TOTAL_CONV_TIME_DEFAULT 2200 | ||
67 | 85 | ||
68 | enum ina2xx_ids { ina219, ina226 }; | 86 | enum ina2xx_ids { ina219, ina226 }; |
69 | 87 | ||
@@ -81,11 +99,16 @@ struct ina2xx_data { | |||
81 | struct i2c_client *client; | 99 | struct i2c_client *client; |
82 | const struct ina2xx_config *config; | 100 | const struct ina2xx_config *config; |
83 | 101 | ||
102 | long rshunt; | ||
103 | u16 curr_config; | ||
104 | |||
84 | struct mutex update_lock; | 105 | struct mutex update_lock; |
85 | bool valid; | 106 | bool valid; |
86 | unsigned long last_updated; | 107 | unsigned long last_updated; |
108 | int update_interval; /* in jiffies */ | ||
87 | 109 | ||
88 | int kind; | 110 | int kind; |
111 | const struct attribute_group *groups[INA2XX_MAX_ATTRIBUTE_GROUPS]; | ||
89 | u16 regs[INA2XX_MAX_REGISTERS]; | 112 | u16 regs[INA2XX_MAX_REGISTERS]; |
90 | }; | 113 | }; |
91 | 114 | ||
@@ -110,34 +133,156 @@ static const struct ina2xx_config ina2xx_config[] = { | |||
110 | }, | 133 | }, |
111 | }; | 134 | }; |
112 | 135 | ||
113 | static struct ina2xx_data *ina2xx_update_device(struct device *dev) | 136 | /* |
137 | * Available averaging rates for ina226. The indices correspond with | ||
138 | * the bit values expected by the chip (according to the ina226 datasheet, | ||
139 | * table 3 AVG bit settings, found at | ||
140 | * http://www.ti.com/lit/ds/symlink/ina226.pdf. | ||
141 | */ | ||
142 | static const int ina226_avg_tab[] = { 1, 4, 16, 64, 128, 256, 512, 1024 }; | ||
143 | |||
144 | static int ina226_avg_bits(int avg) | ||
145 | { | ||
146 | int i; | ||
147 | |||
148 | /* Get the closest average from the tab. */ | ||
149 | for (i = 0; i < ARRAY_SIZE(ina226_avg_tab) - 1; i++) { | ||
150 | if (avg <= (ina226_avg_tab[i] + ina226_avg_tab[i + 1]) / 2) | ||
151 | break; | ||
152 | } | ||
153 | |||
154 | return i; /* Return 0b0111 for values greater than 1024. */ | ||
155 | } | ||
156 | |||
157 | static int ina226_reg_to_interval(u16 config) | ||
158 | { | ||
159 | int avg = ina226_avg_tab[INA226_READ_AVG(config)]; | ||
160 | |||
161 | /* | ||
162 | * Multiply the total conversion time by the number of averages. | ||
163 | * Return the result in milliseconds. | ||
164 | */ | ||
165 | return DIV_ROUND_CLOSEST(avg * INA226_TOTAL_CONV_TIME_DEFAULT, 1000); | ||
166 | } | ||
167 | |||
168 | static u16 ina226_interval_to_reg(int interval, u16 config) | ||
169 | { | ||
170 | int avg, avg_bits; | ||
171 | |||
172 | avg = DIV_ROUND_CLOSEST(interval * 1000, | ||
173 | INA226_TOTAL_CONV_TIME_DEFAULT); | ||
174 | avg_bits = ina226_avg_bits(avg); | ||
175 | |||
176 | return (config & ~INA226_AVG_RD_MASK) | INA226_SHIFT_AVG(avg_bits); | ||
177 | } | ||
178 | |||
179 | static void ina226_set_update_interval(struct ina2xx_data *data) | ||
180 | { | ||
181 | int ms; | ||
182 | |||
183 | ms = ina226_reg_to_interval(data->curr_config); | ||
184 | data->update_interval = msecs_to_jiffies(ms); | ||
185 | } | ||
186 | |||
187 | static int ina2xx_calibrate(struct ina2xx_data *data) | ||
188 | { | ||
189 | u16 val = DIV_ROUND_CLOSEST(data->config->calibration_factor, | ||
190 | data->rshunt); | ||
191 | |||
192 | return i2c_smbus_write_word_swapped(data->client, | ||
193 | INA2XX_CALIBRATION, val); | ||
194 | } | ||
195 | |||
196 | /* | ||
197 | * Initialize the configuration and calibration registers. | ||
198 | */ | ||
199 | static int ina2xx_init(struct ina2xx_data *data) | ||
114 | { | 200 | { |
115 | struct ina2xx_data *data = dev_get_drvdata(dev); | ||
116 | struct i2c_client *client = data->client; | 201 | struct i2c_client *client = data->client; |
117 | struct ina2xx_data *ret = data; | 202 | int ret; |
118 | 203 | ||
119 | mutex_lock(&data->update_lock); | 204 | /* device configuration */ |
205 | ret = i2c_smbus_write_word_swapped(client, INA2XX_CONFIG, | ||
206 | data->curr_config); | ||
207 | if (ret < 0) | ||
208 | return ret; | ||
120 | 209 | ||
121 | if (time_after(jiffies, data->last_updated + | 210 | /* |
122 | HZ / INA2XX_CONVERSION_RATE) || !data->valid) { | 211 | * Set current LSB to 1mA, shunt is in uOhms |
212 | * (equation 13 in datasheet). | ||
213 | */ | ||
214 | return ina2xx_calibrate(data); | ||
215 | } | ||
123 | 216 | ||
124 | int i; | 217 | static int ina2xx_do_update(struct device *dev) |
218 | { | ||
219 | struct ina2xx_data *data = dev_get_drvdata(dev); | ||
220 | struct i2c_client *client = data->client; | ||
221 | int i, rv, retry; | ||
125 | 222 | ||
126 | dev_dbg(&client->dev, "Starting ina2xx update\n"); | 223 | dev_dbg(&client->dev, "Starting ina2xx update\n"); |
127 | 224 | ||
225 | for (retry = 5; retry; retry--) { | ||
128 | /* Read all registers */ | 226 | /* Read all registers */ |
129 | for (i = 0; i < data->config->registers; i++) { | 227 | for (i = 0; i < data->config->registers; i++) { |
130 | int rv = i2c_smbus_read_word_swapped(client, i); | 228 | rv = i2c_smbus_read_word_swapped(client, i); |
131 | if (rv < 0) { | 229 | if (rv < 0) |
132 | ret = ERR_PTR(rv); | 230 | return rv; |
133 | goto abort; | ||
134 | } | ||
135 | data->regs[i] = rv; | 231 | data->regs[i] = rv; |
136 | } | 232 | } |
233 | |||
234 | /* | ||
235 | * If the current value in the calibration register is 0, the | ||
236 | * power and current registers will also remain at 0. In case | ||
237 | * the chip has been reset let's check the calibration | ||
238 | * register and reinitialize if needed. | ||
239 | */ | ||
240 | if (data->regs[INA2XX_CALIBRATION] == 0) { | ||
241 | dev_warn(dev, "chip not calibrated, reinitializing\n"); | ||
242 | |||
243 | rv = ina2xx_init(data); | ||
244 | if (rv < 0) | ||
245 | return rv; | ||
246 | |||
247 | /* | ||
248 | * Let's make sure the power and current registers | ||
249 | * have been updated before trying again. | ||
250 | */ | ||
251 | msleep(INA2XX_MAX_DELAY); | ||
252 | continue; | ||
253 | } | ||
254 | |||
137 | data->last_updated = jiffies; | 255 | data->last_updated = jiffies; |
138 | data->valid = 1; | 256 | data->valid = 1; |
257 | |||
258 | return 0; | ||
139 | } | 259 | } |
140 | abort: | 260 | |
261 | /* | ||
262 | * If we're here then although all write operations succeeded, the | ||
263 | * chip still returns 0 in the calibration register. Nothing more we | ||
264 | * can do here. | ||
265 | */ | ||
266 | dev_err(dev, "unable to reinitialize the chip\n"); | ||
267 | return -ENODEV; | ||
268 | } | ||
269 | |||
270 | static struct ina2xx_data *ina2xx_update_device(struct device *dev) | ||
271 | { | ||
272 | struct ina2xx_data *data = dev_get_drvdata(dev); | ||
273 | struct ina2xx_data *ret = data; | ||
274 | unsigned long after; | ||
275 | int rv; | ||
276 | |||
277 | mutex_lock(&data->update_lock); | ||
278 | |||
279 | after = data->last_updated + data->update_interval; | ||
280 | if (time_after(jiffies, after) || !data->valid) { | ||
281 | rv = ina2xx_do_update(dev); | ||
282 | if (rv < 0) | ||
283 | ret = ERR_PTR(rv); | ||
284 | } | ||
285 | |||
141 | mutex_unlock(&data->update_lock); | 286 | mutex_unlock(&data->update_lock); |
142 | return ret; | 287 | return ret; |
143 | } | 288 | } |
@@ -164,6 +309,10 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg) | |||
164 | /* signed register, LSB=1mA (selected), in mA */ | 309 | /* signed register, LSB=1mA (selected), in mA */ |
165 | val = (s16)data->regs[reg]; | 310 | val = (s16)data->regs[reg]; |
166 | break; | 311 | break; |
312 | case INA2XX_CALIBRATION: | ||
313 | val = DIV_ROUND_CLOSEST(data->config->calibration_factor, | ||
314 | data->regs[reg]); | ||
315 | break; | ||
167 | default: | 316 | default: |
168 | /* programmer goofed */ | 317 | /* programmer goofed */ |
169 | WARN_ON_ONCE(1); | 318 | WARN_ON_ONCE(1); |
@@ -187,6 +336,85 @@ static ssize_t ina2xx_show_value(struct device *dev, | |||
187 | ina2xx_get_value(data, attr->index)); | 336 | ina2xx_get_value(data, attr->index)); |
188 | } | 337 | } |
189 | 338 | ||
339 | static ssize_t ina2xx_set_shunt(struct device *dev, | ||
340 | struct device_attribute *da, | ||
341 | const char *buf, size_t count) | ||
342 | { | ||
343 | struct ina2xx_data *data = ina2xx_update_device(dev); | ||
344 | unsigned long val; | ||
345 | int status; | ||
346 | |||
347 | if (IS_ERR(data)) | ||
348 | return PTR_ERR(data); | ||
349 | |||
350 | status = kstrtoul(buf, 10, &val); | ||
351 | if (status < 0) | ||
352 | return status; | ||
353 | |||
354 | if (val == 0 || | ||
355 | /* Values greater than the calibration factor make no sense. */ | ||
356 | val > data->config->calibration_factor) | ||
357 | return -EINVAL; | ||
358 | |||
359 | mutex_lock(&data->update_lock); | ||
360 | data->rshunt = val; | ||
361 | status = ina2xx_calibrate(data); | ||
362 | mutex_unlock(&data->update_lock); | ||
363 | if (status < 0) | ||
364 | return status; | ||
365 | |||
366 | return count; | ||
367 | } | ||
368 | |||
369 | static ssize_t ina226_set_interval(struct device *dev, | ||
370 | struct device_attribute *da, | ||
371 | const char *buf, size_t count) | ||
372 | { | ||
373 | struct ina2xx_data *data = dev_get_drvdata(dev); | ||
374 | unsigned long val; | ||
375 | int status; | ||
376 | |||
377 | status = kstrtoul(buf, 10, &val); | ||
378 | if (status < 0) | ||
379 | return status; | ||
380 | |||
381 | if (val > INT_MAX || val == 0) | ||
382 | return -EINVAL; | ||
383 | |||
384 | mutex_lock(&data->update_lock); | ||
385 | data->curr_config = ina226_interval_to_reg(val, | ||
386 | data->regs[INA2XX_CONFIG]); | ||
387 | status = i2c_smbus_write_word_swapped(data->client, | ||
388 | INA2XX_CONFIG, | ||
389 | data->curr_config); | ||
390 | |||
391 | ina226_set_update_interval(data); | ||
392 | /* Make sure the next access re-reads all registers. */ | ||
393 | data->valid = 0; | ||
394 | mutex_unlock(&data->update_lock); | ||
395 | if (status < 0) | ||
396 | return status; | ||
397 | |||
398 | return count; | ||
399 | } | ||
400 | |||
401 | static ssize_t ina226_show_interval(struct device *dev, | ||
402 | struct device_attribute *da, char *buf) | ||
403 | { | ||
404 | struct ina2xx_data *data = ina2xx_update_device(dev); | ||
405 | |||
406 | if (IS_ERR(data)) | ||
407 | return PTR_ERR(data); | ||
408 | |||
409 | /* | ||
410 | * We don't use data->update_interval here as we want to display | ||
411 | * the actual interval used by the chip and jiffies_to_msecs() | ||
412 | * doesn't seem to be accurate enough. | ||
413 | */ | ||
414 | return snprintf(buf, PAGE_SIZE, "%d\n", | ||
415 | ina226_reg_to_interval(data->regs[INA2XX_CONFIG])); | ||
416 | } | ||
417 | |||
190 | /* shunt voltage */ | 418 | /* shunt voltage */ |
191 | static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ina2xx_show_value, NULL, | 419 | static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ina2xx_show_value, NULL, |
192 | INA2XX_SHUNT_VOLTAGE); | 420 | INA2XX_SHUNT_VOLTAGE); |
@@ -203,15 +431,37 @@ static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ina2xx_show_value, NULL, | |||
203 | static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL, | 431 | static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL, |
204 | INA2XX_POWER); | 432 | INA2XX_POWER); |
205 | 433 | ||
434 | /* shunt resistance */ | ||
435 | static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR, | ||
436 | ina2xx_show_value, ina2xx_set_shunt, | ||
437 | INA2XX_CALIBRATION); | ||
438 | |||
439 | /* update interval (ina226 only) */ | ||
440 | static SENSOR_DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR, | ||
441 | ina226_show_interval, ina226_set_interval, 0); | ||
442 | |||
206 | /* pointers to created device attributes */ | 443 | /* pointers to created device attributes */ |
207 | static struct attribute *ina2xx_attrs[] = { | 444 | static struct attribute *ina2xx_attrs[] = { |
208 | &sensor_dev_attr_in0_input.dev_attr.attr, | 445 | &sensor_dev_attr_in0_input.dev_attr.attr, |
209 | &sensor_dev_attr_in1_input.dev_attr.attr, | 446 | &sensor_dev_attr_in1_input.dev_attr.attr, |
210 | &sensor_dev_attr_curr1_input.dev_attr.attr, | 447 | &sensor_dev_attr_curr1_input.dev_attr.attr, |
211 | &sensor_dev_attr_power1_input.dev_attr.attr, | 448 | &sensor_dev_attr_power1_input.dev_attr.attr, |
449 | &sensor_dev_attr_shunt_resistor.dev_attr.attr, | ||
212 | NULL, | 450 | NULL, |
213 | }; | 451 | }; |
214 | ATTRIBUTE_GROUPS(ina2xx); | 452 | |
453 | static const struct attribute_group ina2xx_group = { | ||
454 | .attrs = ina2xx_attrs, | ||
455 | }; | ||
456 | |||
457 | static struct attribute *ina226_attrs[] = { | ||
458 | &sensor_dev_attr_update_interval.dev_attr.attr, | ||
459 | NULL, | ||
460 | }; | ||
461 | |||
462 | static const struct attribute_group ina226_group = { | ||
463 | .attrs = ina226_attrs, | ||
464 | }; | ||
215 | 465 | ||
216 | static int ina2xx_probe(struct i2c_client *client, | 466 | static int ina2xx_probe(struct i2c_client *client, |
217 | const struct i2c_device_id *id) | 467 | const struct i2c_device_id *id) |
@@ -221,9 +471,8 @@ static int ina2xx_probe(struct i2c_client *client, | |||
221 | struct device *dev = &client->dev; | 471 | struct device *dev = &client->dev; |
222 | struct ina2xx_data *data; | 472 | struct ina2xx_data *data; |
223 | struct device *hwmon_dev; | 473 | struct device *hwmon_dev; |
224 | long shunt = 10000; /* default shunt value 10mOhms */ | ||
225 | u32 val; | 474 | u32 val; |
226 | int ret; | 475 | int ret, group = 0; |
227 | 476 | ||
228 | if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) | 477 | if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) |
229 | return -ENODEV; | 478 | return -ENODEV; |
@@ -234,50 +483,52 @@ static int ina2xx_probe(struct i2c_client *client, | |||
234 | 483 | ||
235 | if (dev_get_platdata(dev)) { | 484 | if (dev_get_platdata(dev)) { |
236 | pdata = dev_get_platdata(dev); | 485 | pdata = dev_get_platdata(dev); |
237 | shunt = pdata->shunt_uohms; | 486 | data->rshunt = pdata->shunt_uohms; |
238 | } else if (!of_property_read_u32(dev->of_node, | 487 | } else if (!of_property_read_u32(dev->of_node, |
239 | "shunt-resistor", &val)) { | 488 | "shunt-resistor", &val)) { |
240 | shunt = val; | 489 | data->rshunt = val; |
490 | } else { | ||
491 | data->rshunt = INA2XX_RSHUNT_DEFAULT; | ||
241 | } | 492 | } |
242 | 493 | ||
243 | if (shunt <= 0) | ||
244 | return -ENODEV; | ||
245 | |||
246 | /* set the device type */ | 494 | /* set the device type */ |
247 | data->kind = id->driver_data; | 495 | data->kind = id->driver_data; |
248 | data->config = &ina2xx_config[data->kind]; | 496 | data->config = &ina2xx_config[data->kind]; |
249 | 497 | data->curr_config = data->config->config_default; | |
250 | /* device configuration */ | 498 | data->client = client; |
251 | ret = i2c_smbus_write_word_swapped(client, INA2XX_CONFIG, | ||
252 | data->config->config_default); | ||
253 | if (ret < 0) { | ||
254 | dev_err(dev, | ||
255 | "error writing to the config register: %d", ret); | ||
256 | return -ENODEV; | ||
257 | } | ||
258 | 499 | ||
259 | /* | 500 | /* |
260 | * Set current LSB to 1mA, shunt is in uOhms | 501 | * Ina226 has a variable update_interval. For ina219 we |
261 | * (equation 13 in datasheet). | 502 | * use a constant value. |
262 | */ | 503 | */ |
263 | ret = i2c_smbus_write_word_swapped(client, INA2XX_CALIBRATION, | 504 | if (data->kind == ina226) |
264 | data->config->calibration_factor / shunt); | 505 | ina226_set_update_interval(data); |
506 | else | ||
507 | data->update_interval = HZ / INA2XX_CONVERSION_RATE; | ||
508 | |||
509 | if (data->rshunt <= 0 || | ||
510 | data->rshunt > data->config->calibration_factor) | ||
511 | return -ENODEV; | ||
512 | |||
513 | ret = ina2xx_init(data); | ||
265 | if (ret < 0) { | 514 | if (ret < 0) { |
266 | dev_err(dev, | 515 | dev_err(dev, "error configuring the device: %d\n", ret); |
267 | "error writing to the calibration register: %d", ret); | ||
268 | return -ENODEV; | 516 | return -ENODEV; |
269 | } | 517 | } |
270 | 518 | ||
271 | data->client = client; | ||
272 | mutex_init(&data->update_lock); | 519 | mutex_init(&data->update_lock); |
273 | 520 | ||
521 | data->groups[group++] = &ina2xx_group; | ||
522 | if (data->kind == ina226) | ||
523 | data->groups[group++] = &ina226_group; | ||
524 | |||
274 | hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, | 525 | hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, |
275 | data, ina2xx_groups); | 526 | data, data->groups); |
276 | if (IS_ERR(hwmon_dev)) | 527 | if (IS_ERR(hwmon_dev)) |
277 | return PTR_ERR(hwmon_dev); | 528 | return PTR_ERR(hwmon_dev); |
278 | 529 | ||
279 | dev_info(dev, "power monitor %s (Rshunt = %li uOhm)\n", | 530 | dev_info(dev, "power monitor %s (Rshunt = %li uOhm)\n", |
280 | id->name, shunt); | 531 | id->name, data->rshunt); |
281 | 532 | ||
282 | return 0; | 533 | return 0; |
283 | } | 534 | } |
@@ -287,6 +538,7 @@ static const struct i2c_device_id ina2xx_id[] = { | |||
287 | { "ina220", ina219 }, | 538 | { "ina220", ina219 }, |
288 | { "ina226", ina226 }, | 539 | { "ina226", ina226 }, |
289 | { "ina230", ina226 }, | 540 | { "ina230", ina226 }, |
541 | { "ina231", ina226 }, | ||
290 | { } | 542 | { } |
291 | }; | 543 | }; |
292 | MODULE_DEVICE_TABLE(i2c, ina2xx_id); | 544 | MODULE_DEVICE_TABLE(i2c, ina2xx_id); |
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c index 388f8bcd898e..996bdfd5cf25 100644 --- a/drivers/hwmon/jc42.c +++ b/drivers/hwmon/jc42.c | |||
@@ -201,7 +201,7 @@ struct jc42_data { | |||
201 | #define JC42_TEMP_MIN 0 | 201 | #define JC42_TEMP_MIN 0 |
202 | #define JC42_TEMP_MAX 125000 | 202 | #define JC42_TEMP_MAX 125000 |
203 | 203 | ||
204 | static u16 jc42_temp_to_reg(int temp, bool extended) | 204 | static u16 jc42_temp_to_reg(long temp, bool extended) |
205 | { | 205 | { |
206 | int ntemp = clamp_val(temp, | 206 | int ntemp = clamp_val(temp, |
207 | extended ? JC42_TEMP_MIN_EXTENDED : | 207 | extended ? JC42_TEMP_MIN_EXTENDED : |
@@ -213,11 +213,7 @@ static u16 jc42_temp_to_reg(int temp, bool extended) | |||
213 | 213 | ||
214 | static int jc42_temp_from_reg(s16 reg) | 214 | static int jc42_temp_from_reg(s16 reg) |
215 | { | 215 | { |
216 | reg &= 0x1fff; | 216 | reg = sign_extend32(reg, 12); |
217 | |||
218 | /* sign extend register */ | ||
219 | if (reg & 0x1000) | ||
220 | reg |= 0xf000; | ||
221 | 217 | ||
222 | /* convert from 0.0625 to 0.001 resolution */ | 218 | /* convert from 0.0625 to 0.001 resolution */ |
223 | return reg * 125 / 2; | 219 | return reg * 125 / 2; |
@@ -308,15 +304,18 @@ static ssize_t set_temp_crit_hyst(struct device *dev, | |||
308 | const char *buf, size_t count) | 304 | const char *buf, size_t count) |
309 | { | 305 | { |
310 | struct jc42_data *data = dev_get_drvdata(dev); | 306 | struct jc42_data *data = dev_get_drvdata(dev); |
311 | unsigned long val; | 307 | long val; |
312 | int diff, hyst; | 308 | int diff, hyst; |
313 | int err; | 309 | int err; |
314 | int ret = count; | 310 | int ret = count; |
315 | 311 | ||
316 | if (kstrtoul(buf, 10, &val) < 0) | 312 | if (kstrtol(buf, 10, &val) < 0) |
317 | return -EINVAL; | 313 | return -EINVAL; |
318 | 314 | ||
315 | val = clamp_val(val, (data->extended ? JC42_TEMP_MIN_EXTENDED : | ||
316 | JC42_TEMP_MIN) - 6000, JC42_TEMP_MAX); | ||
319 | diff = jc42_temp_from_reg(data->temp[t_crit]) - val; | 317 | diff = jc42_temp_from_reg(data->temp[t_crit]) - val; |
318 | |||
320 | hyst = 0; | 319 | hyst = 0; |
321 | if (diff > 0) { | 320 | if (diff > 0) { |
322 | if (diff < 2250) | 321 | if (diff < 2250) |
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c index ec5678289e4a..55765790907b 100644 --- a/drivers/hwmon/nct7802.c +++ b/drivers/hwmon/nct7802.c | |||
@@ -779,7 +779,7 @@ static bool nct7802_regmap_is_volatile(struct device *dev, unsigned int reg) | |||
779 | return reg != REG_BANK && reg <= 0x20; | 779 | return reg != REG_BANK && reg <= 0x20; |
780 | } | 780 | } |
781 | 781 | ||
782 | static struct regmap_config nct7802_regmap_config = { | 782 | static const struct regmap_config nct7802_regmap_config = { |
783 | .reg_bits = 8, | 783 | .reg_bits = 8, |
784 | .val_bits = 8, | 784 | .val_bits = 8, |
785 | .cache_type = REGCACHE_RBTREE, | 785 | .cache_type = REGCACHE_RBTREE, |
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c index ba9f478f64ee..9da2735f1424 100644 --- a/drivers/hwmon/tmp102.c +++ b/drivers/hwmon/tmp102.c | |||
@@ -253,7 +253,7 @@ static int tmp102_remove(struct i2c_client *client) | |||
253 | return 0; | 253 | return 0; |
254 | } | 254 | } |
255 | 255 | ||
256 | #ifdef CONFIG_PM | 256 | #ifdef CONFIG_PM_SLEEP |
257 | static int tmp102_suspend(struct device *dev) | 257 | static int tmp102_suspend(struct device *dev) |
258 | { | 258 | { |
259 | struct i2c_client *client = to_i2c_client(dev); | 259 | struct i2c_client *client = to_i2c_client(dev); |
@@ -279,17 +279,10 @@ static int tmp102_resume(struct device *dev) | |||
279 | config &= ~TMP102_CONF_SD; | 279 | config &= ~TMP102_CONF_SD; |
280 | return i2c_smbus_write_word_swapped(client, TMP102_CONF_REG, config); | 280 | return i2c_smbus_write_word_swapped(client, TMP102_CONF_REG, config); |
281 | } | 281 | } |
282 | |||
283 | static const struct dev_pm_ops tmp102_dev_pm_ops = { | ||
284 | .suspend = tmp102_suspend, | ||
285 | .resume = tmp102_resume, | ||
286 | }; | ||
287 | |||
288 | #define TMP102_DEV_PM_OPS (&tmp102_dev_pm_ops) | ||
289 | #else | ||
290 | #define TMP102_DEV_PM_OPS NULL | ||
291 | #endif /* CONFIG_PM */ | 282 | #endif /* CONFIG_PM */ |
292 | 283 | ||
284 | static SIMPLE_DEV_PM_OPS(tmp102_dev_pm_ops, tmp102_suspend, tmp102_resume); | ||
285 | |||
293 | static const struct i2c_device_id tmp102_id[] = { | 286 | static const struct i2c_device_id tmp102_id[] = { |
294 | { "tmp102", 0 }, | 287 | { "tmp102", 0 }, |
295 | { } | 288 | { } |
@@ -298,7 +291,7 @@ MODULE_DEVICE_TABLE(i2c, tmp102_id); | |||
298 | 291 | ||
299 | static struct i2c_driver tmp102_driver = { | 292 | static struct i2c_driver tmp102_driver = { |
300 | .driver.name = DRIVER_NAME, | 293 | .driver.name = DRIVER_NAME, |
301 | .driver.pm = TMP102_DEV_PM_OPS, | 294 | .driver.pm = &tmp102_dev_pm_ops, |
302 | .probe = tmp102_probe, | 295 | .probe = tmp102_probe, |
303 | .remove = tmp102_remove, | 296 | .remove = tmp102_remove, |
304 | .id_table = tmp102_id, | 297 | .id_table = tmp102_id, |
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 31e8308ba899..ab838d9e28b6 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig | |||
@@ -881,6 +881,7 @@ config I2C_XLR | |||
881 | config I2C_RCAR | 881 | config I2C_RCAR |
882 | tristate "Renesas R-Car I2C Controller" | 882 | tristate "Renesas R-Car I2C Controller" |
883 | depends on ARCH_SHMOBILE || COMPILE_TEST | 883 | depends on ARCH_SHMOBILE || COMPILE_TEST |
884 | select I2C_SLAVE | ||
884 | help | 885 | help |
885 | If you say yes to this option, support will be included for the | 886 | If you say yes to this option, support will be included for the |
886 | R-Car I2C controller. | 887 | R-Car I2C controller. |
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index bff20a589621..958c8db4ec30 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c | |||
@@ -785,14 +785,16 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap, | |||
785 | int ret; | 785 | int ret; |
786 | 786 | ||
787 | pm_runtime_get_sync(&adap->dev); | 787 | pm_runtime_get_sync(&adap->dev); |
788 | clk_prepare_enable(i2c->clk); | 788 | ret = clk_enable(i2c->clk); |
789 | if (ret) | ||
790 | return ret; | ||
789 | 791 | ||
790 | for (retry = 0; retry < adap->retries; retry++) { | 792 | for (retry = 0; retry < adap->retries; retry++) { |
791 | 793 | ||
792 | ret = s3c24xx_i2c_doxfer(i2c, msgs, num); | 794 | ret = s3c24xx_i2c_doxfer(i2c, msgs, num); |
793 | 795 | ||
794 | if (ret != -EAGAIN) { | 796 | if (ret != -EAGAIN) { |
795 | clk_disable_unprepare(i2c->clk); | 797 | clk_disable(i2c->clk); |
796 | pm_runtime_put(&adap->dev); | 798 | pm_runtime_put(&adap->dev); |
797 | return ret; | 799 | return ret; |
798 | } | 800 | } |
@@ -802,7 +804,7 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap, | |||
802 | udelay(100); | 804 | udelay(100); |
803 | } | 805 | } |
804 | 806 | ||
805 | clk_disable_unprepare(i2c->clk); | 807 | clk_disable(i2c->clk); |
806 | pm_runtime_put(&adap->dev); | 808 | pm_runtime_put(&adap->dev); |
807 | return -EREMOTEIO; | 809 | return -EREMOTEIO; |
808 | } | 810 | } |
@@ -1197,7 +1199,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) | |||
1197 | 1199 | ||
1198 | clk_prepare_enable(i2c->clk); | 1200 | clk_prepare_enable(i2c->clk); |
1199 | ret = s3c24xx_i2c_init(i2c); | 1201 | ret = s3c24xx_i2c_init(i2c); |
1200 | clk_disable_unprepare(i2c->clk); | 1202 | clk_disable(i2c->clk); |
1201 | if (ret != 0) { | 1203 | if (ret != 0) { |
1202 | dev_err(&pdev->dev, "I2C controller init failed\n"); | 1204 | dev_err(&pdev->dev, "I2C controller init failed\n"); |
1203 | return ret; | 1205 | return ret; |
@@ -1210,6 +1212,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) | |||
1210 | i2c->irq = ret = platform_get_irq(pdev, 0); | 1212 | i2c->irq = ret = platform_get_irq(pdev, 0); |
1211 | if (ret <= 0) { | 1213 | if (ret <= 0) { |
1212 | dev_err(&pdev->dev, "cannot find IRQ\n"); | 1214 | dev_err(&pdev->dev, "cannot find IRQ\n"); |
1215 | clk_unprepare(i2c->clk); | ||
1213 | return ret; | 1216 | return ret; |
1214 | } | 1217 | } |
1215 | 1218 | ||
@@ -1218,6 +1221,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) | |||
1218 | 1221 | ||
1219 | if (ret != 0) { | 1222 | if (ret != 0) { |
1220 | dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq); | 1223 | dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq); |
1224 | clk_unprepare(i2c->clk); | ||
1221 | return ret; | 1225 | return ret; |
1222 | } | 1226 | } |
1223 | } | 1227 | } |
@@ -1225,6 +1229,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) | |||
1225 | ret = s3c24xx_i2c_register_cpufreq(i2c); | 1229 | ret = s3c24xx_i2c_register_cpufreq(i2c); |
1226 | if (ret < 0) { | 1230 | if (ret < 0) { |
1227 | dev_err(&pdev->dev, "failed to register cpufreq notifier\n"); | 1231 | dev_err(&pdev->dev, "failed to register cpufreq notifier\n"); |
1232 | clk_unprepare(i2c->clk); | ||
1228 | return ret; | 1233 | return ret; |
1229 | } | 1234 | } |
1230 | 1235 | ||
@@ -1241,6 +1246,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) | |||
1241 | if (ret < 0) { | 1246 | if (ret < 0) { |
1242 | dev_err(&pdev->dev, "failed to add bus to i2c core\n"); | 1247 | dev_err(&pdev->dev, "failed to add bus to i2c core\n"); |
1243 | s3c24xx_i2c_deregister_cpufreq(i2c); | 1248 | s3c24xx_i2c_deregister_cpufreq(i2c); |
1249 | clk_unprepare(i2c->clk); | ||
1244 | return ret; | 1250 | return ret; |
1245 | } | 1251 | } |
1246 | 1252 | ||
@@ -1262,6 +1268,8 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev) | |||
1262 | { | 1268 | { |
1263 | struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev); | 1269 | struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev); |
1264 | 1270 | ||
1271 | clk_unprepare(i2c->clk); | ||
1272 | |||
1265 | pm_runtime_disable(&i2c->adap.dev); | 1273 | pm_runtime_disable(&i2c->adap.dev); |
1266 | pm_runtime_disable(&pdev->dev); | 1274 | pm_runtime_disable(&pdev->dev); |
1267 | 1275 | ||
@@ -1293,13 +1301,16 @@ static int s3c24xx_i2c_resume_noirq(struct device *dev) | |||
1293 | { | 1301 | { |
1294 | struct platform_device *pdev = to_platform_device(dev); | 1302 | struct platform_device *pdev = to_platform_device(dev); |
1295 | struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev); | 1303 | struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev); |
1304 | int ret; | ||
1296 | 1305 | ||
1297 | if (!IS_ERR(i2c->sysreg)) | 1306 | if (!IS_ERR(i2c->sysreg)) |
1298 | regmap_write(i2c->sysreg, EXYNOS5_SYS_I2C_CFG, i2c->sys_i2c_cfg); | 1307 | regmap_write(i2c->sysreg, EXYNOS5_SYS_I2C_CFG, i2c->sys_i2c_cfg); |
1299 | 1308 | ||
1300 | clk_prepare_enable(i2c->clk); | 1309 | ret = clk_enable(i2c->clk); |
1310 | if (ret) | ||
1311 | return ret; | ||
1301 | s3c24xx_i2c_init(i2c); | 1312 | s3c24xx_i2c_init(i2c); |
1302 | clk_disable_unprepare(i2c->clk); | 1313 | clk_disable(i2c->clk); |
1303 | i2c->suspended = 0; | 1314 | i2c->suspended = 0; |
1304 | 1315 | ||
1305 | return 0; | 1316 | return 0; |
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index 440d5dbc8b5f..007818b3e174 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c | |||
@@ -139,6 +139,7 @@ struct sh_mobile_i2c_data { | |||
139 | int pos; | 139 | int pos; |
140 | int sr; | 140 | int sr; |
141 | bool send_stop; | 141 | bool send_stop; |
142 | bool stop_after_dma; | ||
142 | 143 | ||
143 | struct resource *res; | 144 | struct resource *res; |
144 | struct dma_chan *dma_tx; | 145 | struct dma_chan *dma_tx; |
@@ -407,7 +408,7 @@ static int sh_mobile_i2c_isr_tx(struct sh_mobile_i2c_data *pd) | |||
407 | 408 | ||
408 | if (pd->pos == pd->msg->len) { | 409 | if (pd->pos == pd->msg->len) { |
409 | /* Send stop if we haven't yet (DMA case) */ | 410 | /* Send stop if we haven't yet (DMA case) */ |
410 | if (pd->send_stop && (iic_rd(pd, ICCR) & ICCR_BBSY)) | 411 | if (pd->send_stop && pd->stop_after_dma) |
411 | i2c_op(pd, OP_TX_STOP, 0); | 412 | i2c_op(pd, OP_TX_STOP, 0); |
412 | return 1; | 413 | return 1; |
413 | } | 414 | } |
@@ -449,6 +450,13 @@ static int sh_mobile_i2c_isr_rx(struct sh_mobile_i2c_data *pd) | |||
449 | real_pos = pd->pos - 2; | 450 | real_pos = pd->pos - 2; |
450 | 451 | ||
451 | if (pd->pos == pd->msg->len) { | 452 | if (pd->pos == pd->msg->len) { |
453 | if (pd->stop_after_dma) { | ||
454 | /* Simulate PIO end condition after DMA transfer */ | ||
455 | i2c_op(pd, OP_RX_STOP, 0); | ||
456 | pd->pos++; | ||
457 | break; | ||
458 | } | ||
459 | |||
452 | if (real_pos < 0) { | 460 | if (real_pos < 0) { |
453 | i2c_op(pd, OP_RX_STOP, 0); | 461 | i2c_op(pd, OP_RX_STOP, 0); |
454 | break; | 462 | break; |
@@ -536,6 +544,7 @@ static void sh_mobile_i2c_dma_callback(void *data) | |||
536 | 544 | ||
537 | sh_mobile_i2c_dma_unmap(pd); | 545 | sh_mobile_i2c_dma_unmap(pd); |
538 | pd->pos = pd->msg->len; | 546 | pd->pos = pd->msg->len; |
547 | pd->stop_after_dma = true; | ||
539 | 548 | ||
540 | iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE); | 549 | iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE); |
541 | } | 550 | } |
@@ -726,6 +735,7 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter, | |||
726 | bool do_start = pd->send_stop || !i; | 735 | bool do_start = pd->send_stop || !i; |
727 | msg = &msgs[i]; | 736 | msg = &msgs[i]; |
728 | pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP; | 737 | pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP; |
738 | pd->stop_after_dma = false; | ||
729 | 739 | ||
730 | err = start_ch(pd, msg, do_start); | 740 | err = start_ch(pd, msg, do_start); |
731 | if (err) | 741 | if (err) |
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 39d25a8cb1ad..e9eae57a2b50 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c | |||
@@ -2972,6 +2972,7 @@ trace: | |||
2972 | } | 2972 | } |
2973 | EXPORT_SYMBOL(i2c_smbus_xfer); | 2973 | EXPORT_SYMBOL(i2c_smbus_xfer); |
2974 | 2974 | ||
2975 | #if IS_ENABLED(CONFIG_I2C_SLAVE) | ||
2975 | int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb) | 2976 | int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb) |
2976 | { | 2977 | { |
2977 | int ret; | 2978 | int ret; |
@@ -3019,6 +3020,7 @@ int i2c_slave_unregister(struct i2c_client *client) | |||
3019 | return ret; | 3020 | return ret; |
3020 | } | 3021 | } |
3021 | EXPORT_SYMBOL_GPL(i2c_slave_unregister); | 3022 | EXPORT_SYMBOL_GPL(i2c_slave_unregister); |
3023 | #endif | ||
3022 | 3024 | ||
3023 | MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>"); | 3025 | MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>"); |
3024 | MODULE_DESCRIPTION("I2C-Bus main module"); | 3026 | MODULE_DESCRIPTION("I2C-Bus main module"); |
diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c index 6631400b5f02..cf9b09db092f 100644 --- a/drivers/i2c/i2c-slave-eeprom.c +++ b/drivers/i2c/i2c-slave-eeprom.c | |||
@@ -74,7 +74,7 @@ static ssize_t i2c_slave_eeprom_bin_read(struct file *filp, struct kobject *kobj | |||
74 | struct eeprom_data *eeprom; | 74 | struct eeprom_data *eeprom; |
75 | unsigned long flags; | 75 | unsigned long flags; |
76 | 76 | ||
77 | if (off + count >= attr->size) | 77 | if (off + count > attr->size) |
78 | return -EFBIG; | 78 | return -EFBIG; |
79 | 79 | ||
80 | eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj)); | 80 | eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj)); |
@@ -92,7 +92,7 @@ static ssize_t i2c_slave_eeprom_bin_write(struct file *filp, struct kobject *kob | |||
92 | struct eeprom_data *eeprom; | 92 | struct eeprom_data *eeprom; |
93 | unsigned long flags; | 93 | unsigned long flags; |
94 | 94 | ||
95 | if (off + count >= attr->size) | 95 | if (off + count > attr->size) |
96 | return -EFBIG; | 96 | return -EFBIG; |
97 | 97 | ||
98 | eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj)); | 98 | eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj)); |
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 9cceacb92f9d..1bc0c170f12a 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c | |||
@@ -727,6 +727,7 @@ static const struct x86_cpu_id intel_idle_ids[] = { | |||
727 | ICPU(0x46, idle_cpu_hsw), | 727 | ICPU(0x46, idle_cpu_hsw), |
728 | ICPU(0x4d, idle_cpu_avn), | 728 | ICPU(0x4d, idle_cpu_avn), |
729 | ICPU(0x3d, idle_cpu_bdw), | 729 | ICPU(0x3d, idle_cpu_bdw), |
730 | ICPU(0x47, idle_cpu_bdw), | ||
730 | ICPU(0x4f, idle_cpu_bdw), | 731 | ICPU(0x4f, idle_cpu_bdw), |
731 | ICPU(0x56, idle_cpu_bdw), | 732 | ICPU(0x56, idle_cpu_bdw), |
732 | {} | 733 | {} |
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index b716b0815644..643c08a025a5 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h | |||
@@ -258,6 +258,5 @@ IB_UVERBS_DECLARE_CMD(close_xrcd); | |||
258 | 258 | ||
259 | IB_UVERBS_DECLARE_EX_CMD(create_flow); | 259 | IB_UVERBS_DECLARE_EX_CMD(create_flow); |
260 | IB_UVERBS_DECLARE_EX_CMD(destroy_flow); | 260 | IB_UVERBS_DECLARE_EX_CMD(destroy_flow); |
261 | IB_UVERBS_DECLARE_EX_CMD(query_device); | ||
262 | 261 | ||
263 | #endif /* UVERBS_H */ | 262 | #endif /* UVERBS_H */ |
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 532d8eba8b02..b7943ff16ed3 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c | |||
@@ -400,52 +400,6 @@ err: | |||
400 | return ret; | 400 | return ret; |
401 | } | 401 | } |
402 | 402 | ||
403 | static void copy_query_dev_fields(struct ib_uverbs_file *file, | ||
404 | struct ib_uverbs_query_device_resp *resp, | ||
405 | struct ib_device_attr *attr) | ||
406 | { | ||
407 | resp->fw_ver = attr->fw_ver; | ||
408 | resp->node_guid = file->device->ib_dev->node_guid; | ||
409 | resp->sys_image_guid = attr->sys_image_guid; | ||
410 | resp->max_mr_size = attr->max_mr_size; | ||
411 | resp->page_size_cap = attr->page_size_cap; | ||
412 | resp->vendor_id = attr->vendor_id; | ||
413 | resp->vendor_part_id = attr->vendor_part_id; | ||
414 | resp->hw_ver = attr->hw_ver; | ||
415 | resp->max_qp = attr->max_qp; | ||
416 | resp->max_qp_wr = attr->max_qp_wr; | ||
417 | resp->device_cap_flags = attr->device_cap_flags; | ||
418 | resp->max_sge = attr->max_sge; | ||
419 | resp->max_sge_rd = attr->max_sge_rd; | ||
420 | resp->max_cq = attr->max_cq; | ||
421 | resp->max_cqe = attr->max_cqe; | ||
422 | resp->max_mr = attr->max_mr; | ||
423 | resp->max_pd = attr->max_pd; | ||
424 | resp->max_qp_rd_atom = attr->max_qp_rd_atom; | ||
425 | resp->max_ee_rd_atom = attr->max_ee_rd_atom; | ||
426 | resp->max_res_rd_atom = attr->max_res_rd_atom; | ||
427 | resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom; | ||
428 | resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom; | ||
429 | resp->atomic_cap = attr->atomic_cap; | ||
430 | resp->max_ee = attr->max_ee; | ||
431 | resp->max_rdd = attr->max_rdd; | ||
432 | resp->max_mw = attr->max_mw; | ||
433 | resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp; | ||
434 | resp->max_raw_ethy_qp = attr->max_raw_ethy_qp; | ||
435 | resp->max_mcast_grp = attr->max_mcast_grp; | ||
436 | resp->max_mcast_qp_attach = attr->max_mcast_qp_attach; | ||
437 | resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach; | ||
438 | resp->max_ah = attr->max_ah; | ||
439 | resp->max_fmr = attr->max_fmr; | ||
440 | resp->max_map_per_fmr = attr->max_map_per_fmr; | ||
441 | resp->max_srq = attr->max_srq; | ||
442 | resp->max_srq_wr = attr->max_srq_wr; | ||
443 | resp->max_srq_sge = attr->max_srq_sge; | ||
444 | resp->max_pkeys = attr->max_pkeys; | ||
445 | resp->local_ca_ack_delay = attr->local_ca_ack_delay; | ||
446 | resp->phys_port_cnt = file->device->ib_dev->phys_port_cnt; | ||
447 | } | ||
448 | |||
449 | ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file, | 403 | ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file, |
450 | const char __user *buf, | 404 | const char __user *buf, |
451 | int in_len, int out_len) | 405 | int in_len, int out_len) |
@@ -466,7 +420,47 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file, | |||
466 | return ret; | 420 | return ret; |
467 | 421 | ||
468 | memset(&resp, 0, sizeof resp); | 422 | memset(&resp, 0, sizeof resp); |
469 | copy_query_dev_fields(file, &resp, &attr); | 423 | |
424 | resp.fw_ver = attr.fw_ver; | ||
425 | resp.node_guid = file->device->ib_dev->node_guid; | ||
426 | resp.sys_image_guid = attr.sys_image_guid; | ||
427 | resp.max_mr_size = attr.max_mr_size; | ||
428 | resp.page_size_cap = attr.page_size_cap; | ||
429 | resp.vendor_id = attr.vendor_id; | ||
430 | resp.vendor_part_id = attr.vendor_part_id; | ||
431 | resp.hw_ver = attr.hw_ver; | ||
432 | resp.max_qp = attr.max_qp; | ||
433 | resp.max_qp_wr = attr.max_qp_wr; | ||
434 | resp.device_cap_flags = attr.device_cap_flags; | ||
435 | resp.max_sge = attr.max_sge; | ||
436 | resp.max_sge_rd = attr.max_sge_rd; | ||
437 | resp.max_cq = attr.max_cq; | ||
438 | resp.max_cqe = attr.max_cqe; | ||
439 | resp.max_mr = attr.max_mr; | ||
440 | resp.max_pd = attr.max_pd; | ||
441 | resp.max_qp_rd_atom = attr.max_qp_rd_atom; | ||
442 | resp.max_ee_rd_atom = attr.max_ee_rd_atom; | ||
443 | resp.max_res_rd_atom = attr.max_res_rd_atom; | ||
444 | resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom; | ||
445 | resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom; | ||
446 | resp.atomic_cap = attr.atomic_cap; | ||
447 | resp.max_ee = attr.max_ee; | ||
448 | resp.max_rdd = attr.max_rdd; | ||
449 | resp.max_mw = attr.max_mw; | ||
450 | resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp; | ||
451 | resp.max_raw_ethy_qp = attr.max_raw_ethy_qp; | ||
452 | resp.max_mcast_grp = attr.max_mcast_grp; | ||
453 | resp.max_mcast_qp_attach = attr.max_mcast_qp_attach; | ||
454 | resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach; | ||
455 | resp.max_ah = attr.max_ah; | ||
456 | resp.max_fmr = attr.max_fmr; | ||
457 | resp.max_map_per_fmr = attr.max_map_per_fmr; | ||
458 | resp.max_srq = attr.max_srq; | ||
459 | resp.max_srq_wr = attr.max_srq_wr; | ||
460 | resp.max_srq_sge = attr.max_srq_sge; | ||
461 | resp.max_pkeys = attr.max_pkeys; | ||
462 | resp.local_ca_ack_delay = attr.local_ca_ack_delay; | ||
463 | resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt; | ||
470 | 464 | ||
471 | if (copy_to_user((void __user *) (unsigned long) cmd.response, | 465 | if (copy_to_user((void __user *) (unsigned long) cmd.response, |
472 | &resp, sizeof resp)) | 466 | &resp, sizeof resp)) |
@@ -3293,52 +3287,3 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file, | |||
3293 | 3287 | ||
3294 | return ret ? ret : in_len; | 3288 | return ret ? ret : in_len; |
3295 | } | 3289 | } |
3296 | |||
3297 | int ib_uverbs_ex_query_device(struct ib_uverbs_file *file, | ||
3298 | struct ib_udata *ucore, | ||
3299 | struct ib_udata *uhw) | ||
3300 | { | ||
3301 | struct ib_uverbs_ex_query_device_resp resp; | ||
3302 | struct ib_uverbs_ex_query_device cmd; | ||
3303 | struct ib_device_attr attr; | ||
3304 | struct ib_device *device; | ||
3305 | int err; | ||
3306 | |||
3307 | device = file->device->ib_dev; | ||
3308 | if (ucore->inlen < sizeof(cmd)) | ||
3309 | return -EINVAL; | ||
3310 | |||
3311 | err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); | ||
3312 | if (err) | ||
3313 | return err; | ||
3314 | |||
3315 | if (cmd.reserved) | ||
3316 | return -EINVAL; | ||
3317 | |||
3318 | err = device->query_device(device, &attr); | ||
3319 | if (err) | ||
3320 | return err; | ||
3321 | |||
3322 | memset(&resp, 0, sizeof(resp)); | ||
3323 | copy_query_dev_fields(file, &resp.base, &attr); | ||
3324 | resp.comp_mask = 0; | ||
3325 | |||
3326 | #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING | ||
3327 | if (cmd.comp_mask & IB_USER_VERBS_EX_QUERY_DEVICE_ODP) { | ||
3328 | resp.odp_caps.general_caps = attr.odp_caps.general_caps; | ||
3329 | resp.odp_caps.per_transport_caps.rc_odp_caps = | ||
3330 | attr.odp_caps.per_transport_caps.rc_odp_caps; | ||
3331 | resp.odp_caps.per_transport_caps.uc_odp_caps = | ||
3332 | attr.odp_caps.per_transport_caps.uc_odp_caps; | ||
3333 | resp.odp_caps.per_transport_caps.ud_odp_caps = | ||
3334 | attr.odp_caps.per_transport_caps.ud_odp_caps; | ||
3335 | resp.comp_mask |= IB_USER_VERBS_EX_QUERY_DEVICE_ODP; | ||
3336 | } | ||
3337 | #endif | ||
3338 | |||
3339 | err = ib_copy_to_udata(ucore, &resp, sizeof(resp)); | ||
3340 | if (err) | ||
3341 | return err; | ||
3342 | |||
3343 | return 0; | ||
3344 | } | ||
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index e6c23b9eab33..5db1a8cc388d 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c | |||
@@ -123,7 +123,6 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file, | |||
123 | struct ib_udata *uhw) = { | 123 | struct ib_udata *uhw) = { |
124 | [IB_USER_VERBS_EX_CMD_CREATE_FLOW] = ib_uverbs_ex_create_flow, | 124 | [IB_USER_VERBS_EX_CMD_CREATE_FLOW] = ib_uverbs_ex_create_flow, |
125 | [IB_USER_VERBS_EX_CMD_DESTROY_FLOW] = ib_uverbs_ex_destroy_flow, | 125 | [IB_USER_VERBS_EX_CMD_DESTROY_FLOW] = ib_uverbs_ex_destroy_flow, |
126 | [IB_USER_VERBS_EX_CMD_QUERY_DEVICE] = ib_uverbs_ex_query_device | ||
127 | }; | 126 | }; |
128 | 127 | ||
129 | static void ib_uverbs_add_one(struct ib_device *device); | 128 | static void ib_uverbs_add_one(struct ib_device *device); |
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 8a87404e9c76..03bf81211a54 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
@@ -1331,8 +1331,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) | |||
1331 | (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | | 1331 | (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | |
1332 | (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) | | 1332 | (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) | |
1333 | (1ull << IB_USER_VERBS_CMD_OPEN_QP); | 1333 | (1ull << IB_USER_VERBS_CMD_OPEN_QP); |
1334 | dev->ib_dev.uverbs_ex_cmd_mask = | ||
1335 | (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE); | ||
1336 | 1334 | ||
1337 | dev->ib_dev.query_device = mlx5_ib_query_device; | 1335 | dev->ib_dev.query_device = mlx5_ib_query_device; |
1338 | dev->ib_dev.query_port = mlx5_ib_query_port; | 1336 | dev->ib_dev.query_port = mlx5_ib_query_port; |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 8ba80a6d3a46..d7562beb5423 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h | |||
@@ -98,15 +98,9 @@ enum { | |||
98 | 98 | ||
99 | IPOIB_MCAST_FLAG_FOUND = 0, /* used in set_multicast_list */ | 99 | IPOIB_MCAST_FLAG_FOUND = 0, /* used in set_multicast_list */ |
100 | IPOIB_MCAST_FLAG_SENDONLY = 1, | 100 | IPOIB_MCAST_FLAG_SENDONLY = 1, |
101 | /* | 101 | IPOIB_MCAST_FLAG_BUSY = 2, /* joining or already joined */ |
102 | * For IPOIB_MCAST_FLAG_BUSY | ||
103 | * When set, in flight join and mcast->mc is unreliable | ||
104 | * When clear and mcast->mc IS_ERR_OR_NULL, need to restart or | ||
105 | * haven't started yet | ||
106 | * When clear and mcast->mc is valid pointer, join was successful | ||
107 | */ | ||
108 | IPOIB_MCAST_FLAG_BUSY = 2, | ||
109 | IPOIB_MCAST_FLAG_ATTACHED = 3, | 102 | IPOIB_MCAST_FLAG_ATTACHED = 3, |
103 | IPOIB_MCAST_JOIN_STARTED = 4, | ||
110 | 104 | ||
111 | MAX_SEND_CQE = 16, | 105 | MAX_SEND_CQE = 16, |
112 | IPOIB_CM_COPYBREAK = 256, | 106 | IPOIB_CM_COPYBREAK = 256, |
@@ -323,7 +317,6 @@ struct ipoib_dev_priv { | |||
323 | struct list_head multicast_list; | 317 | struct list_head multicast_list; |
324 | struct rb_root multicast_tree; | 318 | struct rb_root multicast_tree; |
325 | 319 | ||
326 | struct workqueue_struct *wq; | ||
327 | struct delayed_work mcast_task; | 320 | struct delayed_work mcast_task; |
328 | struct work_struct carrier_on_task; | 321 | struct work_struct carrier_on_task; |
329 | struct work_struct flush_light; | 322 | struct work_struct flush_light; |
@@ -484,10 +477,10 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work); | |||
484 | void ipoib_pkey_event(struct work_struct *work); | 477 | void ipoib_pkey_event(struct work_struct *work); |
485 | void ipoib_ib_dev_cleanup(struct net_device *dev); | 478 | void ipoib_ib_dev_cleanup(struct net_device *dev); |
486 | 479 | ||
487 | int ipoib_ib_dev_open(struct net_device *dev); | 480 | int ipoib_ib_dev_open(struct net_device *dev, int flush); |
488 | int ipoib_ib_dev_up(struct net_device *dev); | 481 | int ipoib_ib_dev_up(struct net_device *dev); |
489 | int ipoib_ib_dev_down(struct net_device *dev); | 482 | int ipoib_ib_dev_down(struct net_device *dev, int flush); |
490 | int ipoib_ib_dev_stop(struct net_device *dev); | 483 | int ipoib_ib_dev_stop(struct net_device *dev, int flush); |
491 | void ipoib_pkey_dev_check_presence(struct net_device *dev); | 484 | void ipoib_pkey_dev_check_presence(struct net_device *dev); |
492 | 485 | ||
493 | int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port); | 486 | int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port); |
@@ -499,7 +492,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb); | |||
499 | 492 | ||
500 | void ipoib_mcast_restart_task(struct work_struct *work); | 493 | void ipoib_mcast_restart_task(struct work_struct *work); |
501 | int ipoib_mcast_start_thread(struct net_device *dev); | 494 | int ipoib_mcast_start_thread(struct net_device *dev); |
502 | int ipoib_mcast_stop_thread(struct net_device *dev); | 495 | int ipoib_mcast_stop_thread(struct net_device *dev, int flush); |
503 | 496 | ||
504 | void ipoib_mcast_dev_down(struct net_device *dev); | 497 | void ipoib_mcast_dev_down(struct net_device *dev); |
505 | void ipoib_mcast_dev_flush(struct net_device *dev); | 498 | void ipoib_mcast_dev_flush(struct net_device *dev); |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 56959adb6c7d..933efcea0d03 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c | |||
@@ -474,7 +474,7 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even | |||
474 | } | 474 | } |
475 | 475 | ||
476 | spin_lock_irq(&priv->lock); | 476 | spin_lock_irq(&priv->lock); |
477 | queue_delayed_work(priv->wq, | 477 | queue_delayed_work(ipoib_workqueue, |
478 | &priv->cm.stale_task, IPOIB_CM_RX_DELAY); | 478 | &priv->cm.stale_task, IPOIB_CM_RX_DELAY); |
479 | /* Add this entry to passive ids list head, but do not re-add it | 479 | /* Add this entry to passive ids list head, but do not re-add it |
480 | * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */ | 480 | * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */ |
@@ -576,7 +576,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) | |||
576 | spin_lock_irqsave(&priv->lock, flags); | 576 | spin_lock_irqsave(&priv->lock, flags); |
577 | list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list); | 577 | list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list); |
578 | ipoib_cm_start_rx_drain(priv); | 578 | ipoib_cm_start_rx_drain(priv); |
579 | queue_work(priv->wq, &priv->cm.rx_reap_task); | 579 | queue_work(ipoib_workqueue, &priv->cm.rx_reap_task); |
580 | spin_unlock_irqrestore(&priv->lock, flags); | 580 | spin_unlock_irqrestore(&priv->lock, flags); |
581 | } else | 581 | } else |
582 | ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n", | 582 | ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n", |
@@ -603,7 +603,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) | |||
603 | spin_lock_irqsave(&priv->lock, flags); | 603 | spin_lock_irqsave(&priv->lock, flags); |
604 | list_move(&p->list, &priv->cm.rx_reap_list); | 604 | list_move(&p->list, &priv->cm.rx_reap_list); |
605 | spin_unlock_irqrestore(&priv->lock, flags); | 605 | spin_unlock_irqrestore(&priv->lock, flags); |
606 | queue_work(priv->wq, &priv->cm.rx_reap_task); | 606 | queue_work(ipoib_workqueue, &priv->cm.rx_reap_task); |
607 | } | 607 | } |
608 | return; | 608 | return; |
609 | } | 609 | } |
@@ -827,7 +827,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) | |||
827 | 827 | ||
828 | if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { | 828 | if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { |
829 | list_move(&tx->list, &priv->cm.reap_list); | 829 | list_move(&tx->list, &priv->cm.reap_list); |
830 | queue_work(priv->wq, &priv->cm.reap_task); | 830 | queue_work(ipoib_workqueue, &priv->cm.reap_task); |
831 | } | 831 | } |
832 | 832 | ||
833 | clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags); | 833 | clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags); |
@@ -1255,7 +1255,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id, | |||
1255 | 1255 | ||
1256 | if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { | 1256 | if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { |
1257 | list_move(&tx->list, &priv->cm.reap_list); | 1257 | list_move(&tx->list, &priv->cm.reap_list); |
1258 | queue_work(priv->wq, &priv->cm.reap_task); | 1258 | queue_work(ipoib_workqueue, &priv->cm.reap_task); |
1259 | } | 1259 | } |
1260 | 1260 | ||
1261 | spin_unlock_irqrestore(&priv->lock, flags); | 1261 | spin_unlock_irqrestore(&priv->lock, flags); |
@@ -1284,7 +1284,7 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path | |||
1284 | tx->dev = dev; | 1284 | tx->dev = dev; |
1285 | list_add(&tx->list, &priv->cm.start_list); | 1285 | list_add(&tx->list, &priv->cm.start_list); |
1286 | set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags); | 1286 | set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags); |
1287 | queue_work(priv->wq, &priv->cm.start_task); | 1287 | queue_work(ipoib_workqueue, &priv->cm.start_task); |
1288 | return tx; | 1288 | return tx; |
1289 | } | 1289 | } |
1290 | 1290 | ||
@@ -1295,7 +1295,7 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx) | |||
1295 | if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { | 1295 | if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { |
1296 | spin_lock_irqsave(&priv->lock, flags); | 1296 | spin_lock_irqsave(&priv->lock, flags); |
1297 | list_move(&tx->list, &priv->cm.reap_list); | 1297 | list_move(&tx->list, &priv->cm.reap_list); |
1298 | queue_work(priv->wq, &priv->cm.reap_task); | 1298 | queue_work(ipoib_workqueue, &priv->cm.reap_task); |
1299 | ipoib_dbg(priv, "Reap connection for gid %pI6\n", | 1299 | ipoib_dbg(priv, "Reap connection for gid %pI6\n", |
1300 | tx->neigh->daddr + 4); | 1300 | tx->neigh->daddr + 4); |
1301 | tx->neigh = NULL; | 1301 | tx->neigh = NULL; |
@@ -1417,7 +1417,7 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb, | |||
1417 | 1417 | ||
1418 | skb_queue_tail(&priv->cm.skb_queue, skb); | 1418 | skb_queue_tail(&priv->cm.skb_queue, skb); |
1419 | if (e) | 1419 | if (e) |
1420 | queue_work(priv->wq, &priv->cm.skb_task); | 1420 | queue_work(ipoib_workqueue, &priv->cm.skb_task); |
1421 | } | 1421 | } |
1422 | 1422 | ||
1423 | static void ipoib_cm_rx_reap(struct work_struct *work) | 1423 | static void ipoib_cm_rx_reap(struct work_struct *work) |
@@ -1450,7 +1450,7 @@ static void ipoib_cm_stale_task(struct work_struct *work) | |||
1450 | } | 1450 | } |
1451 | 1451 | ||
1452 | if (!list_empty(&priv->cm.passive_ids)) | 1452 | if (!list_empty(&priv->cm.passive_ids)) |
1453 | queue_delayed_work(priv->wq, | 1453 | queue_delayed_work(ipoib_workqueue, |
1454 | &priv->cm.stale_task, IPOIB_CM_RX_DELAY); | 1454 | &priv->cm.stale_task, IPOIB_CM_RX_DELAY); |
1455 | spin_unlock_irq(&priv->lock); | 1455 | spin_unlock_irq(&priv->lock); |
1456 | } | 1456 | } |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index fe65abb5150c..72626c348174 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c | |||
@@ -655,7 +655,7 @@ void ipoib_reap_ah(struct work_struct *work) | |||
655 | __ipoib_reap_ah(dev); | 655 | __ipoib_reap_ah(dev); |
656 | 656 | ||
657 | if (!test_bit(IPOIB_STOP_REAPER, &priv->flags)) | 657 | if (!test_bit(IPOIB_STOP_REAPER, &priv->flags)) |
658 | queue_delayed_work(priv->wq, &priv->ah_reap_task, | 658 | queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, |
659 | round_jiffies_relative(HZ)); | 659 | round_jiffies_relative(HZ)); |
660 | } | 660 | } |
661 | 661 | ||
@@ -664,7 +664,7 @@ static void ipoib_ib_tx_timer_func(unsigned long ctx) | |||
664 | drain_tx_cq((struct net_device *)ctx); | 664 | drain_tx_cq((struct net_device *)ctx); |
665 | } | 665 | } |
666 | 666 | ||
667 | int ipoib_ib_dev_open(struct net_device *dev) | 667 | int ipoib_ib_dev_open(struct net_device *dev, int flush) |
668 | { | 668 | { |
669 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 669 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
670 | int ret; | 670 | int ret; |
@@ -696,7 +696,7 @@ int ipoib_ib_dev_open(struct net_device *dev) | |||
696 | } | 696 | } |
697 | 697 | ||
698 | clear_bit(IPOIB_STOP_REAPER, &priv->flags); | 698 | clear_bit(IPOIB_STOP_REAPER, &priv->flags); |
699 | queue_delayed_work(priv->wq, &priv->ah_reap_task, | 699 | queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, |
700 | round_jiffies_relative(HZ)); | 700 | round_jiffies_relative(HZ)); |
701 | 701 | ||
702 | if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) | 702 | if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) |
@@ -706,7 +706,7 @@ int ipoib_ib_dev_open(struct net_device *dev) | |||
706 | dev_stop: | 706 | dev_stop: |
707 | if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) | 707 | if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) |
708 | napi_enable(&priv->napi); | 708 | napi_enable(&priv->napi); |
709 | ipoib_ib_dev_stop(dev); | 709 | ipoib_ib_dev_stop(dev, flush); |
710 | return -1; | 710 | return -1; |
711 | } | 711 | } |
712 | 712 | ||
@@ -738,7 +738,7 @@ int ipoib_ib_dev_up(struct net_device *dev) | |||
738 | return ipoib_mcast_start_thread(dev); | 738 | return ipoib_mcast_start_thread(dev); |
739 | } | 739 | } |
740 | 740 | ||
741 | int ipoib_ib_dev_down(struct net_device *dev) | 741 | int ipoib_ib_dev_down(struct net_device *dev, int flush) |
742 | { | 742 | { |
743 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 743 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
744 | 744 | ||
@@ -747,7 +747,7 @@ int ipoib_ib_dev_down(struct net_device *dev) | |||
747 | clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags); | 747 | clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags); |
748 | netif_carrier_off(dev); | 748 | netif_carrier_off(dev); |
749 | 749 | ||
750 | ipoib_mcast_stop_thread(dev); | 750 | ipoib_mcast_stop_thread(dev, flush); |
751 | ipoib_mcast_dev_flush(dev); | 751 | ipoib_mcast_dev_flush(dev); |
752 | 752 | ||
753 | ipoib_flush_paths(dev); | 753 | ipoib_flush_paths(dev); |
@@ -807,7 +807,7 @@ void ipoib_drain_cq(struct net_device *dev) | |||
807 | local_bh_enable(); | 807 | local_bh_enable(); |
808 | } | 808 | } |
809 | 809 | ||
810 | int ipoib_ib_dev_stop(struct net_device *dev) | 810 | int ipoib_ib_dev_stop(struct net_device *dev, int flush) |
811 | { | 811 | { |
812 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 812 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
813 | struct ib_qp_attr qp_attr; | 813 | struct ib_qp_attr qp_attr; |
@@ -880,7 +880,8 @@ timeout: | |||
880 | /* Wait for all AHs to be reaped */ | 880 | /* Wait for all AHs to be reaped */ |
881 | set_bit(IPOIB_STOP_REAPER, &priv->flags); | 881 | set_bit(IPOIB_STOP_REAPER, &priv->flags); |
882 | cancel_delayed_work(&priv->ah_reap_task); | 882 | cancel_delayed_work(&priv->ah_reap_task); |
883 | flush_workqueue(priv->wq); | 883 | if (flush) |
884 | flush_workqueue(ipoib_workqueue); | ||
884 | 885 | ||
885 | begin = jiffies; | 886 | begin = jiffies; |
886 | 887 | ||
@@ -917,7 +918,7 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port) | |||
917 | (unsigned long) dev); | 918 | (unsigned long) dev); |
918 | 919 | ||
919 | if (dev->flags & IFF_UP) { | 920 | if (dev->flags & IFF_UP) { |
920 | if (ipoib_ib_dev_open(dev)) { | 921 | if (ipoib_ib_dev_open(dev, 1)) { |
921 | ipoib_transport_dev_cleanup(dev); | 922 | ipoib_transport_dev_cleanup(dev); |
922 | return -ENODEV; | 923 | return -ENODEV; |
923 | } | 924 | } |
@@ -1039,12 +1040,12 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, | |||
1039 | } | 1040 | } |
1040 | 1041 | ||
1041 | if (level >= IPOIB_FLUSH_NORMAL) | 1042 | if (level >= IPOIB_FLUSH_NORMAL) |
1042 | ipoib_ib_dev_down(dev); | 1043 | ipoib_ib_dev_down(dev, 0); |
1043 | 1044 | ||
1044 | if (level == IPOIB_FLUSH_HEAVY) { | 1045 | if (level == IPOIB_FLUSH_HEAVY) { |
1045 | if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) | 1046 | if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) |
1046 | ipoib_ib_dev_stop(dev); | 1047 | ipoib_ib_dev_stop(dev, 0); |
1047 | if (ipoib_ib_dev_open(dev) != 0) | 1048 | if (ipoib_ib_dev_open(dev, 0) != 0) |
1048 | return; | 1049 | return; |
1049 | if (netif_queue_stopped(dev)) | 1050 | if (netif_queue_stopped(dev)) |
1050 | netif_start_queue(dev); | 1051 | netif_start_queue(dev); |
@@ -1096,7 +1097,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev) | |||
1096 | */ | 1097 | */ |
1097 | ipoib_flush_paths(dev); | 1098 | ipoib_flush_paths(dev); |
1098 | 1099 | ||
1099 | ipoib_mcast_stop_thread(dev); | 1100 | ipoib_mcast_stop_thread(dev, 1); |
1100 | ipoib_mcast_dev_flush(dev); | 1101 | ipoib_mcast_dev_flush(dev); |
1101 | 1102 | ||
1102 | ipoib_transport_dev_cleanup(dev); | 1103 | ipoib_transport_dev_cleanup(dev); |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 6bad17d4d588..58b5aa3b6f2d 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
@@ -108,7 +108,7 @@ int ipoib_open(struct net_device *dev) | |||
108 | 108 | ||
109 | set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); | 109 | set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); |
110 | 110 | ||
111 | if (ipoib_ib_dev_open(dev)) { | 111 | if (ipoib_ib_dev_open(dev, 1)) { |
112 | if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) | 112 | if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) |
113 | return 0; | 113 | return 0; |
114 | goto err_disable; | 114 | goto err_disable; |
@@ -139,7 +139,7 @@ int ipoib_open(struct net_device *dev) | |||
139 | return 0; | 139 | return 0; |
140 | 140 | ||
141 | err_stop: | 141 | err_stop: |
142 | ipoib_ib_dev_stop(dev); | 142 | ipoib_ib_dev_stop(dev, 1); |
143 | 143 | ||
144 | err_disable: | 144 | err_disable: |
145 | clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); | 145 | clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); |
@@ -157,8 +157,8 @@ static int ipoib_stop(struct net_device *dev) | |||
157 | 157 | ||
158 | netif_stop_queue(dev); | 158 | netif_stop_queue(dev); |
159 | 159 | ||
160 | ipoib_ib_dev_down(dev); | 160 | ipoib_ib_dev_down(dev, 1); |
161 | ipoib_ib_dev_stop(dev); | 161 | ipoib_ib_dev_stop(dev, 0); |
162 | 162 | ||
163 | if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { | 163 | if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { |
164 | struct ipoib_dev_priv *cpriv; | 164 | struct ipoib_dev_priv *cpriv; |
@@ -839,7 +839,7 @@ static void ipoib_set_mcast_list(struct net_device *dev) | |||
839 | return; | 839 | return; |
840 | } | 840 | } |
841 | 841 | ||
842 | queue_work(priv->wq, &priv->restart_task); | 842 | queue_work(ipoib_workqueue, &priv->restart_task); |
843 | } | 843 | } |
844 | 844 | ||
845 | static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr) | 845 | static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr) |
@@ -954,7 +954,7 @@ static void ipoib_reap_neigh(struct work_struct *work) | |||
954 | __ipoib_reap_neigh(priv); | 954 | __ipoib_reap_neigh(priv); |
955 | 955 | ||
956 | if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) | 956 | if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) |
957 | queue_delayed_work(priv->wq, &priv->neigh_reap_task, | 957 | queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task, |
958 | arp_tbl.gc_interval); | 958 | arp_tbl.gc_interval); |
959 | } | 959 | } |
960 | 960 | ||
@@ -1133,7 +1133,7 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv) | |||
1133 | 1133 | ||
1134 | /* start garbage collection */ | 1134 | /* start garbage collection */ |
1135 | clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); | 1135 | clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); |
1136 | queue_delayed_work(priv->wq, &priv->neigh_reap_task, | 1136 | queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task, |
1137 | arp_tbl.gc_interval); | 1137 | arp_tbl.gc_interval); |
1138 | 1138 | ||
1139 | return 0; | 1139 | return 0; |
@@ -1262,13 +1262,15 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port) | |||
1262 | { | 1262 | { |
1263 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 1263 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
1264 | 1264 | ||
1265 | if (ipoib_neigh_hash_init(priv) < 0) | ||
1266 | goto out; | ||
1265 | /* Allocate RX/TX "rings" to hold queued skbs */ | 1267 | /* Allocate RX/TX "rings" to hold queued skbs */ |
1266 | priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring, | 1268 | priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring, |
1267 | GFP_KERNEL); | 1269 | GFP_KERNEL); |
1268 | if (!priv->rx_ring) { | 1270 | if (!priv->rx_ring) { |
1269 | printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n", | 1271 | printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n", |
1270 | ca->name, ipoib_recvq_size); | 1272 | ca->name, ipoib_recvq_size); |
1271 | goto out; | 1273 | goto out_neigh_hash_cleanup; |
1272 | } | 1274 | } |
1273 | 1275 | ||
1274 | priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring); | 1276 | priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring); |
@@ -1283,24 +1285,16 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port) | |||
1283 | if (ipoib_ib_dev_init(dev, ca, port)) | 1285 | if (ipoib_ib_dev_init(dev, ca, port)) |
1284 | goto out_tx_ring_cleanup; | 1286 | goto out_tx_ring_cleanup; |
1285 | 1287 | ||
1286 | /* | ||
1287 | * Must be after ipoib_ib_dev_init so we can allocate a per | ||
1288 | * device wq there and use it here | ||
1289 | */ | ||
1290 | if (ipoib_neigh_hash_init(priv) < 0) | ||
1291 | goto out_dev_uninit; | ||
1292 | |||
1293 | return 0; | 1288 | return 0; |
1294 | 1289 | ||
1295 | out_dev_uninit: | ||
1296 | ipoib_ib_dev_cleanup(dev); | ||
1297 | |||
1298 | out_tx_ring_cleanup: | 1290 | out_tx_ring_cleanup: |
1299 | vfree(priv->tx_ring); | 1291 | vfree(priv->tx_ring); |
1300 | 1292 | ||
1301 | out_rx_ring_cleanup: | 1293 | out_rx_ring_cleanup: |
1302 | kfree(priv->rx_ring); | 1294 | kfree(priv->rx_ring); |
1303 | 1295 | ||
1296 | out_neigh_hash_cleanup: | ||
1297 | ipoib_neigh_hash_uninit(dev); | ||
1304 | out: | 1298 | out: |
1305 | return -ENOMEM; | 1299 | return -ENOMEM; |
1306 | } | 1300 | } |
@@ -1323,12 +1317,6 @@ void ipoib_dev_cleanup(struct net_device *dev) | |||
1323 | } | 1317 | } |
1324 | unregister_netdevice_many(&head); | 1318 | unregister_netdevice_many(&head); |
1325 | 1319 | ||
1326 | /* | ||
1327 | * Must be before ipoib_ib_dev_cleanup or we delete an in use | ||
1328 | * work queue | ||
1329 | */ | ||
1330 | ipoib_neigh_hash_uninit(dev); | ||
1331 | |||
1332 | ipoib_ib_dev_cleanup(dev); | 1320 | ipoib_ib_dev_cleanup(dev); |
1333 | 1321 | ||
1334 | kfree(priv->rx_ring); | 1322 | kfree(priv->rx_ring); |
@@ -1336,6 +1324,8 @@ void ipoib_dev_cleanup(struct net_device *dev) | |||
1336 | 1324 | ||
1337 | priv->rx_ring = NULL; | 1325 | priv->rx_ring = NULL; |
1338 | priv->tx_ring = NULL; | 1326 | priv->tx_ring = NULL; |
1327 | |||
1328 | ipoib_neigh_hash_uninit(dev); | ||
1339 | } | 1329 | } |
1340 | 1330 | ||
1341 | static const struct header_ops ipoib_header_ops = { | 1331 | static const struct header_ops ipoib_header_ops = { |
@@ -1646,7 +1636,7 @@ register_failed: | |||
1646 | /* Stop GC if started before flush */ | 1636 | /* Stop GC if started before flush */ |
1647 | set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); | 1637 | set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); |
1648 | cancel_delayed_work(&priv->neigh_reap_task); | 1638 | cancel_delayed_work(&priv->neigh_reap_task); |
1649 | flush_workqueue(priv->wq); | 1639 | flush_workqueue(ipoib_workqueue); |
1650 | 1640 | ||
1651 | event_failed: | 1641 | event_failed: |
1652 | ipoib_dev_cleanup(priv->dev); | 1642 | ipoib_dev_cleanup(priv->dev); |
@@ -1717,7 +1707,7 @@ static void ipoib_remove_one(struct ib_device *device) | |||
1717 | /* Stop GC */ | 1707 | /* Stop GC */ |
1718 | set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); | 1708 | set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); |
1719 | cancel_delayed_work(&priv->neigh_reap_task); | 1709 | cancel_delayed_work(&priv->neigh_reap_task); |
1720 | flush_workqueue(priv->wq); | 1710 | flush_workqueue(ipoib_workqueue); |
1721 | 1711 | ||
1722 | unregister_netdev(priv->dev); | 1712 | unregister_netdev(priv->dev); |
1723 | free_netdev(priv->dev); | 1713 | free_netdev(priv->dev); |
@@ -1758,13 +1748,8 @@ static int __init ipoib_init_module(void) | |||
1758 | * unregister_netdev() and linkwatch_event take the rtnl lock, | 1748 | * unregister_netdev() and linkwatch_event take the rtnl lock, |
1759 | * so flush_scheduled_work() can deadlock during device | 1749 | * so flush_scheduled_work() can deadlock during device |
1760 | * removal. | 1750 | * removal. |
1761 | * | ||
1762 | * In addition, bringing one device up and another down at the | ||
1763 | * same time can deadlock a single workqueue, so we have this | ||
1764 | * global fallback workqueue, but we also attempt to open a | ||
1765 | * per device workqueue each time we bring an interface up | ||
1766 | */ | 1751 | */ |
1767 | ipoib_workqueue = create_singlethread_workqueue("ipoib_flush"); | 1752 | ipoib_workqueue = create_singlethread_workqueue("ipoib"); |
1768 | if (!ipoib_workqueue) { | 1753 | if (!ipoib_workqueue) { |
1769 | ret = -ENOMEM; | 1754 | ret = -ENOMEM; |
1770 | goto err_fs; | 1755 | goto err_fs; |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index bc50dd0d0e4d..ffb83b5f7e80 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c | |||
@@ -190,6 +190,12 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, | |||
190 | spin_unlock_irq(&priv->lock); | 190 | spin_unlock_irq(&priv->lock); |
191 | priv->tx_wr.wr.ud.remote_qkey = priv->qkey; | 191 | priv->tx_wr.wr.ud.remote_qkey = priv->qkey; |
192 | set_qkey = 1; | 192 | set_qkey = 1; |
193 | |||
194 | if (!ipoib_cm_admin_enabled(dev)) { | ||
195 | rtnl_lock(); | ||
196 | dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu)); | ||
197 | rtnl_unlock(); | ||
198 | } | ||
193 | } | 199 | } |
194 | 200 | ||
195 | if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { | 201 | if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { |
@@ -271,27 +277,16 @@ ipoib_mcast_sendonly_join_complete(int status, | |||
271 | struct ipoib_mcast *mcast = multicast->context; | 277 | struct ipoib_mcast *mcast = multicast->context; |
272 | struct net_device *dev = mcast->dev; | 278 | struct net_device *dev = mcast->dev; |
273 | 279 | ||
274 | /* | ||
275 | * We have to take the mutex to force mcast_sendonly_join to | ||
276 | * return from ib_sa_multicast_join and set mcast->mc to a | ||
277 | * valid value. Otherwise we were racing with ourselves in | ||
278 | * that we might fail here, but get a valid return from | ||
279 | * ib_sa_multicast_join after we had cleared mcast->mc here, | ||
280 | * resulting in mis-matched joins and leaves and a deadlock | ||
281 | */ | ||
282 | mutex_lock(&mcast_mutex); | ||
283 | |||
284 | /* We trap for port events ourselves. */ | 280 | /* We trap for port events ourselves. */ |
285 | if (status == -ENETRESET) | 281 | if (status == -ENETRESET) |
286 | goto out; | 282 | return 0; |
287 | 283 | ||
288 | if (!status) | 284 | if (!status) |
289 | status = ipoib_mcast_join_finish(mcast, &multicast->rec); | 285 | status = ipoib_mcast_join_finish(mcast, &multicast->rec); |
290 | 286 | ||
291 | if (status) { | 287 | if (status) { |
292 | if (mcast->logcount++ < 20) | 288 | if (mcast->logcount++ < 20) |
293 | ipoib_dbg_mcast(netdev_priv(dev), "sendonly multicast " | 289 | ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for %pI6, status %d\n", |
294 | "join failed for %pI6, status %d\n", | ||
295 | mcast->mcmember.mgid.raw, status); | 290 | mcast->mcmember.mgid.raw, status); |
296 | 291 | ||
297 | /* Flush out any queued packets */ | 292 | /* Flush out any queued packets */ |
@@ -301,15 +296,11 @@ ipoib_mcast_sendonly_join_complete(int status, | |||
301 | dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue)); | 296 | dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue)); |
302 | } | 297 | } |
303 | netif_tx_unlock_bh(dev); | 298 | netif_tx_unlock_bh(dev); |
299 | |||
300 | /* Clear the busy flag so we try again */ | ||
301 | status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, | ||
302 | &mcast->flags); | ||
304 | } | 303 | } |
305 | out: | ||
306 | clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); | ||
307 | if (status) | ||
308 | mcast->mc = NULL; | ||
309 | complete(&mcast->done); | ||
310 | if (status == -ENETRESET) | ||
311 | status = 0; | ||
312 | mutex_unlock(&mcast_mutex); | ||
313 | return status; | 304 | return status; |
314 | } | 305 | } |
315 | 306 | ||
@@ -327,14 +318,12 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast) | |||
327 | int ret = 0; | 318 | int ret = 0; |
328 | 319 | ||
329 | if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) { | 320 | if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) { |
330 | ipoib_dbg_mcast(priv, "device shutting down, no sendonly " | 321 | ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n"); |
331 | "multicast joins\n"); | ||
332 | return -ENODEV; | 322 | return -ENODEV; |
333 | } | 323 | } |
334 | 324 | ||
335 | if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) { | 325 | if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) { |
336 | ipoib_dbg_mcast(priv, "multicast entry busy, skipping " | 326 | ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n"); |
337 | "sendonly join\n"); | ||
338 | return -EBUSY; | 327 | return -EBUSY; |
339 | } | 328 | } |
340 | 329 | ||
@@ -342,9 +331,6 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast) | |||
342 | rec.port_gid = priv->local_gid; | 331 | rec.port_gid = priv->local_gid; |
343 | rec.pkey = cpu_to_be16(priv->pkey); | 332 | rec.pkey = cpu_to_be16(priv->pkey); |
344 | 333 | ||
345 | mutex_lock(&mcast_mutex); | ||
346 | init_completion(&mcast->done); | ||
347 | set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); | ||
348 | mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, | 334 | mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, |
349 | priv->port, &rec, | 335 | priv->port, &rec, |
350 | IB_SA_MCMEMBER_REC_MGID | | 336 | IB_SA_MCMEMBER_REC_MGID | |
@@ -357,14 +343,12 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast) | |||
357 | if (IS_ERR(mcast->mc)) { | 343 | if (IS_ERR(mcast->mc)) { |
358 | ret = PTR_ERR(mcast->mc); | 344 | ret = PTR_ERR(mcast->mc); |
359 | clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); | 345 | clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); |
360 | complete(&mcast->done); | 346 | ipoib_warn(priv, "ib_sa_join_multicast failed (ret = %d)\n", |
361 | ipoib_warn(priv, "ib_sa_join_multicast for sendonly join " | 347 | ret); |
362 | "failed (ret = %d)\n", ret); | ||
363 | } else { | 348 | } else { |
364 | ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting " | 349 | ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting join\n", |
365 | "sendonly join\n", mcast->mcmember.mgid.raw); | 350 | mcast->mcmember.mgid.raw); |
366 | } | 351 | } |
367 | mutex_unlock(&mcast_mutex); | ||
368 | 352 | ||
369 | return ret; | 353 | return ret; |
370 | } | 354 | } |
@@ -375,29 +359,18 @@ void ipoib_mcast_carrier_on_task(struct work_struct *work) | |||
375 | carrier_on_task); | 359 | carrier_on_task); |
376 | struct ib_port_attr attr; | 360 | struct ib_port_attr attr; |
377 | 361 | ||
362 | /* | ||
363 | * Take rtnl_lock to avoid racing with ipoib_stop() and | ||
364 | * turning the carrier back on while a device is being | ||
365 | * removed. | ||
366 | */ | ||
378 | if (ib_query_port(priv->ca, priv->port, &attr) || | 367 | if (ib_query_port(priv->ca, priv->port, &attr) || |
379 | attr.state != IB_PORT_ACTIVE) { | 368 | attr.state != IB_PORT_ACTIVE) { |
380 | ipoib_dbg(priv, "Keeping carrier off until IB port is active\n"); | 369 | ipoib_dbg(priv, "Keeping carrier off until IB port is active\n"); |
381 | return; | 370 | return; |
382 | } | 371 | } |
383 | 372 | ||
384 | /* | 373 | rtnl_lock(); |
385 | * Take rtnl_lock to avoid racing with ipoib_stop() and | ||
386 | * turning the carrier back on while a device is being | ||
387 | * removed. However, ipoib_stop() will attempt to flush | ||
388 | * the workqueue while holding the rtnl lock, so loop | ||
389 | * on trylock until either we get the lock or we see | ||
390 | * FLAG_ADMIN_UP go away as that signals that we are bailing | ||
391 | * and can safely ignore the carrier on work. | ||
392 | */ | ||
393 | while (!rtnl_trylock()) { | ||
394 | if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) | ||
395 | return; | ||
396 | else | ||
397 | msleep(20); | ||
398 | } | ||
399 | if (!ipoib_cm_admin_enabled(priv->dev)) | ||
400 | dev_set_mtu(priv->dev, min(priv->mcast_mtu, priv->admin_mtu)); | ||
401 | netif_carrier_on(priv->dev); | 374 | netif_carrier_on(priv->dev); |
402 | rtnl_unlock(); | 375 | rtnl_unlock(); |
403 | } | 376 | } |
@@ -412,63 +385,60 @@ static int ipoib_mcast_join_complete(int status, | |||
412 | ipoib_dbg_mcast(priv, "join completion for %pI6 (status %d)\n", | 385 | ipoib_dbg_mcast(priv, "join completion for %pI6 (status %d)\n", |
413 | mcast->mcmember.mgid.raw, status); | 386 | mcast->mcmember.mgid.raw, status); |
414 | 387 | ||
415 | /* | ||
416 | * We have to take the mutex to force mcast_join to | ||
417 | * return from ib_sa_multicast_join and set mcast->mc to a | ||
418 | * valid value. Otherwise we were racing with ourselves in | ||
419 | * that we might fail here, but get a valid return from | ||
420 | * ib_sa_multicast_join after we had cleared mcast->mc here, | ||
421 | * resulting in mis-matched joins and leaves and a deadlock | ||
422 | */ | ||
423 | mutex_lock(&mcast_mutex); | ||
424 | |||
425 | /* We trap for port events ourselves. */ | 388 | /* We trap for port events ourselves. */ |
426 | if (status == -ENETRESET) | 389 | if (status == -ENETRESET) { |
390 | status = 0; | ||
427 | goto out; | 391 | goto out; |
392 | } | ||
428 | 393 | ||
429 | if (!status) | 394 | if (!status) |
430 | status = ipoib_mcast_join_finish(mcast, &multicast->rec); | 395 | status = ipoib_mcast_join_finish(mcast, &multicast->rec); |
431 | 396 | ||
432 | if (!status) { | 397 | if (!status) { |
433 | mcast->backoff = 1; | 398 | mcast->backoff = 1; |
399 | mutex_lock(&mcast_mutex); | ||
434 | if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) | 400 | if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) |
435 | queue_delayed_work(priv->wq, &priv->mcast_task, 0); | 401 | queue_delayed_work(ipoib_workqueue, |
402 | &priv->mcast_task, 0); | ||
403 | mutex_unlock(&mcast_mutex); | ||
436 | 404 | ||
437 | /* | 405 | /* |
438 | * Defer carrier on work to priv->wq to avoid a | 406 | * Defer carrier on work to ipoib_workqueue to avoid a |
439 | * deadlock on rtnl_lock here. | 407 | * deadlock on rtnl_lock here. |
440 | */ | 408 | */ |
441 | if (mcast == priv->broadcast) | 409 | if (mcast == priv->broadcast) |
442 | queue_work(priv->wq, &priv->carrier_on_task); | 410 | queue_work(ipoib_workqueue, &priv->carrier_on_task); |
443 | } else { | ||
444 | if (mcast->logcount++ < 20) { | ||
445 | if (status == -ETIMEDOUT || status == -EAGAIN) { | ||
446 | ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n", | ||
447 | mcast->mcmember.mgid.raw, status); | ||
448 | } else { | ||
449 | ipoib_warn(priv, "multicast join failed for %pI6, status %d\n", | ||
450 | mcast->mcmember.mgid.raw, status); | ||
451 | } | ||
452 | } | ||
453 | 411 | ||
454 | mcast->backoff *= 2; | 412 | status = 0; |
455 | if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS) | 413 | goto out; |
456 | mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS; | ||
457 | } | 414 | } |
458 | out: | 415 | |
416 | if (mcast->logcount++ < 20) { | ||
417 | if (status == -ETIMEDOUT || status == -EAGAIN) { | ||
418 | ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n", | ||
419 | mcast->mcmember.mgid.raw, status); | ||
420 | } else { | ||
421 | ipoib_warn(priv, "multicast join failed for %pI6, status %d\n", | ||
422 | mcast->mcmember.mgid.raw, status); | ||
423 | } | ||
424 | } | ||
425 | |||
426 | mcast->backoff *= 2; | ||
427 | if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS) | ||
428 | mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS; | ||
429 | |||
430 | /* Clear the busy flag so we try again */ | ||
431 | status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); | ||
432 | |||
433 | mutex_lock(&mcast_mutex); | ||
459 | spin_lock_irq(&priv->lock); | 434 | spin_lock_irq(&priv->lock); |
460 | clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); | 435 | if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) |
461 | if (status) | 436 | queue_delayed_work(ipoib_workqueue, &priv->mcast_task, |
462 | mcast->mc = NULL; | ||
463 | complete(&mcast->done); | ||
464 | if (status == -ENETRESET) | ||
465 | status = 0; | ||
466 | if (status && test_bit(IPOIB_MCAST_RUN, &priv->flags)) | ||
467 | queue_delayed_work(priv->wq, &priv->mcast_task, | ||
468 | mcast->backoff * HZ); | 437 | mcast->backoff * HZ); |
469 | spin_unlock_irq(&priv->lock); | 438 | spin_unlock_irq(&priv->lock); |
470 | mutex_unlock(&mcast_mutex); | 439 | mutex_unlock(&mcast_mutex); |
471 | 440 | out: | |
441 | complete(&mcast->done); | ||
472 | return status; | 442 | return status; |
473 | } | 443 | } |
474 | 444 | ||
@@ -517,9 +487,10 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast, | |||
517 | rec.hop_limit = priv->broadcast->mcmember.hop_limit; | 487 | rec.hop_limit = priv->broadcast->mcmember.hop_limit; |
518 | } | 488 | } |
519 | 489 | ||
520 | mutex_lock(&mcast_mutex); | ||
521 | init_completion(&mcast->done); | ||
522 | set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); | 490 | set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); |
491 | init_completion(&mcast->done); | ||
492 | set_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags); | ||
493 | |||
523 | mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port, | 494 | mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port, |
524 | &rec, comp_mask, GFP_KERNEL, | 495 | &rec, comp_mask, GFP_KERNEL, |
525 | ipoib_mcast_join_complete, mcast); | 496 | ipoib_mcast_join_complete, mcast); |
@@ -533,11 +504,13 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast, | |||
533 | if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS) | 504 | if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS) |
534 | mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS; | 505 | mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS; |
535 | 506 | ||
507 | mutex_lock(&mcast_mutex); | ||
536 | if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) | 508 | if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) |
537 | queue_delayed_work(priv->wq, &priv->mcast_task, | 509 | queue_delayed_work(ipoib_workqueue, |
510 | &priv->mcast_task, | ||
538 | mcast->backoff * HZ); | 511 | mcast->backoff * HZ); |
512 | mutex_unlock(&mcast_mutex); | ||
539 | } | 513 | } |
540 | mutex_unlock(&mcast_mutex); | ||
541 | } | 514 | } |
542 | 515 | ||
543 | void ipoib_mcast_join_task(struct work_struct *work) | 516 | void ipoib_mcast_join_task(struct work_struct *work) |
@@ -574,8 +547,8 @@ void ipoib_mcast_join_task(struct work_struct *work) | |||
574 | ipoib_warn(priv, "failed to allocate broadcast group\n"); | 547 | ipoib_warn(priv, "failed to allocate broadcast group\n"); |
575 | mutex_lock(&mcast_mutex); | 548 | mutex_lock(&mcast_mutex); |
576 | if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) | 549 | if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) |
577 | queue_delayed_work(priv->wq, &priv->mcast_task, | 550 | queue_delayed_work(ipoib_workqueue, |
578 | HZ); | 551 | &priv->mcast_task, HZ); |
579 | mutex_unlock(&mcast_mutex); | 552 | mutex_unlock(&mcast_mutex); |
580 | return; | 553 | return; |
581 | } | 554 | } |
@@ -590,8 +563,7 @@ void ipoib_mcast_join_task(struct work_struct *work) | |||
590 | } | 563 | } |
591 | 564 | ||
592 | if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) { | 565 | if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) { |
593 | if (IS_ERR_OR_NULL(priv->broadcast->mc) && | 566 | if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags)) |
594 | !test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags)) | ||
595 | ipoib_mcast_join(dev, priv->broadcast, 0); | 567 | ipoib_mcast_join(dev, priv->broadcast, 0); |
596 | return; | 568 | return; |
597 | } | 569 | } |
@@ -599,33 +571,23 @@ void ipoib_mcast_join_task(struct work_struct *work) | |||
599 | while (1) { | 571 | while (1) { |
600 | struct ipoib_mcast *mcast = NULL; | 572 | struct ipoib_mcast *mcast = NULL; |
601 | 573 | ||
602 | /* | ||
603 | * Need the mutex so our flags are consistent, need the | ||
604 | * priv->lock so we don't race with list removals in either | ||
605 | * mcast_dev_flush or mcast_restart_task | ||
606 | */ | ||
607 | mutex_lock(&mcast_mutex); | ||
608 | spin_lock_irq(&priv->lock); | 574 | spin_lock_irq(&priv->lock); |
609 | list_for_each_entry(mcast, &priv->multicast_list, list) { | 575 | list_for_each_entry(mcast, &priv->multicast_list, list) { |
610 | if (IS_ERR_OR_NULL(mcast->mc) && | 576 | if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) |
611 | !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) && | 577 | && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) |
612 | !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { | 578 | && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { |
613 | /* Found the next unjoined group */ | 579 | /* Found the next unjoined group */ |
614 | break; | 580 | break; |
615 | } | 581 | } |
616 | } | 582 | } |
617 | spin_unlock_irq(&priv->lock); | 583 | spin_unlock_irq(&priv->lock); |
618 | mutex_unlock(&mcast_mutex); | ||
619 | 584 | ||
620 | if (&mcast->list == &priv->multicast_list) { | 585 | if (&mcast->list == &priv->multicast_list) { |
621 | /* All done */ | 586 | /* All done */ |
622 | break; | 587 | break; |
623 | } | 588 | } |
624 | 589 | ||
625 | if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) | 590 | ipoib_mcast_join(dev, mcast, 1); |
626 | ipoib_mcast_sendonly_join(mcast); | ||
627 | else | ||
628 | ipoib_mcast_join(dev, mcast, 1); | ||
629 | return; | 591 | return; |
630 | } | 592 | } |
631 | 593 | ||
@@ -642,13 +604,13 @@ int ipoib_mcast_start_thread(struct net_device *dev) | |||
642 | 604 | ||
643 | mutex_lock(&mcast_mutex); | 605 | mutex_lock(&mcast_mutex); |
644 | if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags)) | 606 | if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags)) |
645 | queue_delayed_work(priv->wq, &priv->mcast_task, 0); | 607 | queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0); |
646 | mutex_unlock(&mcast_mutex); | 608 | mutex_unlock(&mcast_mutex); |
647 | 609 | ||
648 | return 0; | 610 | return 0; |
649 | } | 611 | } |
650 | 612 | ||
651 | int ipoib_mcast_stop_thread(struct net_device *dev) | 613 | int ipoib_mcast_stop_thread(struct net_device *dev, int flush) |
652 | { | 614 | { |
653 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 615 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
654 | 616 | ||
@@ -659,7 +621,8 @@ int ipoib_mcast_stop_thread(struct net_device *dev) | |||
659 | cancel_delayed_work(&priv->mcast_task); | 621 | cancel_delayed_work(&priv->mcast_task); |
660 | mutex_unlock(&mcast_mutex); | 622 | mutex_unlock(&mcast_mutex); |
661 | 623 | ||
662 | flush_workqueue(priv->wq); | 624 | if (flush) |
625 | flush_workqueue(ipoib_workqueue); | ||
663 | 626 | ||
664 | return 0; | 627 | return 0; |
665 | } | 628 | } |
@@ -670,9 +633,6 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) | |||
670 | int ret = 0; | 633 | int ret = 0; |
671 | 634 | ||
672 | if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) | 635 | if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) |
673 | ipoib_warn(priv, "ipoib_mcast_leave on an in-flight join\n"); | ||
674 | |||
675 | if (!IS_ERR_OR_NULL(mcast->mc)) | ||
676 | ib_sa_free_multicast(mcast->mc); | 636 | ib_sa_free_multicast(mcast->mc); |
677 | 637 | ||
678 | if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { | 638 | if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { |
@@ -725,8 +685,6 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb) | |||
725 | memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid)); | 685 | memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid)); |
726 | __ipoib_mcast_add(dev, mcast); | 686 | __ipoib_mcast_add(dev, mcast); |
727 | list_add_tail(&mcast->list, &priv->multicast_list); | 687 | list_add_tail(&mcast->list, &priv->multicast_list); |
728 | if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags)) | ||
729 | queue_delayed_work(priv->wq, &priv->mcast_task, 0); | ||
730 | } | 688 | } |
731 | 689 | ||
732 | if (!mcast->ah) { | 690 | if (!mcast->ah) { |
@@ -740,6 +698,8 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb) | |||
740 | if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) | 698 | if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) |
741 | ipoib_dbg_mcast(priv, "no address vector, " | 699 | ipoib_dbg_mcast(priv, "no address vector, " |
742 | "but multicast join already started\n"); | 700 | "but multicast join already started\n"); |
701 | else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) | ||
702 | ipoib_mcast_sendonly_join(mcast); | ||
743 | 703 | ||
744 | /* | 704 | /* |
745 | * If lookup completes between here and out:, don't | 705 | * If lookup completes between here and out:, don't |
@@ -799,12 +759,9 @@ void ipoib_mcast_dev_flush(struct net_device *dev) | |||
799 | 759 | ||
800 | spin_unlock_irqrestore(&priv->lock, flags); | 760 | spin_unlock_irqrestore(&priv->lock, flags); |
801 | 761 | ||
802 | /* | 762 | /* seperate between the wait to the leave*/ |
803 | * make sure the in-flight joins have finished before we attempt | ||
804 | * to leave | ||
805 | */ | ||
806 | list_for_each_entry_safe(mcast, tmcast, &remove_list, list) | 763 | list_for_each_entry_safe(mcast, tmcast, &remove_list, list) |
807 | if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) | 764 | if (test_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags)) |
808 | wait_for_completion(&mcast->done); | 765 | wait_for_completion(&mcast->done); |
809 | 766 | ||
810 | list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { | 767 | list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { |
@@ -837,6 +794,8 @@ void ipoib_mcast_restart_task(struct work_struct *work) | |||
837 | 794 | ||
838 | ipoib_dbg_mcast(priv, "restarting multicast task\n"); | 795 | ipoib_dbg_mcast(priv, "restarting multicast task\n"); |
839 | 796 | ||
797 | ipoib_mcast_stop_thread(dev, 0); | ||
798 | |||
840 | local_irq_save(flags); | 799 | local_irq_save(flags); |
841 | netif_addr_lock(dev); | 800 | netif_addr_lock(dev); |
842 | spin_lock(&priv->lock); | 801 | spin_lock(&priv->lock); |
@@ -921,38 +880,14 @@ void ipoib_mcast_restart_task(struct work_struct *work) | |||
921 | netif_addr_unlock(dev); | 880 | netif_addr_unlock(dev); |
922 | local_irq_restore(flags); | 881 | local_irq_restore(flags); |
923 | 882 | ||
924 | /* | 883 | /* We have to cancel outside of the spinlock */ |
925 | * make sure the in-flight joins have finished before we attempt | ||
926 | * to leave | ||
927 | */ | ||
928 | list_for_each_entry_safe(mcast, tmcast, &remove_list, list) | ||
929 | if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) | ||
930 | wait_for_completion(&mcast->done); | ||
931 | |||
932 | /* | ||
933 | * We have to cancel outside of the spinlock, but we have to | ||
934 | * take the rtnl lock or else we race with the removal of | ||
935 | * entries from the remove list in mcast_dev_flush as part | ||
936 | * of ipoib_stop(). We detect the drop of the ADMIN_UP flag | ||
937 | * to signal that we have hit this particular race, and we | ||
938 | * return since we know we don't need to do anything else | ||
939 | * anyway. | ||
940 | */ | ||
941 | while (!rtnl_trylock()) { | ||
942 | if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) | ||
943 | return; | ||
944 | else | ||
945 | msleep(20); | ||
946 | } | ||
947 | list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { | 884 | list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { |
948 | ipoib_mcast_leave(mcast->dev, mcast); | 885 | ipoib_mcast_leave(mcast->dev, mcast); |
949 | ipoib_mcast_free(mcast); | 886 | ipoib_mcast_free(mcast); |
950 | } | 887 | } |
951 | /* | 888 | |
952 | * Restart our join task if needed | 889 | if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) |
953 | */ | 890 | ipoib_mcast_start_thread(dev); |
954 | ipoib_mcast_start_thread(dev); | ||
955 | rtnl_unlock(); | ||
956 | } | 891 | } |
957 | 892 | ||
958 | #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG | 893 | #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c index b72a753eb41d..c56d5d44c53b 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c | |||
@@ -145,20 +145,10 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca) | |||
145 | int ret, size; | 145 | int ret, size; |
146 | int i; | 146 | int i; |
147 | 147 | ||
148 | /* | ||
149 | * the various IPoIB tasks assume they will never race against | ||
150 | * themselves, so always use a single thread workqueue | ||
151 | */ | ||
152 | priv->wq = create_singlethread_workqueue("ipoib_wq"); | ||
153 | if (!priv->wq) { | ||
154 | printk(KERN_WARNING "ipoib: failed to allocate device WQ\n"); | ||
155 | return -ENODEV; | ||
156 | } | ||
157 | |||
158 | priv->pd = ib_alloc_pd(priv->ca); | 148 | priv->pd = ib_alloc_pd(priv->ca); |
159 | if (IS_ERR(priv->pd)) { | 149 | if (IS_ERR(priv->pd)) { |
160 | printk(KERN_WARNING "%s: failed to allocate PD\n", ca->name); | 150 | printk(KERN_WARNING "%s: failed to allocate PD\n", ca->name); |
161 | goto out_free_wq; | 151 | return -ENODEV; |
162 | } | 152 | } |
163 | 153 | ||
164 | priv->mr = ib_get_dma_mr(priv->pd, IB_ACCESS_LOCAL_WRITE); | 154 | priv->mr = ib_get_dma_mr(priv->pd, IB_ACCESS_LOCAL_WRITE); |
@@ -252,10 +242,6 @@ out_free_mr: | |||
252 | 242 | ||
253 | out_free_pd: | 243 | out_free_pd: |
254 | ib_dealloc_pd(priv->pd); | 244 | ib_dealloc_pd(priv->pd); |
255 | |||
256 | out_free_wq: | ||
257 | destroy_workqueue(priv->wq); | ||
258 | priv->wq = NULL; | ||
259 | return -ENODEV; | 245 | return -ENODEV; |
260 | } | 246 | } |
261 | 247 | ||
@@ -284,12 +270,6 @@ void ipoib_transport_dev_cleanup(struct net_device *dev) | |||
284 | 270 | ||
285 | if (ib_dealloc_pd(priv->pd)) | 271 | if (ib_dealloc_pd(priv->pd)) |
286 | ipoib_warn(priv, "ib_dealloc_pd failed\n"); | 272 | ipoib_warn(priv, "ib_dealloc_pd failed\n"); |
287 | |||
288 | if (priv->wq) { | ||
289 | flush_workqueue(priv->wq); | ||
290 | destroy_workqueue(priv->wq); | ||
291 | priv->wq = NULL; | ||
292 | } | ||
293 | } | 273 | } |
294 | 274 | ||
295 | void ipoib_event(struct ib_event_handler *handler, | 275 | void ipoib_event(struct ib_event_handler *handler, |
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 77ecf6d32237..6e22682c8255 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c | |||
@@ -1097,6 +1097,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse, | |||
1097 | * Asus UX31 0x361f00 20, 15, 0e clickpad | 1097 | * Asus UX31 0x361f00 20, 15, 0e clickpad |
1098 | * Asus UX32VD 0x361f02 00, 15, 0e clickpad | 1098 | * Asus UX32VD 0x361f02 00, 15, 0e clickpad |
1099 | * Avatar AVIU-145A2 0x361f00 ? clickpad | 1099 | * Avatar AVIU-145A2 0x361f00 ? clickpad |
1100 | * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons | ||
1101 | * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons | ||
1100 | * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**) | 1102 | * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**) |
1101 | * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons | 1103 | * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons |
1102 | * Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*) | 1104 | * Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*) |
@@ -1475,6 +1477,20 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = { | |||
1475 | DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"), | 1477 | DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"), |
1476 | }, | 1478 | }, |
1477 | }, | 1479 | }, |
1480 | { | ||
1481 | /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */ | ||
1482 | .matches = { | ||
1483 | DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), | ||
1484 | DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"), | ||
1485 | }, | ||
1486 | }, | ||
1487 | { | ||
1488 | /* Fujitsu LIFEBOOK E544 does not work with crc_enabled == 0 */ | ||
1489 | .matches = { | ||
1490 | DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), | ||
1491 | DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E544"), | ||
1492 | }, | ||
1493 | }, | ||
1478 | #endif | 1494 | #endif |
1479 | { } | 1495 | { } |
1480 | }; | 1496 | }; |
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index f9472920d986..23e26e0768b5 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c | |||
@@ -135,8 +135,9 @@ static const struct min_max_quirk min_max_pnpid_table[] = { | |||
135 | 1232, 5710, 1156, 4696 | 135 | 1232, 5710, 1156, 4696 |
136 | }, | 136 | }, |
137 | { | 137 | { |
138 | (const char * const []){"LEN0034", "LEN0036", "LEN0039", | 138 | (const char * const []){"LEN0034", "LEN0036", "LEN0037", |
139 | "LEN2002", "LEN2004", NULL}, | 139 | "LEN0039", "LEN2002", "LEN2004", |
140 | NULL}, | ||
140 | 1024, 5112, 2024, 4832 | 141 | 1024, 5112, 2024, 4832 |
141 | }, | 142 | }, |
142 | { | 143 | { |
@@ -165,7 +166,7 @@ static const char * const topbuttonpad_pnp_ids[] = { | |||
165 | "LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */ | 166 | "LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */ |
166 | "LEN0035", /* X240 */ | 167 | "LEN0035", /* X240 */ |
167 | "LEN0036", /* T440 */ | 168 | "LEN0036", /* T440 */ |
168 | "LEN0037", | 169 | "LEN0037", /* X1 Carbon 2nd */ |
169 | "LEN0038", | 170 | "LEN0038", |
170 | "LEN0039", /* T440s */ | 171 | "LEN0039", /* T440s */ |
171 | "LEN0041", | 172 | "LEN0041", |
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 764857b4e268..c11556563ef0 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h | |||
@@ -152,6 +152,14 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { | |||
152 | }, | 152 | }, |
153 | }, | 153 | }, |
154 | { | 154 | { |
155 | /* Medion Akoya E7225 */ | ||
156 | .matches = { | ||
157 | DMI_MATCH(DMI_SYS_VENDOR, "Medion"), | ||
158 | DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"), | ||
159 | DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), | ||
160 | }, | ||
161 | }, | ||
162 | { | ||
155 | /* Blue FB5601 */ | 163 | /* Blue FB5601 */ |
156 | .matches = { | 164 | .matches = { |
157 | DMI_MATCH(DMI_SYS_VENDOR, "blue"), | 165 | DMI_MATCH(DMI_SYS_VENDOR, "blue"), |
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 98024856df07..59de6364a910 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c | |||
@@ -4284,7 +4284,6 @@ static int alloc_hpet_msi(unsigned int irq, unsigned int id) | |||
4284 | } | 4284 | } |
4285 | 4285 | ||
4286 | struct irq_remap_ops amd_iommu_irq_ops = { | 4286 | struct irq_remap_ops amd_iommu_irq_ops = { |
4287 | .supported = amd_iommu_supported, | ||
4288 | .prepare = amd_iommu_prepare, | 4287 | .prepare = amd_iommu_prepare, |
4289 | .enable = amd_iommu_enable, | 4288 | .enable = amd_iommu_enable, |
4290 | .disable = amd_iommu_disable, | 4289 | .disable = amd_iommu_disable, |
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index b0522f15730f..9a20248e7068 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c | |||
@@ -2014,9 +2014,6 @@ static bool detect_ivrs(void) | |||
2014 | /* Make sure ACS will be enabled during PCI probe */ | 2014 | /* Make sure ACS will be enabled during PCI probe */ |
2015 | pci_request_acs(); | 2015 | pci_request_acs(); |
2016 | 2016 | ||
2017 | if (!disable_irq_remap) | ||
2018 | amd_iommu_irq_remap = true; | ||
2019 | |||
2020 | return true; | 2017 | return true; |
2021 | } | 2018 | } |
2022 | 2019 | ||
@@ -2123,12 +2120,14 @@ static int __init iommu_go_to_state(enum iommu_init_state state) | |||
2123 | #ifdef CONFIG_IRQ_REMAP | 2120 | #ifdef CONFIG_IRQ_REMAP |
2124 | int __init amd_iommu_prepare(void) | 2121 | int __init amd_iommu_prepare(void) |
2125 | { | 2122 | { |
2126 | return iommu_go_to_state(IOMMU_ACPI_FINISHED); | 2123 | int ret; |
2127 | } | ||
2128 | 2124 | ||
2129 | int __init amd_iommu_supported(void) | 2125 | amd_iommu_irq_remap = true; |
2130 | { | 2126 | |
2131 | return amd_iommu_irq_remap ? 1 : 0; | 2127 | ret = iommu_go_to_state(IOMMU_ACPI_FINISHED); |
2128 | if (ret) | ||
2129 | return ret; | ||
2130 | return amd_iommu_irq_remap ? 0 : -ENODEV; | ||
2132 | } | 2131 | } |
2133 | 2132 | ||
2134 | int __init amd_iommu_enable(void) | 2133 | int __init amd_iommu_enable(void) |
diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h index 95ed6deae47f..861af9d8338a 100644 --- a/drivers/iommu/amd_iommu_proto.h +++ b/drivers/iommu/amd_iommu_proto.h | |||
@@ -33,7 +33,6 @@ extern void amd_iommu_init_notifier(void); | |||
33 | extern void amd_iommu_init_api(void); | 33 | extern void amd_iommu_init_api(void); |
34 | 34 | ||
35 | /* Needed for interrupt remapping */ | 35 | /* Needed for interrupt remapping */ |
36 | extern int amd_iommu_supported(void); | ||
37 | extern int amd_iommu_prepare(void); | 36 | extern int amd_iommu_prepare(void); |
38 | extern int amd_iommu_enable(void); | 37 | extern int amd_iommu_enable(void); |
39 | extern void amd_iommu_disable(void); | 38 | extern void amd_iommu_disable(void); |
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index a55b207b9425..14de1ab223c8 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c | |||
@@ -32,8 +32,9 @@ struct hpet_scope { | |||
32 | }; | 32 | }; |
33 | 33 | ||
34 | #define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0) | 34 | #define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0) |
35 | #define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8) | 35 | #define IRTE_DEST(dest) ((eim_mode) ? dest : dest << 8) |
36 | 36 | ||
37 | static int __read_mostly eim_mode; | ||
37 | static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; | 38 | static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; |
38 | static struct hpet_scope ir_hpet[MAX_HPET_TBS]; | 39 | static struct hpet_scope ir_hpet[MAX_HPET_TBS]; |
39 | 40 | ||
@@ -481,11 +482,11 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu) | |||
481 | if (iommu->ir_table) | 482 | if (iommu->ir_table) |
482 | return 0; | 483 | return 0; |
483 | 484 | ||
484 | ir_table = kzalloc(sizeof(struct ir_table), GFP_ATOMIC); | 485 | ir_table = kzalloc(sizeof(struct ir_table), GFP_KERNEL); |
485 | if (!ir_table) | 486 | if (!ir_table) |
486 | return -ENOMEM; | 487 | return -ENOMEM; |
487 | 488 | ||
488 | pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, | 489 | pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, |
489 | INTR_REMAP_PAGE_ORDER); | 490 | INTR_REMAP_PAGE_ORDER); |
490 | 491 | ||
491 | if (!pages) { | 492 | if (!pages) { |
@@ -566,13 +567,27 @@ static int __init dmar_x2apic_optout(void) | |||
566 | return dmar->flags & DMAR_X2APIC_OPT_OUT; | 567 | return dmar->flags & DMAR_X2APIC_OPT_OUT; |
567 | } | 568 | } |
568 | 569 | ||
569 | static int __init intel_irq_remapping_supported(void) | 570 | static void __init intel_cleanup_irq_remapping(void) |
571 | { | ||
572 | struct dmar_drhd_unit *drhd; | ||
573 | struct intel_iommu *iommu; | ||
574 | |||
575 | for_each_iommu(iommu, drhd) { | ||
576 | if (ecap_ir_support(iommu->ecap)) { | ||
577 | iommu_disable_irq_remapping(iommu); | ||
578 | intel_teardown_irq_remapping(iommu); | ||
579 | } | ||
580 | } | ||
581 | |||
582 | if (x2apic_supported()) | ||
583 | pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n"); | ||
584 | } | ||
585 | |||
586 | static int __init intel_prepare_irq_remapping(void) | ||
570 | { | 587 | { |
571 | struct dmar_drhd_unit *drhd; | 588 | struct dmar_drhd_unit *drhd; |
572 | struct intel_iommu *iommu; | 589 | struct intel_iommu *iommu; |
573 | 590 | ||
574 | if (disable_irq_remap) | ||
575 | return 0; | ||
576 | if (irq_remap_broken) { | 591 | if (irq_remap_broken) { |
577 | printk(KERN_WARNING | 592 | printk(KERN_WARNING |
578 | "This system BIOS has enabled interrupt remapping\n" | 593 | "This system BIOS has enabled interrupt remapping\n" |
@@ -581,38 +596,45 @@ static int __init intel_irq_remapping_supported(void) | |||
581 | "interrupt remapping is being disabled. Please\n" | 596 | "interrupt remapping is being disabled. Please\n" |
582 | "contact your BIOS vendor for an update\n"); | 597 | "contact your BIOS vendor for an update\n"); |
583 | add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); | 598 | add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); |
584 | disable_irq_remap = 1; | 599 | return -ENODEV; |
585 | return 0; | ||
586 | } | 600 | } |
587 | 601 | ||
602 | if (dmar_table_init() < 0) | ||
603 | return -ENODEV; | ||
604 | |||
588 | if (!dmar_ir_support()) | 605 | if (!dmar_ir_support()) |
589 | return 0; | 606 | return -ENODEV; |
607 | |||
608 | if (parse_ioapics_under_ir() != 1) { | ||
609 | printk(KERN_INFO "Not enabling interrupt remapping\n"); | ||
610 | goto error; | ||
611 | } | ||
590 | 612 | ||
613 | /* First make sure all IOMMUs support IRQ remapping */ | ||
591 | for_each_iommu(iommu, drhd) | 614 | for_each_iommu(iommu, drhd) |
592 | if (!ecap_ir_support(iommu->ecap)) | 615 | if (!ecap_ir_support(iommu->ecap)) |
593 | return 0; | 616 | goto error; |
594 | 617 | ||
595 | return 1; | 618 | /* Do the allocations early */ |
619 | for_each_iommu(iommu, drhd) | ||
620 | if (intel_setup_irq_remapping(iommu)) | ||
621 | goto error; | ||
622 | |||
623 | return 0; | ||
624 | |||
625 | error: | ||
626 | intel_cleanup_irq_remapping(); | ||
627 | return -ENODEV; | ||
596 | } | 628 | } |
597 | 629 | ||
598 | static int __init intel_enable_irq_remapping(void) | 630 | static int __init intel_enable_irq_remapping(void) |
599 | { | 631 | { |
600 | struct dmar_drhd_unit *drhd; | 632 | struct dmar_drhd_unit *drhd; |
601 | struct intel_iommu *iommu; | 633 | struct intel_iommu *iommu; |
602 | bool x2apic_present; | ||
603 | int setup = 0; | 634 | int setup = 0; |
604 | int eim = 0; | 635 | int eim = 0; |
605 | 636 | ||
606 | x2apic_present = x2apic_supported(); | 637 | if (x2apic_supported()) { |
607 | |||
608 | if (parse_ioapics_under_ir() != 1) { | ||
609 | printk(KERN_INFO "Not enable interrupt remapping\n"); | ||
610 | goto error; | ||
611 | } | ||
612 | |||
613 | if (x2apic_present) { | ||
614 | pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n"); | ||
615 | |||
616 | eim = !dmar_x2apic_optout(); | 638 | eim = !dmar_x2apic_optout(); |
617 | if (!eim) | 639 | if (!eim) |
618 | printk(KERN_WARNING | 640 | printk(KERN_WARNING |
@@ -646,16 +668,15 @@ static int __init intel_enable_irq_remapping(void) | |||
646 | /* | 668 | /* |
647 | * check for the Interrupt-remapping support | 669 | * check for the Interrupt-remapping support |
648 | */ | 670 | */ |
649 | for_each_iommu(iommu, drhd) { | 671 | for_each_iommu(iommu, drhd) |
650 | if (!ecap_ir_support(iommu->ecap)) | ||
651 | continue; | ||
652 | |||
653 | if (eim && !ecap_eim_support(iommu->ecap)) { | 672 | if (eim && !ecap_eim_support(iommu->ecap)) { |
654 | printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, " | 673 | printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, " |
655 | " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap); | 674 | " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap); |
656 | goto error; | 675 | eim = 0; |
657 | } | 676 | } |
658 | } | 677 | eim_mode = eim; |
678 | if (eim) | ||
679 | pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n"); | ||
659 | 680 | ||
660 | /* | 681 | /* |
661 | * Enable queued invalidation for all the DRHD's. | 682 | * Enable queued invalidation for all the DRHD's. |
@@ -675,12 +696,6 @@ static int __init intel_enable_irq_remapping(void) | |||
675 | * Setup Interrupt-remapping for all the DRHD's now. | 696 | * Setup Interrupt-remapping for all the DRHD's now. |
676 | */ | 697 | */ |
677 | for_each_iommu(iommu, drhd) { | 698 | for_each_iommu(iommu, drhd) { |
678 | if (!ecap_ir_support(iommu->ecap)) | ||
679 | continue; | ||
680 | |||
681 | if (intel_setup_irq_remapping(iommu)) | ||
682 | goto error; | ||
683 | |||
684 | iommu_set_irq_remapping(iommu, eim); | 699 | iommu_set_irq_remapping(iommu, eim); |
685 | setup = 1; | 700 | setup = 1; |
686 | } | 701 | } |
@@ -702,15 +717,7 @@ static int __init intel_enable_irq_remapping(void) | |||
702 | return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE; | 717 | return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE; |
703 | 718 | ||
704 | error: | 719 | error: |
705 | for_each_iommu(iommu, drhd) | 720 | intel_cleanup_irq_remapping(); |
706 | if (ecap_ir_support(iommu->ecap)) { | ||
707 | iommu_disable_irq_remapping(iommu); | ||
708 | intel_teardown_irq_remapping(iommu); | ||
709 | } | ||
710 | |||
711 | if (x2apic_present) | ||
712 | pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n"); | ||
713 | |||
714 | return -1; | 721 | return -1; |
715 | } | 722 | } |
716 | 723 | ||
@@ -1199,8 +1206,7 @@ static int intel_alloc_hpet_msi(unsigned int irq, unsigned int id) | |||
1199 | } | 1206 | } |
1200 | 1207 | ||
1201 | struct irq_remap_ops intel_irq_remap_ops = { | 1208 | struct irq_remap_ops intel_irq_remap_ops = { |
1202 | .supported = intel_irq_remapping_supported, | 1209 | .prepare = intel_prepare_irq_remapping, |
1203 | .prepare = dmar_table_init, | ||
1204 | .enable = intel_enable_irq_remapping, | 1210 | .enable = intel_enable_irq_remapping, |
1205 | .disable = disable_irq_remapping, | 1211 | .disable = disable_irq_remapping, |
1206 | .reenable = reenable_irq_remapping, | 1212 | .reenable = reenable_irq_remapping, |
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c index 89c4846683be..390079ee1350 100644 --- a/drivers/iommu/irq_remapping.c +++ b/drivers/iommu/irq_remapping.c | |||
@@ -17,12 +17,11 @@ | |||
17 | #include "irq_remapping.h" | 17 | #include "irq_remapping.h" |
18 | 18 | ||
19 | int irq_remapping_enabled; | 19 | int irq_remapping_enabled; |
20 | |||
21 | int disable_irq_remap; | ||
22 | int irq_remap_broken; | 20 | int irq_remap_broken; |
23 | int disable_sourceid_checking; | 21 | int disable_sourceid_checking; |
24 | int no_x2apic_optout; | 22 | int no_x2apic_optout; |
25 | 23 | ||
24 | static int disable_irq_remap; | ||
26 | static struct irq_remap_ops *remap_ops; | 25 | static struct irq_remap_ops *remap_ops; |
27 | 26 | ||
28 | static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec); | 27 | static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec); |
@@ -194,45 +193,32 @@ static __init int setup_irqremap(char *str) | |||
194 | } | 193 | } |
195 | early_param("intremap", setup_irqremap); | 194 | early_param("intremap", setup_irqremap); |
196 | 195 | ||
197 | void __init setup_irq_remapping_ops(void) | ||
198 | { | ||
199 | remap_ops = &intel_irq_remap_ops; | ||
200 | |||
201 | #ifdef CONFIG_AMD_IOMMU | ||
202 | if (amd_iommu_irq_ops.prepare() == 0) | ||
203 | remap_ops = &amd_iommu_irq_ops; | ||
204 | #endif | ||
205 | } | ||
206 | |||
207 | void set_irq_remapping_broken(void) | 196 | void set_irq_remapping_broken(void) |
208 | { | 197 | { |
209 | irq_remap_broken = 1; | 198 | irq_remap_broken = 1; |
210 | } | 199 | } |
211 | 200 | ||
212 | int irq_remapping_supported(void) | 201 | int __init irq_remapping_prepare(void) |
213 | { | 202 | { |
214 | if (disable_irq_remap) | 203 | if (disable_irq_remap) |
215 | return 0; | 204 | return -ENOSYS; |
216 | |||
217 | if (!remap_ops || !remap_ops->supported) | ||
218 | return 0; | ||
219 | |||
220 | return remap_ops->supported(); | ||
221 | } | ||
222 | 205 | ||
223 | int __init irq_remapping_prepare(void) | 206 | if (intel_irq_remap_ops.prepare() == 0) |
224 | { | 207 | remap_ops = &intel_irq_remap_ops; |
225 | if (!remap_ops || !remap_ops->prepare) | 208 | else if (IS_ENABLED(CONFIG_AMD_IOMMU) && |
226 | return -ENODEV; | 209 | amd_iommu_irq_ops.prepare() == 0) |
210 | remap_ops = &amd_iommu_irq_ops; | ||
211 | else | ||
212 | return -ENOSYS; | ||
227 | 213 | ||
228 | return remap_ops->prepare(); | 214 | return 0; |
229 | } | 215 | } |
230 | 216 | ||
231 | int __init irq_remapping_enable(void) | 217 | int __init irq_remapping_enable(void) |
232 | { | 218 | { |
233 | int ret; | 219 | int ret; |
234 | 220 | ||
235 | if (!remap_ops || !remap_ops->enable) | 221 | if (!remap_ops->enable) |
236 | return -ENODEV; | 222 | return -ENODEV; |
237 | 223 | ||
238 | ret = remap_ops->enable(); | 224 | ret = remap_ops->enable(); |
@@ -245,22 +231,16 @@ int __init irq_remapping_enable(void) | |||
245 | 231 | ||
246 | void irq_remapping_disable(void) | 232 | void irq_remapping_disable(void) |
247 | { | 233 | { |
248 | if (!irq_remapping_enabled || | 234 | if (irq_remapping_enabled && remap_ops->disable) |
249 | !remap_ops || | 235 | remap_ops->disable(); |
250 | !remap_ops->disable) | ||
251 | return; | ||
252 | |||
253 | remap_ops->disable(); | ||
254 | } | 236 | } |
255 | 237 | ||
256 | int irq_remapping_reenable(int mode) | 238 | int irq_remapping_reenable(int mode) |
257 | { | 239 | { |
258 | if (!irq_remapping_enabled || | 240 | if (irq_remapping_enabled && remap_ops->reenable) |
259 | !remap_ops || | 241 | return remap_ops->reenable(mode); |
260 | !remap_ops->reenable) | ||
261 | return 0; | ||
262 | 242 | ||
263 | return remap_ops->reenable(mode); | 243 | return 0; |
264 | } | 244 | } |
265 | 245 | ||
266 | int __init irq_remap_enable_fault_handling(void) | 246 | int __init irq_remap_enable_fault_handling(void) |
@@ -268,7 +248,7 @@ int __init irq_remap_enable_fault_handling(void) | |||
268 | if (!irq_remapping_enabled) | 248 | if (!irq_remapping_enabled) |
269 | return 0; | 249 | return 0; |
270 | 250 | ||
271 | if (!remap_ops || !remap_ops->enable_faulting) | 251 | if (!remap_ops->enable_faulting) |
272 | return -ENODEV; | 252 | return -ENODEV; |
273 | 253 | ||
274 | return remap_ops->enable_faulting(); | 254 | return remap_ops->enable_faulting(); |
@@ -279,7 +259,7 @@ int setup_ioapic_remapped_entry(int irq, | |||
279 | unsigned int destination, int vector, | 259 | unsigned int destination, int vector, |
280 | struct io_apic_irq_attr *attr) | 260 | struct io_apic_irq_attr *attr) |
281 | { | 261 | { |
282 | if (!remap_ops || !remap_ops->setup_ioapic_entry) | 262 | if (!remap_ops->setup_ioapic_entry) |
283 | return -ENODEV; | 263 | return -ENODEV; |
284 | 264 | ||
285 | return remap_ops->setup_ioapic_entry(irq, entry, destination, | 265 | return remap_ops->setup_ioapic_entry(irq, entry, destination, |
@@ -289,8 +269,7 @@ int setup_ioapic_remapped_entry(int irq, | |||
289 | static int set_remapped_irq_affinity(struct irq_data *data, | 269 | static int set_remapped_irq_affinity(struct irq_data *data, |
290 | const struct cpumask *mask, bool force) | 270 | const struct cpumask *mask, bool force) |
291 | { | 271 | { |
292 | if (!config_enabled(CONFIG_SMP) || !remap_ops || | 272 | if (!config_enabled(CONFIG_SMP) || !remap_ops->set_affinity) |
293 | !remap_ops->set_affinity) | ||
294 | return 0; | 273 | return 0; |
295 | 274 | ||
296 | return remap_ops->set_affinity(data, mask, force); | 275 | return remap_ops->set_affinity(data, mask, force); |
@@ -300,10 +279,7 @@ void free_remapped_irq(int irq) | |||
300 | { | 279 | { |
301 | struct irq_cfg *cfg = irq_cfg(irq); | 280 | struct irq_cfg *cfg = irq_cfg(irq); |
302 | 281 | ||
303 | if (!remap_ops || !remap_ops->free_irq) | 282 | if (irq_remapped(cfg) && remap_ops->free_irq) |
304 | return; | ||
305 | |||
306 | if (irq_remapped(cfg)) | ||
307 | remap_ops->free_irq(irq); | 283 | remap_ops->free_irq(irq); |
308 | } | 284 | } |
309 | 285 | ||
@@ -315,13 +291,13 @@ void compose_remapped_msi_msg(struct pci_dev *pdev, | |||
315 | 291 | ||
316 | if (!irq_remapped(cfg)) | 292 | if (!irq_remapped(cfg)) |
317 | native_compose_msi_msg(pdev, irq, dest, msg, hpet_id); | 293 | native_compose_msi_msg(pdev, irq, dest, msg, hpet_id); |
318 | else if (remap_ops && remap_ops->compose_msi_msg) | 294 | else if (remap_ops->compose_msi_msg) |
319 | remap_ops->compose_msi_msg(pdev, irq, dest, msg, hpet_id); | 295 | remap_ops->compose_msi_msg(pdev, irq, dest, msg, hpet_id); |
320 | } | 296 | } |
321 | 297 | ||
322 | static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec) | 298 | static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec) |
323 | { | 299 | { |
324 | if (!remap_ops || !remap_ops->msi_alloc_irq) | 300 | if (!remap_ops->msi_alloc_irq) |
325 | return -ENODEV; | 301 | return -ENODEV; |
326 | 302 | ||
327 | return remap_ops->msi_alloc_irq(pdev, irq, nvec); | 303 | return remap_ops->msi_alloc_irq(pdev, irq, nvec); |
@@ -330,7 +306,7 @@ static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec) | |||
330 | static int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq, | 306 | static int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq, |
331 | int index, int sub_handle) | 307 | int index, int sub_handle) |
332 | { | 308 | { |
333 | if (!remap_ops || !remap_ops->msi_setup_irq) | 309 | if (!remap_ops->msi_setup_irq) |
334 | return -ENODEV; | 310 | return -ENODEV; |
335 | 311 | ||
336 | return remap_ops->msi_setup_irq(pdev, irq, index, sub_handle); | 312 | return remap_ops->msi_setup_irq(pdev, irq, index, sub_handle); |
@@ -340,7 +316,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id) | |||
340 | { | 316 | { |
341 | int ret; | 317 | int ret; |
342 | 318 | ||
343 | if (!remap_ops || !remap_ops->alloc_hpet_msi) | 319 | if (!remap_ops->alloc_hpet_msi) |
344 | return -ENODEV; | 320 | return -ENODEV; |
345 | 321 | ||
346 | ret = remap_ops->alloc_hpet_msi(irq, id); | 322 | ret = remap_ops->alloc_hpet_msi(irq, id); |
diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h index fde250f86e60..c448eb48340a 100644 --- a/drivers/iommu/irq_remapping.h +++ b/drivers/iommu/irq_remapping.h | |||
@@ -31,16 +31,12 @@ struct cpumask; | |||
31 | struct pci_dev; | 31 | struct pci_dev; |
32 | struct msi_msg; | 32 | struct msi_msg; |
33 | 33 | ||
34 | extern int disable_irq_remap; | ||
35 | extern int irq_remap_broken; | 34 | extern int irq_remap_broken; |
36 | extern int disable_sourceid_checking; | 35 | extern int disable_sourceid_checking; |
37 | extern int no_x2apic_optout; | 36 | extern int no_x2apic_optout; |
38 | extern int irq_remapping_enabled; | 37 | extern int irq_remapping_enabled; |
39 | 38 | ||
40 | struct irq_remap_ops { | 39 | struct irq_remap_ops { |
41 | /* Check whether Interrupt Remapping is supported */ | ||
42 | int (*supported)(void); | ||
43 | |||
44 | /* Initializes hardware and makes it ready for remapping interrupts */ | 40 | /* Initializes hardware and makes it ready for remapping interrupts */ |
45 | int (*prepare)(void); | 41 | int (*prepare)(void); |
46 | 42 | ||
@@ -89,7 +85,6 @@ extern struct irq_remap_ops amd_iommu_irq_ops; | |||
89 | #else /* CONFIG_IRQ_REMAP */ | 85 | #else /* CONFIG_IRQ_REMAP */ |
90 | 86 | ||
91 | #define irq_remapping_enabled 0 | 87 | #define irq_remapping_enabled 0 |
92 | #define disable_irq_remap 1 | ||
93 | #define irq_remap_broken 0 | 88 | #define irq_remap_broken 0 |
94 | 89 | ||
95 | #endif /* CONFIG_IRQ_REMAP */ | 90 | #endif /* CONFIG_IRQ_REMAP */ |
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c index f722a0c466cf..c48da057dbb1 100644 --- a/drivers/iommu/tegra-gart.c +++ b/drivers/iommu/tegra-gart.c | |||
@@ -315,6 +315,7 @@ static const struct iommu_ops gart_iommu_ops = { | |||
315 | .attach_dev = gart_iommu_attach_dev, | 315 | .attach_dev = gart_iommu_attach_dev, |
316 | .detach_dev = gart_iommu_detach_dev, | 316 | .detach_dev = gart_iommu_detach_dev, |
317 | .map = gart_iommu_map, | 317 | .map = gart_iommu_map, |
318 | .map_sg = default_iommu_map_sg, | ||
318 | .unmap = gart_iommu_unmap, | 319 | .unmap = gart_iommu_unmap, |
319 | .iova_to_phys = gart_iommu_iova_to_phys, | 320 | .iova_to_phys = gart_iommu_iova_to_phys, |
320 | .pgsize_bitmap = GART_IOMMU_PGSIZES, | 321 | .pgsize_bitmap = GART_IOMMU_PGSIZES, |
@@ -395,7 +396,7 @@ static int tegra_gart_probe(struct platform_device *pdev) | |||
395 | do_gart_setup(gart, NULL); | 396 | do_gart_setup(gart, NULL); |
396 | 397 | ||
397 | gart_handle = gart; | 398 | gart_handle = gart; |
398 | bus_set_iommu(&platform_bus_type, &gart_iommu_ops); | 399 | |
399 | return 0; | 400 | return 0; |
400 | } | 401 | } |
401 | 402 | ||
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index 2b0468e3df6a..56b96c63dc4b 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c | |||
@@ -37,6 +37,7 @@ static struct irq_domain *gic_irq_domain; | |||
37 | static int gic_shared_intrs; | 37 | static int gic_shared_intrs; |
38 | static int gic_vpes; | 38 | static int gic_vpes; |
39 | static unsigned int gic_cpu_pin; | 39 | static unsigned int gic_cpu_pin; |
40 | static unsigned int timer_cpu_pin; | ||
40 | static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller; | 41 | static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller; |
41 | 42 | ||
42 | static void __gic_irq_dispatch(void); | 43 | static void __gic_irq_dispatch(void); |
@@ -616,6 +617,8 @@ static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq, | |||
616 | gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP), val); | 617 | gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP), val); |
617 | break; | 618 | break; |
618 | case GIC_LOCAL_INT_TIMER: | 619 | case GIC_LOCAL_INT_TIMER: |
620 | /* CONFIG_MIPS_CMP workaround (see __gic_init) */ | ||
621 | val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin; | ||
619 | gic_write(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), val); | 622 | gic_write(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), val); |
620 | break; | 623 | break; |
621 | case GIC_LOCAL_INT_PERFCTR: | 624 | case GIC_LOCAL_INT_PERFCTR: |
@@ -713,12 +716,36 @@ static void __init __gic_init(unsigned long gic_base_addr, | |||
713 | if (cpu_has_veic) { | 716 | if (cpu_has_veic) { |
714 | /* Always use vector 1 in EIC mode */ | 717 | /* Always use vector 1 in EIC mode */ |
715 | gic_cpu_pin = 0; | 718 | gic_cpu_pin = 0; |
719 | timer_cpu_pin = gic_cpu_pin; | ||
716 | set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET, | 720 | set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET, |
717 | __gic_irq_dispatch); | 721 | __gic_irq_dispatch); |
718 | } else { | 722 | } else { |
719 | gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET; | 723 | gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET; |
720 | irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec, | 724 | irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec, |
721 | gic_irq_dispatch); | 725 | gic_irq_dispatch); |
726 | /* | ||
727 | * With the CMP implementation of SMP (deprecated), other CPUs | ||
728 | * are started by the bootloader and put into a timer based | ||
729 | * waiting poll loop. We must not re-route those CPU's local | ||
730 | * timer interrupts as the wait instruction will never finish, | ||
731 | * so just handle whatever CPU interrupt it is routed to by | ||
732 | * default. | ||
733 | * | ||
734 | * This workaround should be removed when CMP support is | ||
735 | * dropped. | ||
736 | */ | ||
737 | if (IS_ENABLED(CONFIG_MIPS_CMP) && | ||
738 | gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) { | ||
739 | timer_cpu_pin = gic_read(GIC_REG(VPE_LOCAL, | ||
740 | GIC_VPE_TIMER_MAP)) & | ||
741 | GIC_MAP_MSK; | ||
742 | irq_set_chained_handler(MIPS_CPU_IRQ_BASE + | ||
743 | GIC_CPU_PIN_OFFSET + | ||
744 | timer_cpu_pin, | ||
745 | gic_irq_dispatch); | ||
746 | } else { | ||
747 | timer_cpu_pin = gic_cpu_pin; | ||
748 | } | ||
722 | } | 749 | } |
723 | 750 | ||
724 | gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS + | 751 | gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS + |
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c index 0b380603a578..d7c286656a25 100644 --- a/drivers/isdn/hardware/eicon/message.c +++ b/drivers/isdn/hardware/eicon/message.c | |||
@@ -1474,7 +1474,7 @@ static byte connect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a, | |||
1474 | add_ai(plci, &parms[5]); | 1474 | add_ai(plci, &parms[5]); |
1475 | sig_req(plci, REJECT, 0); | 1475 | sig_req(plci, REJECT, 0); |
1476 | } | 1476 | } |
1477 | else if (Reject == 1 || Reject > 9) | 1477 | else if (Reject == 1 || Reject >= 9) |
1478 | { | 1478 | { |
1479 | add_ai(plci, &parms[5]); | 1479 | add_ai(plci, &parms[5]); |
1480 | sig_req(plci, HANGUP, 0); | 1480 | sig_req(plci, HANGUP, 0); |
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c index 6dbf6fcbdfaf..e8902f8dddfc 100644 --- a/drivers/mailbox/pcc.c +++ b/drivers/mailbox/pcc.c | |||
@@ -386,7 +386,7 @@ static int __init pcc_init(void) | |||
386 | ret = acpi_pcc_probe(); | 386 | ret = acpi_pcc_probe(); |
387 | 387 | ||
388 | if (ret) { | 388 | if (ret) { |
389 | pr_err("ACPI PCC probe failed.\n"); | 389 | pr_debug("ACPI PCC probe failed.\n"); |
390 | return -ENODEV; | 390 | return -ENODEV; |
391 | } | 391 | } |
392 | 392 | ||
@@ -394,7 +394,7 @@ static int __init pcc_init(void) | |||
394 | pcc_mbox_probe, NULL, 0, NULL, 0); | 394 | pcc_mbox_probe, NULL, 0, NULL, 0); |
395 | 395 | ||
396 | if (!pcc_pdev) { | 396 | if (!pcc_pdev) { |
397 | pr_err("Err creating PCC platform bundle\n"); | 397 | pr_debug("Err creating PCC platform bundle\n"); |
398 | return -ENODEV; | 398 | return -ENODEV; |
399 | } | 399 | } |
400 | 400 | ||
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 5bdedf6df153..c355a226a024 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig | |||
@@ -5,6 +5,7 @@ | |||
5 | menuconfig MD | 5 | menuconfig MD |
6 | bool "Multiple devices driver support (RAID and LVM)" | 6 | bool "Multiple devices driver support (RAID and LVM)" |
7 | depends on BLOCK | 7 | depends on BLOCK |
8 | select SRCU | ||
8 | help | 9 | help |
9 | Support multiple physical spindles through a single logical device. | 10 | Support multiple physical spindles through a single logical device. |
10 | Required for RAID and logical volume management. | 11 | Required for RAID and logical volume management. |
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index da3604e73e8a..1695ee5f3ffc 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c | |||
@@ -72,6 +72,19 @@ __acquires(bitmap->lock) | |||
72 | /* this page has not been allocated yet */ | 72 | /* this page has not been allocated yet */ |
73 | 73 | ||
74 | spin_unlock_irq(&bitmap->lock); | 74 | spin_unlock_irq(&bitmap->lock); |
75 | /* It is possible that this is being called inside a | ||
76 | * prepare_to_wait/finish_wait loop from raid5c:make_request(). | ||
77 | * In general it is not permitted to sleep in that context as it | ||
78 | * can cause the loop to spin freely. | ||
79 | * That doesn't apply here as we can only reach this point | ||
80 | * once with any loop. | ||
81 | * When this function completes, either bp[page].map or | ||
82 | * bp[page].hijacked. In either case, this function will | ||
83 | * abort before getting to this point again. So there is | ||
84 | * no risk of a free-spin, and so it is safe to assert | ||
85 | * that sleeping here is allowed. | ||
86 | */ | ||
87 | sched_annotate_sleep(); | ||
75 | mappage = kzalloc(PAGE_SIZE, GFP_NOIO); | 88 | mappage = kzalloc(PAGE_SIZE, GFP_NOIO); |
76 | spin_lock_irq(&bitmap->lock); | 89 | spin_lock_irq(&bitmap->lock); |
77 | 90 | ||
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 21b156242e42..c1c010498a21 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c | |||
@@ -683,7 +683,7 @@ static struct dm_cache_metadata *metadata_open(struct block_device *bdev, | |||
683 | cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); | 683 | cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); |
684 | if (!cmd) { | 684 | if (!cmd) { |
685 | DMERR("could not allocate metadata struct"); | 685 | DMERR("could not allocate metadata struct"); |
686 | return NULL; | 686 | return ERR_PTR(-ENOMEM); |
687 | } | 687 | } |
688 | 688 | ||
689 | atomic_set(&cmd->ref_count, 1); | 689 | atomic_set(&cmd->ref_count, 1); |
@@ -745,7 +745,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev, | |||
745 | return cmd; | 745 | return cmd; |
746 | 746 | ||
747 | cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size); | 747 | cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size); |
748 | if (cmd) { | 748 | if (!IS_ERR(cmd)) { |
749 | mutex_lock(&table_lock); | 749 | mutex_lock(&table_lock); |
750 | cmd2 = lookup(bdev); | 750 | cmd2 = lookup(bdev); |
751 | if (cmd2) { | 751 | if (cmd2) { |
@@ -780,9 +780,10 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev, | |||
780 | { | 780 | { |
781 | struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size, | 781 | struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size, |
782 | may_format_device, policy_hint_size); | 782 | may_format_device, policy_hint_size); |
783 | if (cmd && !same_params(cmd, data_block_size)) { | 783 | |
784 | if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) { | ||
784 | dm_cache_metadata_close(cmd); | 785 | dm_cache_metadata_close(cmd); |
785 | return NULL; | 786 | return ERR_PTR(-EINVAL); |
786 | } | 787 | } |
787 | 788 | ||
788 | return cmd; | 789 | return cmd; |
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 493478989dbd..07705ee181e3 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
@@ -3385,6 +3385,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv) | |||
3385 | struct pool_c *pt = ti->private; | 3385 | struct pool_c *pt = ti->private; |
3386 | struct pool *pool = pt->pool; | 3386 | struct pool *pool = pt->pool; |
3387 | 3387 | ||
3388 | if (get_pool_mode(pool) >= PM_READ_ONLY) { | ||
3389 | DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode", | ||
3390 | dm_device_name(pool->pool_md)); | ||
3391 | return -EINVAL; | ||
3392 | } | ||
3393 | |||
3388 | if (!strcasecmp(argv[0], "create_thin")) | 3394 | if (!strcasecmp(argv[0], "create_thin")) |
3389 | r = process_create_thin_mesg(argc, argv, pool); | 3395 | r = process_create_thin_mesg(argc, argv, pool); |
3390 | 3396 | ||
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index c1b0d52bfcb0..b98765f6f77f 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -3195,6 +3195,11 @@ static void handle_stripe_dirtying(struct r5conf *conf, | |||
3195 | (unsigned long long)sh->sector, | 3195 | (unsigned long long)sh->sector, |
3196 | rcw, qread, test_bit(STRIPE_DELAYED, &sh->state)); | 3196 | rcw, qread, test_bit(STRIPE_DELAYED, &sh->state)); |
3197 | } | 3197 | } |
3198 | |||
3199 | if (rcw > disks && rmw > disks && | ||
3200 | !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) | ||
3201 | set_bit(STRIPE_DELAYED, &sh->state); | ||
3202 | |||
3198 | /* now if nothing is locked, and if we have enough data, | 3203 | /* now if nothing is locked, and if we have enough data, |
3199 | * we can start a write request | 3204 | * we can start a write request |
3200 | */ | 3205 | */ |
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index d6607ee9c855..84673ebcf428 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -197,6 +197,7 @@ config NETCONSOLE_DYNAMIC | |||
197 | 197 | ||
198 | config NETPOLL | 198 | config NETPOLL |
199 | def_bool NETCONSOLE | 199 | def_bool NETCONSOLE |
200 | select SRCU | ||
200 | 201 | ||
201 | config NET_POLL_CONTROLLER | 202 | config NET_POLL_CONTROLLER |
202 | def_bool NETPOLL | 203 | def_bool NETPOLL |
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c index 5e40a8b68cbe..b3b922adc0e4 100644 --- a/drivers/net/caif/caif_hsi.c +++ b/drivers/net/caif/caif_hsi.c | |||
@@ -1415,7 +1415,6 @@ static int caif_hsi_newlink(struct net *src_net, struct net_device *dev, | |||
1415 | 1415 | ||
1416 | cfhsi = netdev_priv(dev); | 1416 | cfhsi = netdev_priv(dev); |
1417 | cfhsi_netlink_parms(data, cfhsi); | 1417 | cfhsi_netlink_parms(data, cfhsi); |
1418 | dev_net_set(cfhsi->ndev, src_net); | ||
1419 | 1418 | ||
1420 | get_ops = symbol_get(cfhsi_get_ops); | 1419 | get_ops = symbol_get(cfhsi_get_ops); |
1421 | if (!get_ops) { | 1420 | if (!get_ops) { |
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index f94a9fa60488..c672c4dcffac 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c | |||
@@ -615,6 +615,9 @@ static void c_can_stop(struct net_device *dev) | |||
615 | 615 | ||
616 | c_can_irq_control(priv, false); | 616 | c_can_irq_control(priv, false); |
617 | 617 | ||
618 | /* put ctrl to init on stop to end ongoing transmission */ | ||
619 | priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_INIT); | ||
620 | |||
618 | /* deactivate pins */ | 621 | /* deactivate pins */ |
619 | pinctrl_pm_select_sleep_state(dev->dev.parent); | 622 | pinctrl_pm_select_sleep_state(dev->dev.parent); |
620 | priv->can.state = CAN_STATE_STOPPED; | 623 | priv->can.state = CAN_STATE_STOPPED; |
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index c32cd61073bc..7af379ca861b 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c | |||
@@ -587,7 +587,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv, | |||
587 | usb_sndbulkpipe(dev->udev, | 587 | usb_sndbulkpipe(dev->udev, |
588 | dev->bulk_out->bEndpointAddress), | 588 | dev->bulk_out->bEndpointAddress), |
589 | buf, msg->len, | 589 | buf, msg->len, |
590 | kvaser_usb_simple_msg_callback, priv); | 590 | kvaser_usb_simple_msg_callback, netdev); |
591 | usb_anchor_urb(urb, &priv->tx_submitted); | 591 | usb_anchor_urb(urb, &priv->tx_submitted); |
592 | 592 | ||
593 | err = usb_submit_urb(urb, GFP_ATOMIC); | 593 | err = usb_submit_urb(urb, GFP_ATOMIC); |
@@ -662,11 +662,6 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev, | |||
662 | priv = dev->nets[channel]; | 662 | priv = dev->nets[channel]; |
663 | stats = &priv->netdev->stats; | 663 | stats = &priv->netdev->stats; |
664 | 664 | ||
665 | if (status & M16C_STATE_BUS_RESET) { | ||
666 | kvaser_usb_unlink_tx_urbs(priv); | ||
667 | return; | ||
668 | } | ||
669 | |||
670 | skb = alloc_can_err_skb(priv->netdev, &cf); | 665 | skb = alloc_can_err_skb(priv->netdev, &cf); |
671 | if (!skb) { | 666 | if (!skb) { |
672 | stats->rx_dropped++; | 667 | stats->rx_dropped++; |
@@ -677,7 +672,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev, | |||
677 | 672 | ||
678 | netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status); | 673 | netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status); |
679 | 674 | ||
680 | if (status & M16C_STATE_BUS_OFF) { | 675 | if (status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) { |
681 | cf->can_id |= CAN_ERR_BUSOFF; | 676 | cf->can_id |= CAN_ERR_BUSOFF; |
682 | 677 | ||
683 | priv->can.can_stats.bus_off++; | 678 | priv->can.can_stats.bus_off++; |
@@ -703,9 +698,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev, | |||
703 | } | 698 | } |
704 | 699 | ||
705 | new_state = CAN_STATE_ERROR_PASSIVE; | 700 | new_state = CAN_STATE_ERROR_PASSIVE; |
706 | } | 701 | } else if (status & M16C_STATE_BUS_ERROR) { |
707 | |||
708 | if (status == M16C_STATE_BUS_ERROR) { | ||
709 | if ((priv->can.state < CAN_STATE_ERROR_WARNING) && | 702 | if ((priv->can.state < CAN_STATE_ERROR_WARNING) && |
710 | ((txerr >= 96) || (rxerr >= 96))) { | 703 | ((txerr >= 96) || (rxerr >= 96))) { |
711 | cf->can_id |= CAN_ERR_CRTL; | 704 | cf->can_id |= CAN_ERR_CRTL; |
@@ -715,7 +708,8 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev, | |||
715 | 708 | ||
716 | priv->can.can_stats.error_warning++; | 709 | priv->can.can_stats.error_warning++; |
717 | new_state = CAN_STATE_ERROR_WARNING; | 710 | new_state = CAN_STATE_ERROR_WARNING; |
718 | } else if (priv->can.state > CAN_STATE_ERROR_ACTIVE) { | 711 | } else if ((priv->can.state > CAN_STATE_ERROR_ACTIVE) && |
712 | ((txerr < 96) && (rxerr < 96))) { | ||
719 | cf->can_id |= CAN_ERR_PROT; | 713 | cf->can_id |= CAN_ERR_PROT; |
720 | cf->data[2] = CAN_ERR_PROT_ACTIVE; | 714 | cf->data[2] = CAN_ERR_PROT_ACTIVE; |
721 | 715 | ||
@@ -1590,7 +1584,7 @@ static int kvaser_usb_probe(struct usb_interface *intf, | |||
1590 | { | 1584 | { |
1591 | struct kvaser_usb *dev; | 1585 | struct kvaser_usb *dev; |
1592 | int err = -ENOMEM; | 1586 | int err = -ENOMEM; |
1593 | int i; | 1587 | int i, retry = 3; |
1594 | 1588 | ||
1595 | dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL); | 1589 | dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL); |
1596 | if (!dev) | 1590 | if (!dev) |
@@ -1608,7 +1602,15 @@ static int kvaser_usb_probe(struct usb_interface *intf, | |||
1608 | 1602 | ||
1609 | usb_set_intfdata(intf, dev); | 1603 | usb_set_intfdata(intf, dev); |
1610 | 1604 | ||
1611 | err = kvaser_usb_get_software_info(dev); | 1605 | /* On some x86 laptops, plugging a Kvaser device again after |
1606 | * an unplug makes the firmware always ignore the very first | ||
1607 | * command. For such a case, provide some room for retries | ||
1608 | * instead of completely exiting the driver. | ||
1609 | */ | ||
1610 | do { | ||
1611 | err = kvaser_usb_get_software_info(dev); | ||
1612 | } while (--retry && err == -ETIMEDOUT); | ||
1613 | |||
1612 | if (err) { | 1614 | if (err) { |
1613 | dev_err(&intf->dev, | 1615 | dev_err(&intf->dev, |
1614 | "Cannot get software infos, error %d\n", err); | 1616 | "Cannot get software infos, error %d\n", err); |
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig index 7a5e4aa5415e..77f1f6048ddd 100644 --- a/drivers/net/ethernet/amd/Kconfig +++ b/drivers/net/ethernet/amd/Kconfig | |||
@@ -45,7 +45,7 @@ config AMD8111_ETH | |||
45 | 45 | ||
46 | config LANCE | 46 | config LANCE |
47 | tristate "AMD LANCE and PCnet (AT1500 and NE2100) support" | 47 | tristate "AMD LANCE and PCnet (AT1500 and NE2100) support" |
48 | depends on ISA && ISA_DMA_API | 48 | depends on ISA && ISA_DMA_API && !ARM |
49 | ---help--- | 49 | ---help--- |
50 | If you have a network (Ethernet) card of this type, say Y and read | 50 | If you have a network (Ethernet) card of this type, say Y and read |
51 | the Ethernet-HOWTO, available from | 51 | the Ethernet-HOWTO, available from |
@@ -142,7 +142,7 @@ config PCMCIA_NMCLAN | |||
142 | 142 | ||
143 | config NI65 | 143 | config NI65 |
144 | tristate "NI6510 support" | 144 | tristate "NI6510 support" |
145 | depends on ISA && ISA_DMA_API | 145 | depends on ISA && ISA_DMA_API && !ARM |
146 | ---help--- | 146 | ---help--- |
147 | If you have a network (Ethernet) card of this type, say Y and read | 147 | If you have a network (Ethernet) card of this type, say Y and read |
148 | the Ethernet-HOWTO, available from | 148 | the Ethernet-HOWTO, available from |
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c index e07ce5ff2d48..b10964e8cb54 100644 --- a/drivers/net/ethernet/amd/atarilance.c +++ b/drivers/net/ethernet/amd/atarilance.c | |||
@@ -553,8 +553,8 @@ static unsigned long __init lance_probe1( struct net_device *dev, | |||
553 | if (lp->cardtype == PAM_CARD || | 553 | if (lp->cardtype == PAM_CARD || |
554 | memaddr == (unsigned short *)0xffe00000) { | 554 | memaddr == (unsigned short *)0xffe00000) { |
555 | /* PAMs card and Riebl on ST use level 5 autovector */ | 555 | /* PAMs card and Riebl on ST use level 5 autovector */ |
556 | if (request_irq(IRQ_AUTO_5, lance_interrupt, IRQ_TYPE_PRIO, | 556 | if (request_irq(IRQ_AUTO_5, lance_interrupt, 0, |
557 | "PAM,Riebl-ST Ethernet", dev)) { | 557 | "PAM,Riebl-ST Ethernet", dev)) { |
558 | printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 ); | 558 | printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 ); |
559 | return 0; | 559 | return 0; |
560 | } | 560 | } |
@@ -567,8 +567,8 @@ static unsigned long __init lance_probe1( struct net_device *dev, | |||
567 | printk( "Lance: request for VME interrupt failed\n" ); | 567 | printk( "Lance: request for VME interrupt failed\n" ); |
568 | return 0; | 568 | return 0; |
569 | } | 569 | } |
570 | if (request_irq(irq, lance_interrupt, IRQ_TYPE_PRIO, | 570 | if (request_irq(irq, lance_interrupt, 0, "Riebl-VME Ethernet", |
571 | "Riebl-VME Ethernet", dev)) { | 571 | dev)) { |
572 | printk( "Lance: request for irq %u failed\n", irq ); | 572 | printk( "Lance: request for irq %u failed\n", irq ); |
573 | return 0; | 573 | return 0; |
574 | } | 574 | } |
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c index 5b22764ba88d..27245efe9f50 100644 --- a/drivers/net/ethernet/amd/nmclan_cs.c +++ b/drivers/net/ethernet/amd/nmclan_cs.c | |||
@@ -952,6 +952,8 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) | |||
952 | do { | 952 | do { |
953 | /* WARNING: MACE_IR is a READ/CLEAR port! */ | 953 | /* WARNING: MACE_IR is a READ/CLEAR port! */ |
954 | status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR); | 954 | status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR); |
955 | if (!(status & ~MACE_IMR_DEFAULT) && IntrCnt == MACE_MAX_IR_ITERATIONS) | ||
956 | return IRQ_NONE; | ||
955 | 957 | ||
956 | pr_debug("mace_interrupt: irq 0x%X status 0x%X.\n", irq, status); | 958 | pr_debug("mace_interrupt: irq 0x%X status 0x%X.\n", irq, status); |
957 | 959 | ||
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index 75b08c63d39f..29a09271b64a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h | |||
@@ -767,16 +767,17 @@ | |||
767 | #define MTL_Q_RQOMR 0x40 | 767 | #define MTL_Q_RQOMR 0x40 |
768 | #define MTL_Q_RQMPOCR 0x44 | 768 | #define MTL_Q_RQMPOCR 0x44 |
769 | #define MTL_Q_RQDR 0x4c | 769 | #define MTL_Q_RQDR 0x4c |
770 | #define MTL_Q_RQFCR 0x50 | ||
770 | #define MTL_Q_IER 0x70 | 771 | #define MTL_Q_IER 0x70 |
771 | #define MTL_Q_ISR 0x74 | 772 | #define MTL_Q_ISR 0x74 |
772 | 773 | ||
773 | /* MTL queue register entry bit positions and sizes */ | 774 | /* MTL queue register entry bit positions and sizes */ |
775 | #define MTL_Q_RQFCR_RFA_INDEX 1 | ||
776 | #define MTL_Q_RQFCR_RFA_WIDTH 6 | ||
777 | #define MTL_Q_RQFCR_RFD_INDEX 17 | ||
778 | #define MTL_Q_RQFCR_RFD_WIDTH 6 | ||
774 | #define MTL_Q_RQOMR_EHFC_INDEX 7 | 779 | #define MTL_Q_RQOMR_EHFC_INDEX 7 |
775 | #define MTL_Q_RQOMR_EHFC_WIDTH 1 | 780 | #define MTL_Q_RQOMR_EHFC_WIDTH 1 |
776 | #define MTL_Q_RQOMR_RFA_INDEX 8 | ||
777 | #define MTL_Q_RQOMR_RFA_WIDTH 3 | ||
778 | #define MTL_Q_RQOMR_RFD_INDEX 13 | ||
779 | #define MTL_Q_RQOMR_RFD_WIDTH 3 | ||
780 | #define MTL_Q_RQOMR_RQS_INDEX 16 | 781 | #define MTL_Q_RQOMR_RQS_INDEX 16 |
781 | #define MTL_Q_RQOMR_RQS_WIDTH 9 | 782 | #define MTL_Q_RQOMR_RQS_WIDTH 9 |
782 | #define MTL_Q_RQOMR_RSF_INDEX 5 | 783 | #define MTL_Q_RQOMR_RSF_INDEX 5 |
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 53f5f66ec2ee..4c66cd1d1e60 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c | |||
@@ -2079,10 +2079,10 @@ static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata) | |||
2079 | 2079 | ||
2080 | for (i = 0; i < pdata->rx_q_count; i++) { | 2080 | for (i = 0; i < pdata->rx_q_count; i++) { |
2081 | /* Activate flow control when less than 4k left in fifo */ | 2081 | /* Activate flow control when less than 4k left in fifo */ |
2082 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2); | 2082 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 2); |
2083 | 2083 | ||
2084 | /* De-activate flow control when more than 6k left in fifo */ | 2084 | /* De-activate flow control when more than 6k left in fifo */ |
2085 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFD, 4); | 2085 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 4); |
2086 | } | 2086 | } |
2087 | } | 2087 | } |
2088 | 2088 | ||
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 7bb5f07dbeef..e5ffb2ccb67d 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c | |||
@@ -523,6 +523,7 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata) | |||
523 | hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN); | 523 | hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN); |
524 | hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN); | 524 | hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN); |
525 | hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA); | 525 | hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA); |
526 | hw_feat->rss = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN); | ||
526 | hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC); | 527 | hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC); |
527 | hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, | 528 | hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, |
528 | HASHTBLSZ); | 529 | HASHTBLSZ); |
@@ -552,13 +553,14 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata) | |||
552 | break; | 553 | break; |
553 | } | 554 | } |
554 | 555 | ||
555 | /* The Queue and Channel counts are zero based so increment them | 556 | /* The Queue, Channel and TC counts are zero based so increment them |
556 | * to get the actual number | 557 | * to get the actual number |
557 | */ | 558 | */ |
558 | hw_feat->rx_q_cnt++; | 559 | hw_feat->rx_q_cnt++; |
559 | hw_feat->tx_q_cnt++; | 560 | hw_feat->tx_q_cnt++; |
560 | hw_feat->rx_ch_cnt++; | 561 | hw_feat->rx_ch_cnt++; |
561 | hw_feat->tx_ch_cnt++; | 562 | hw_feat->tx_ch_cnt++; |
563 | hw_feat->tc_cnt++; | ||
562 | 564 | ||
563 | DBGPR("<--xgbe_get_all_hw_features\n"); | 565 | DBGPR("<--xgbe_get_all_hw_features\n"); |
564 | } | 566 | } |
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 83a50280bb70..793f3b73eeff 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c | |||
@@ -369,6 +369,8 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring, | |||
369 | if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc))) | 369 | if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc))) |
370 | break; | 370 | break; |
371 | 371 | ||
372 | /* read fpqnum field after dataaddr field */ | ||
373 | dma_rmb(); | ||
372 | if (is_rx_desc(raw_desc)) | 374 | if (is_rx_desc(raw_desc)) |
373 | ret = xgene_enet_rx_frame(ring, raw_desc); | 375 | ret = xgene_enet_rx_frame(ring, raw_desc); |
374 | else | 376 | else |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 1d1147c93d59..e468ed3f210f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
@@ -3175,7 +3175,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget) | |||
3175 | } | 3175 | } |
3176 | #endif | 3176 | #endif |
3177 | if (!bnx2x_fp_lock_napi(fp)) | 3177 | if (!bnx2x_fp_lock_napi(fp)) |
3178 | return work_done; | 3178 | return budget; |
3179 | 3179 | ||
3180 | for_each_cos_in_tx_queue(fp, cos) | 3180 | for_each_cos_in_tx_queue(fp, cos) |
3181 | if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) | 3181 | if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) |
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig index 7403dff8f14a..905ac5f5d9a6 100644 --- a/drivers/net/ethernet/cirrus/Kconfig +++ b/drivers/net/ethernet/cirrus/Kconfig | |||
@@ -32,7 +32,8 @@ config CS89x0 | |||
32 | will be called cs89x0. | 32 | will be called cs89x0. |
33 | 33 | ||
34 | config CS89x0_PLATFORM | 34 | config CS89x0_PLATFORM |
35 | bool "CS89x0 platform driver support" | 35 | bool "CS89x0 platform driver support" if HAS_IOPORT_MAP |
36 | default !HAS_IOPORT_MAP | ||
36 | depends on CS89x0 | 37 | depends on CS89x0 |
37 | help | 38 | help |
38 | Say Y to compile the cs89x0 driver as a platform driver. This | 39 | Say Y to compile the cs89x0 driver as a platform driver. This |
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index b29e027c476e..e356afa44e7d 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c | |||
@@ -1335,7 +1335,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) | |||
1335 | int err; | 1335 | int err; |
1336 | 1336 | ||
1337 | if (!enic_poll_lock_napi(&enic->rq[rq])) | 1337 | if (!enic_poll_lock_napi(&enic->rq[rq])) |
1338 | return work_done; | 1338 | return budget; |
1339 | /* Service RQ | 1339 | /* Service RQ |
1340 | */ | 1340 | */ |
1341 | 1341 | ||
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 3e1a9c1a67a9..fda12fb32ec7 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c | |||
@@ -1586,7 +1586,7 @@ static int gfar_write_filer_table(struct gfar_private *priv, | |||
1586 | return -EBUSY; | 1586 | return -EBUSY; |
1587 | 1587 | ||
1588 | /* Fill regular entries */ | 1588 | /* Fill regular entries */ |
1589 | for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl); | 1589 | for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop); |
1590 | i++) | 1590 | i++) |
1591 | gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop); | 1591 | gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop); |
1592 | /* Fill the rest with fall-troughs */ | 1592 | /* Fill the rest with fall-troughs */ |
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 63c807c9b21c..edea13b0ee85 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c | |||
@@ -1907,7 +1907,8 @@ static void igbvf_watchdog_task(struct work_struct *work) | |||
1907 | 1907 | ||
1908 | static int igbvf_tso(struct igbvf_adapter *adapter, | 1908 | static int igbvf_tso(struct igbvf_adapter *adapter, |
1909 | struct igbvf_ring *tx_ring, | 1909 | struct igbvf_ring *tx_ring, |
1910 | struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) | 1910 | struct sk_buff *skb, u32 tx_flags, u8 *hdr_len, |
1911 | __be16 protocol) | ||
1911 | { | 1912 | { |
1912 | struct e1000_adv_tx_context_desc *context_desc; | 1913 | struct e1000_adv_tx_context_desc *context_desc; |
1913 | struct igbvf_buffer *buffer_info; | 1914 | struct igbvf_buffer *buffer_info; |
@@ -1927,7 +1928,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter, | |||
1927 | l4len = tcp_hdrlen(skb); | 1928 | l4len = tcp_hdrlen(skb); |
1928 | *hdr_len += l4len; | 1929 | *hdr_len += l4len; |
1929 | 1930 | ||
1930 | if (skb->protocol == htons(ETH_P_IP)) { | 1931 | if (protocol == htons(ETH_P_IP)) { |
1931 | struct iphdr *iph = ip_hdr(skb); | 1932 | struct iphdr *iph = ip_hdr(skb); |
1932 | iph->tot_len = 0; | 1933 | iph->tot_len = 0; |
1933 | iph->check = 0; | 1934 | iph->check = 0; |
@@ -1958,7 +1959,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter, | |||
1958 | /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ | 1959 | /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ |
1959 | tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); | 1960 | tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); |
1960 | 1961 | ||
1961 | if (skb->protocol == htons(ETH_P_IP)) | 1962 | if (protocol == htons(ETH_P_IP)) |
1962 | tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; | 1963 | tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; |
1963 | tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; | 1964 | tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; |
1964 | 1965 | ||
@@ -1984,7 +1985,8 @@ static int igbvf_tso(struct igbvf_adapter *adapter, | |||
1984 | 1985 | ||
1985 | static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, | 1986 | static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, |
1986 | struct igbvf_ring *tx_ring, | 1987 | struct igbvf_ring *tx_ring, |
1987 | struct sk_buff *skb, u32 tx_flags) | 1988 | struct sk_buff *skb, u32 tx_flags, |
1989 | __be16 protocol) | ||
1988 | { | 1990 | { |
1989 | struct e1000_adv_tx_context_desc *context_desc; | 1991 | struct e1000_adv_tx_context_desc *context_desc; |
1990 | unsigned int i; | 1992 | unsigned int i; |
@@ -2011,7 +2013,7 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, | |||
2011 | tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); | 2013 | tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); |
2012 | 2014 | ||
2013 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 2015 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
2014 | switch (skb->protocol) { | 2016 | switch (protocol) { |
2015 | case htons(ETH_P_IP): | 2017 | case htons(ETH_P_IP): |
2016 | tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; | 2018 | tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; |
2017 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) | 2019 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) |
@@ -2211,6 +2213,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, | |||
2211 | u8 hdr_len = 0; | 2213 | u8 hdr_len = 0; |
2212 | int count = 0; | 2214 | int count = 0; |
2213 | int tso = 0; | 2215 | int tso = 0; |
2216 | __be16 protocol = vlan_get_protocol(skb); | ||
2214 | 2217 | ||
2215 | if (test_bit(__IGBVF_DOWN, &adapter->state)) { | 2218 | if (test_bit(__IGBVF_DOWN, &adapter->state)) { |
2216 | dev_kfree_skb_any(skb); | 2219 | dev_kfree_skb_any(skb); |
@@ -2239,13 +2242,13 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, | |||
2239 | tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT); | 2242 | tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT); |
2240 | } | 2243 | } |
2241 | 2244 | ||
2242 | if (skb->protocol == htons(ETH_P_IP)) | 2245 | if (protocol == htons(ETH_P_IP)) |
2243 | tx_flags |= IGBVF_TX_FLAGS_IPV4; | 2246 | tx_flags |= IGBVF_TX_FLAGS_IPV4; |
2244 | 2247 | ||
2245 | first = tx_ring->next_to_use; | 2248 | first = tx_ring->next_to_use; |
2246 | 2249 | ||
2247 | tso = skb_is_gso(skb) ? | 2250 | tso = skb_is_gso(skb) ? |
2248 | igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0; | 2251 | igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len, protocol) : 0; |
2249 | if (unlikely(tso < 0)) { | 2252 | if (unlikely(tso < 0)) { |
2250 | dev_kfree_skb_any(skb); | 2253 | dev_kfree_skb_any(skb); |
2251 | return NETDEV_TX_OK; | 2254 | return NETDEV_TX_OK; |
@@ -2253,7 +2256,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, | |||
2253 | 2256 | ||
2254 | if (tso) | 2257 | if (tso) |
2255 | tx_flags |= IGBVF_TX_FLAGS_TSO; | 2258 | tx_flags |= IGBVF_TX_FLAGS_TSO; |
2256 | else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) && | 2259 | else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags, protocol) && |
2257 | (skb->ip_summed == CHECKSUM_PARTIAL)) | 2260 | (skb->ip_summed == CHECKSUM_PARTIAL)) |
2258 | tx_flags |= IGBVF_TX_FLAGS_CSUM; | 2261 | tx_flags |= IGBVF_TX_FLAGS_CSUM; |
2259 | 2262 | ||
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 2ed2c7de2304..67b02bde179e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -7227,11 +7227,11 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, | |||
7227 | if (!vhdr) | 7227 | if (!vhdr) |
7228 | goto out_drop; | 7228 | goto out_drop; |
7229 | 7229 | ||
7230 | protocol = vhdr->h_vlan_encapsulated_proto; | ||
7231 | tx_flags |= ntohs(vhdr->h_vlan_TCI) << | 7230 | tx_flags |= ntohs(vhdr->h_vlan_TCI) << |
7232 | IXGBE_TX_FLAGS_VLAN_SHIFT; | 7231 | IXGBE_TX_FLAGS_VLAN_SHIFT; |
7233 | tx_flags |= IXGBE_TX_FLAGS_SW_VLAN; | 7232 | tx_flags |= IXGBE_TX_FLAGS_SW_VLAN; |
7234 | } | 7233 | } |
7234 | protocol = vlan_get_protocol(skb); | ||
7235 | 7235 | ||
7236 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && | 7236 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && |
7237 | adapter->ptp_clock && | 7237 | adapter->ptp_clock && |
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 62a0d8e0f17d..38c7a0be8197 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | |||
@@ -3099,7 +3099,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, | |||
3099 | /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ | 3099 | /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ |
3100 | type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; | 3100 | type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; |
3101 | 3101 | ||
3102 | if (skb->protocol == htons(ETH_P_IP)) { | 3102 | if (first->protocol == htons(ETH_P_IP)) { |
3103 | struct iphdr *iph = ip_hdr(skb); | 3103 | struct iphdr *iph = ip_hdr(skb); |
3104 | iph->tot_len = 0; | 3104 | iph->tot_len = 0; |
3105 | iph->check = 0; | 3105 | iph->check = 0; |
@@ -3156,7 +3156,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, | |||
3156 | 3156 | ||
3157 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 3157 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
3158 | u8 l4_hdr = 0; | 3158 | u8 l4_hdr = 0; |
3159 | switch (skb->protocol) { | 3159 | switch (first->protocol) { |
3160 | case htons(ETH_P_IP): | 3160 | case htons(ETH_P_IP): |
3161 | vlan_macip_lens |= skb_network_header_len(skb); | 3161 | vlan_macip_lens |= skb_network_header_len(skb); |
3162 | type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; | 3162 | type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; |
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index a62fc38f045e..1c75829eb166 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c | |||
@@ -192,6 +192,10 @@ static char mv643xx_eth_driver_version[] = "1.4"; | |||
192 | #define IS_TSO_HEADER(txq, addr) \ | 192 | #define IS_TSO_HEADER(txq, addr) \ |
193 | ((addr >= txq->tso_hdrs_dma) && \ | 193 | ((addr >= txq->tso_hdrs_dma) && \ |
194 | (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE)) | 194 | (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE)) |
195 | |||
196 | #define DESC_DMA_MAP_SINGLE 0 | ||
197 | #define DESC_DMA_MAP_PAGE 1 | ||
198 | |||
195 | /* | 199 | /* |
196 | * RX/TX descriptors. | 200 | * RX/TX descriptors. |
197 | */ | 201 | */ |
@@ -362,6 +366,7 @@ struct tx_queue { | |||
362 | dma_addr_t tso_hdrs_dma; | 366 | dma_addr_t tso_hdrs_dma; |
363 | 367 | ||
364 | struct tx_desc *tx_desc_area; | 368 | struct tx_desc *tx_desc_area; |
369 | char *tx_desc_mapping; /* array to track the type of the dma mapping */ | ||
365 | dma_addr_t tx_desc_dma; | 370 | dma_addr_t tx_desc_dma; |
366 | int tx_desc_area_size; | 371 | int tx_desc_area_size; |
367 | 372 | ||
@@ -750,6 +755,7 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq, | |||
750 | if (txq->tx_curr_desc == txq->tx_ring_size) | 755 | if (txq->tx_curr_desc == txq->tx_ring_size) |
751 | txq->tx_curr_desc = 0; | 756 | txq->tx_curr_desc = 0; |
752 | desc = &txq->tx_desc_area[tx_index]; | 757 | desc = &txq->tx_desc_area[tx_index]; |
758 | txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; | ||
753 | 759 | ||
754 | desc->l4i_chk = 0; | 760 | desc->l4i_chk = 0; |
755 | desc->byte_cnt = length; | 761 | desc->byte_cnt = length; |
@@ -879,14 +885,13 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) | |||
879 | skb_frag_t *this_frag; | 885 | skb_frag_t *this_frag; |
880 | int tx_index; | 886 | int tx_index; |
881 | struct tx_desc *desc; | 887 | struct tx_desc *desc; |
882 | void *addr; | ||
883 | 888 | ||
884 | this_frag = &skb_shinfo(skb)->frags[frag]; | 889 | this_frag = &skb_shinfo(skb)->frags[frag]; |
885 | addr = page_address(this_frag->page.p) + this_frag->page_offset; | ||
886 | tx_index = txq->tx_curr_desc++; | 890 | tx_index = txq->tx_curr_desc++; |
887 | if (txq->tx_curr_desc == txq->tx_ring_size) | 891 | if (txq->tx_curr_desc == txq->tx_ring_size) |
888 | txq->tx_curr_desc = 0; | 892 | txq->tx_curr_desc = 0; |
889 | desc = &txq->tx_desc_area[tx_index]; | 893 | desc = &txq->tx_desc_area[tx_index]; |
894 | txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE; | ||
890 | 895 | ||
891 | /* | 896 | /* |
892 | * The last fragment will generate an interrupt | 897 | * The last fragment will generate an interrupt |
@@ -902,8 +907,9 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) | |||
902 | 907 | ||
903 | desc->l4i_chk = 0; | 908 | desc->l4i_chk = 0; |
904 | desc->byte_cnt = skb_frag_size(this_frag); | 909 | desc->byte_cnt = skb_frag_size(this_frag); |
905 | desc->buf_ptr = dma_map_single(mp->dev->dev.parent, addr, | 910 | desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent, |
906 | desc->byte_cnt, DMA_TO_DEVICE); | 911 | this_frag, 0, desc->byte_cnt, |
912 | DMA_TO_DEVICE); | ||
907 | } | 913 | } |
908 | } | 914 | } |
909 | 915 | ||
@@ -936,6 +942,7 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb, | |||
936 | if (txq->tx_curr_desc == txq->tx_ring_size) | 942 | if (txq->tx_curr_desc == txq->tx_ring_size) |
937 | txq->tx_curr_desc = 0; | 943 | txq->tx_curr_desc = 0; |
938 | desc = &txq->tx_desc_area[tx_index]; | 944 | desc = &txq->tx_desc_area[tx_index]; |
945 | txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; | ||
939 | 946 | ||
940 | if (nr_frags) { | 947 | if (nr_frags) { |
941 | txq_submit_frag_skb(txq, skb); | 948 | txq_submit_frag_skb(txq, skb); |
@@ -1047,9 +1054,12 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) | |||
1047 | int tx_index; | 1054 | int tx_index; |
1048 | struct tx_desc *desc; | 1055 | struct tx_desc *desc; |
1049 | u32 cmd_sts; | 1056 | u32 cmd_sts; |
1057 | char desc_dma_map; | ||
1050 | 1058 | ||
1051 | tx_index = txq->tx_used_desc; | 1059 | tx_index = txq->tx_used_desc; |
1052 | desc = &txq->tx_desc_area[tx_index]; | 1060 | desc = &txq->tx_desc_area[tx_index]; |
1061 | desc_dma_map = txq->tx_desc_mapping[tx_index]; | ||
1062 | |||
1053 | cmd_sts = desc->cmd_sts; | 1063 | cmd_sts = desc->cmd_sts; |
1054 | 1064 | ||
1055 | if (cmd_sts & BUFFER_OWNED_BY_DMA) { | 1065 | if (cmd_sts & BUFFER_OWNED_BY_DMA) { |
@@ -1065,9 +1075,19 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) | |||
1065 | reclaimed++; | 1075 | reclaimed++; |
1066 | txq->tx_desc_count--; | 1076 | txq->tx_desc_count--; |
1067 | 1077 | ||
1068 | if (!IS_TSO_HEADER(txq, desc->buf_ptr)) | 1078 | if (!IS_TSO_HEADER(txq, desc->buf_ptr)) { |
1069 | dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr, | 1079 | |
1070 | desc->byte_cnt, DMA_TO_DEVICE); | 1080 | if (desc_dma_map == DESC_DMA_MAP_PAGE) |
1081 | dma_unmap_page(mp->dev->dev.parent, | ||
1082 | desc->buf_ptr, | ||
1083 | desc->byte_cnt, | ||
1084 | DMA_TO_DEVICE); | ||
1085 | else | ||
1086 | dma_unmap_single(mp->dev->dev.parent, | ||
1087 | desc->buf_ptr, | ||
1088 | desc->byte_cnt, | ||
1089 | DMA_TO_DEVICE); | ||
1090 | } | ||
1071 | 1091 | ||
1072 | if (cmd_sts & TX_ENABLE_INTERRUPT) { | 1092 | if (cmd_sts & TX_ENABLE_INTERRUPT) { |
1073 | struct sk_buff *skb = __skb_dequeue(&txq->tx_skb); | 1093 | struct sk_buff *skb = __skb_dequeue(&txq->tx_skb); |
@@ -1996,6 +2016,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index) | |||
1996 | struct tx_queue *txq = mp->txq + index; | 2016 | struct tx_queue *txq = mp->txq + index; |
1997 | struct tx_desc *tx_desc; | 2017 | struct tx_desc *tx_desc; |
1998 | int size; | 2018 | int size; |
2019 | int ret; | ||
1999 | int i; | 2020 | int i; |
2000 | 2021 | ||
2001 | txq->index = index; | 2022 | txq->index = index; |
@@ -2048,18 +2069,34 @@ static int txq_init(struct mv643xx_eth_private *mp, int index) | |||
2048 | nexti * sizeof(struct tx_desc); | 2069 | nexti * sizeof(struct tx_desc); |
2049 | } | 2070 | } |
2050 | 2071 | ||
2072 | txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char), | ||
2073 | GFP_KERNEL); | ||
2074 | if (!txq->tx_desc_mapping) { | ||
2075 | ret = -ENOMEM; | ||
2076 | goto err_free_desc_area; | ||
2077 | } | ||
2078 | |||
2051 | /* Allocate DMA buffers for TSO MAC/IP/TCP headers */ | 2079 | /* Allocate DMA buffers for TSO MAC/IP/TCP headers */ |
2052 | txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent, | 2080 | txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent, |
2053 | txq->tx_ring_size * TSO_HEADER_SIZE, | 2081 | txq->tx_ring_size * TSO_HEADER_SIZE, |
2054 | &txq->tso_hdrs_dma, GFP_KERNEL); | 2082 | &txq->tso_hdrs_dma, GFP_KERNEL); |
2055 | if (txq->tso_hdrs == NULL) { | 2083 | if (txq->tso_hdrs == NULL) { |
2056 | dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, | 2084 | ret = -ENOMEM; |
2057 | txq->tx_desc_area, txq->tx_desc_dma); | 2085 | goto err_free_desc_mapping; |
2058 | return -ENOMEM; | ||
2059 | } | 2086 | } |
2060 | skb_queue_head_init(&txq->tx_skb); | 2087 | skb_queue_head_init(&txq->tx_skb); |
2061 | 2088 | ||
2062 | return 0; | 2089 | return 0; |
2090 | |||
2091 | err_free_desc_mapping: | ||
2092 | kfree(txq->tx_desc_mapping); | ||
2093 | err_free_desc_area: | ||
2094 | if (index == 0 && size <= mp->tx_desc_sram_size) | ||
2095 | iounmap(txq->tx_desc_area); | ||
2096 | else | ||
2097 | dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, | ||
2098 | txq->tx_desc_area, txq->tx_desc_dma); | ||
2099 | return ret; | ||
2063 | } | 2100 | } |
2064 | 2101 | ||
2065 | static void txq_deinit(struct tx_queue *txq) | 2102 | static void txq_deinit(struct tx_queue *txq) |
@@ -2077,6 +2114,8 @@ static void txq_deinit(struct tx_queue *txq) | |||
2077 | else | 2114 | else |
2078 | dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, | 2115 | dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, |
2079 | txq->tx_desc_area, txq->tx_desc_dma); | 2116 | txq->tx_desc_area, txq->tx_desc_dma); |
2117 | kfree(txq->tx_desc_mapping); | ||
2118 | |||
2080 | if (txq->tso_hdrs) | 2119 | if (txq->tso_hdrs) |
2081 | dma_free_coherent(mp->dev->dev.parent, | 2120 | dma_free_coherent(mp->dev->dev.parent, |
2082 | txq->tx_ring_size * TSO_HEADER_SIZE, | 2121 | txq->tx_ring_size * TSO_HEADER_SIZE, |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index bdd4eea2247c..210691c89b6c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h | |||
@@ -235,7 +235,8 @@ do { \ | |||
235 | extern int mlx4_log_num_mgm_entry_size; | 235 | extern int mlx4_log_num_mgm_entry_size; |
236 | extern int log_mtts_per_seg; | 236 | extern int log_mtts_per_seg; |
237 | 237 | ||
238 | #define MLX4_MAX_NUM_SLAVES (MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF) | 238 | #define MLX4_MAX_NUM_SLAVES (min(MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF, \ |
239 | MLX4_MFUNC_MAX)) | ||
239 | #define ALL_SLAVES 0xff | 240 | #define ALL_SLAVES 0xff |
240 | 241 | ||
241 | struct mlx4_bitmap { | 242 | struct mlx4_bitmap { |
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 613037584d08..c531c8ae1be4 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | |||
@@ -2388,7 +2388,10 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget) | |||
2388 | 2388 | ||
2389 | work_done = netxen_process_rcv_ring(sds_ring, budget); | 2389 | work_done = netxen_process_rcv_ring(sds_ring, budget); |
2390 | 2390 | ||
2391 | if ((work_done < budget) && tx_complete) { | 2391 | if (!tx_complete) |
2392 | work_done = budget; | ||
2393 | |||
2394 | if (work_done < budget) { | ||
2392 | napi_complete(&sds_ring->napi); | 2395 | napi_complete(&sds_ring->napi); |
2393 | if (test_bit(__NX_DEV_UP, &adapter->state)) | 2396 | if (test_bit(__NX_DEV_UP, &adapter->state)) |
2394 | netxen_nic_enable_int(sds_ring); | 2397 | netxen_nic_enable_int(sds_ring); |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 18e5de72e9b4..4e1f58cf19ce 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | |||
@@ -967,7 +967,12 @@ static int qlcnic_poll(struct napi_struct *napi, int budget) | |||
967 | tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, | 967 | tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, |
968 | budget); | 968 | budget); |
969 | work_done = qlcnic_process_rcv_ring(sds_ring, budget); | 969 | work_done = qlcnic_process_rcv_ring(sds_ring, budget); |
970 | if ((work_done < budget) && tx_complete) { | 970 | |
971 | /* Check if we need a repoll */ | ||
972 | if (!tx_complete) | ||
973 | work_done = budget; | ||
974 | |||
975 | if (work_done < budget) { | ||
971 | napi_complete(&sds_ring->napi); | 976 | napi_complete(&sds_ring->napi); |
972 | if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { | 977 | if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { |
973 | qlcnic_enable_sds_intr(adapter, sds_ring); | 978 | qlcnic_enable_sds_intr(adapter, sds_ring); |
@@ -992,6 +997,9 @@ static int qlcnic_tx_poll(struct napi_struct *napi, int budget) | |||
992 | napi_complete(&tx_ring->napi); | 997 | napi_complete(&tx_ring->napi); |
993 | if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) | 998 | if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) |
994 | qlcnic_enable_tx_intr(adapter, tx_ring); | 999 | qlcnic_enable_tx_intr(adapter, tx_ring); |
1000 | } else { | ||
1001 | /* As qlcnic_process_cmd_ring() returned 0, we need a repoll*/ | ||
1002 | work_done = budget; | ||
995 | } | 1003 | } |
996 | 1004 | ||
997 | return work_done; | 1005 | return work_done; |
@@ -1950,7 +1958,12 @@ static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget) | |||
1950 | 1958 | ||
1951 | tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget); | 1959 | tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget); |
1952 | work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget); | 1960 | work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget); |
1953 | if ((work_done < budget) && tx_complete) { | 1961 | |
1962 | /* Check if we need a repoll */ | ||
1963 | if (!tx_complete) | ||
1964 | work_done = budget; | ||
1965 | |||
1966 | if (work_done < budget) { | ||
1954 | napi_complete(&sds_ring->napi); | 1967 | napi_complete(&sds_ring->napi); |
1955 | qlcnic_enable_sds_intr(adapter, sds_ring); | 1968 | qlcnic_enable_sds_intr(adapter, sds_ring); |
1956 | } | 1969 | } |
@@ -1973,7 +1986,12 @@ static int qlcnic_83xx_poll(struct napi_struct *napi, int budget) | |||
1973 | 1986 | ||
1974 | tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget); | 1987 | tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget); |
1975 | work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget); | 1988 | work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget); |
1976 | if ((work_done < budget) && tx_complete) { | 1989 | |
1990 | /* Check if we need a repoll */ | ||
1991 | if (!tx_complete) | ||
1992 | work_done = budget; | ||
1993 | |||
1994 | if (work_done < budget) { | ||
1977 | napi_complete(&sds_ring->napi); | 1995 | napi_complete(&sds_ring->napi); |
1978 | qlcnic_enable_sds_intr(adapter, sds_ring); | 1996 | qlcnic_enable_sds_intr(adapter, sds_ring); |
1979 | } | 1997 | } |
@@ -1995,6 +2013,9 @@ static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget) | |||
1995 | napi_complete(&tx_ring->napi); | 2013 | napi_complete(&tx_ring->napi); |
1996 | if (test_bit(__QLCNIC_DEV_UP , &adapter->state)) | 2014 | if (test_bit(__QLCNIC_DEV_UP , &adapter->state)) |
1997 | qlcnic_enable_tx_intr(adapter, tx_ring); | 2015 | qlcnic_enable_tx_intr(adapter, tx_ring); |
2016 | } else { | ||
2017 | /* need a repoll */ | ||
2018 | work_done = budget; | ||
1998 | } | 2019 | } |
1999 | 2020 | ||
2000 | return work_done; | 2021 | return work_done; |
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 6c904a6cad2a..ef5aed3b1225 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c | |||
@@ -2351,23 +2351,29 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev, | |||
2351 | { | 2351 | { |
2352 | struct ql_adapter *qdev = netdev_priv(ndev); | 2352 | struct ql_adapter *qdev = netdev_priv(ndev); |
2353 | int status = 0; | 2353 | int status = 0; |
2354 | bool need_restart = netif_running(ndev); | ||
2354 | 2355 | ||
2355 | status = ql_adapter_down(qdev); | 2356 | if (need_restart) { |
2356 | if (status) { | 2357 | status = ql_adapter_down(qdev); |
2357 | netif_err(qdev, link, qdev->ndev, | 2358 | if (status) { |
2358 | "Failed to bring down the adapter\n"); | 2359 | netif_err(qdev, link, qdev->ndev, |
2359 | return status; | 2360 | "Failed to bring down the adapter\n"); |
2361 | return status; | ||
2362 | } | ||
2360 | } | 2363 | } |
2361 | 2364 | ||
2362 | /* update the features with resent change */ | 2365 | /* update the features with resent change */ |
2363 | ndev->features = features; | 2366 | ndev->features = features; |
2364 | 2367 | ||
2365 | status = ql_adapter_up(qdev); | 2368 | if (need_restart) { |
2366 | if (status) { | 2369 | status = ql_adapter_up(qdev); |
2367 | netif_err(qdev, link, qdev->ndev, | 2370 | if (status) { |
2368 | "Failed to bring up the adapter\n"); | 2371 | netif_err(qdev, link, qdev->ndev, |
2369 | return status; | 2372 | "Failed to bring up the adapter\n"); |
2373 | return status; | ||
2374 | } | ||
2370 | } | 2375 | } |
2376 | |||
2371 | return status; | 2377 | return status; |
2372 | } | 2378 | } |
2373 | 2379 | ||
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 14a1c5cec3a5..fa274e0f47d7 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -4915,7 +4915,7 @@ static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp) | |||
4915 | 4915 | ||
4916 | RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); | 4916 | RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); |
4917 | RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1); | 4917 | RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1); |
4918 | rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT); | 4918 | rtl_tx_performance_tweak(tp->pci_dev, PCI_EXP_DEVCTL_READRQ_512B); |
4919 | } | 4919 | } |
4920 | 4920 | ||
4921 | static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp) | 4921 | static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp) |
@@ -4948,7 +4948,7 @@ static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp) | |||
4948 | RTL_W8(MaxTxPacketSize, 0x3f); | 4948 | RTL_W8(MaxTxPacketSize, 0x3f); |
4949 | RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); | 4949 | RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); |
4950 | RTL_W8(Config4, RTL_R8(Config4) | 0x01); | 4950 | RTL_W8(Config4, RTL_R8(Config4) | 0x01); |
4951 | rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT); | 4951 | rtl_tx_performance_tweak(tp->pci_dev, PCI_EXP_DEVCTL_READRQ_512B); |
4952 | } | 4952 | } |
4953 | 4953 | ||
4954 | static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp) | 4954 | static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp) |
@@ -4964,7 +4964,7 @@ static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp) | |||
4964 | static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp) | 4964 | static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp) |
4965 | { | 4965 | { |
4966 | rtl_tx_performance_tweak(tp->pci_dev, | 4966 | rtl_tx_performance_tweak(tp->pci_dev, |
4967 | (0x2 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN); | 4967 | PCI_EXP_DEVCTL_READRQ_512B | PCI_EXP_DEVCTL_NOSNOOP_EN); |
4968 | } | 4968 | } |
4969 | 4969 | ||
4970 | static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp) | 4970 | static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp) |
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 6576243222af..04283fe0e6a7 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c | |||
@@ -396,6 +396,9 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = { | |||
396 | [TSU_ADRL31] = 0x01fc, | 396 | [TSU_ADRL31] = 0x01fc, |
397 | }; | 397 | }; |
398 | 398 | ||
399 | static void sh_eth_rcv_snd_disable(struct net_device *ndev); | ||
400 | static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev); | ||
401 | |||
399 | static bool sh_eth_is_gether(struct sh_eth_private *mdp) | 402 | static bool sh_eth_is_gether(struct sh_eth_private *mdp) |
400 | { | 403 | { |
401 | return mdp->reg_offset == sh_eth_offset_gigabit; | 404 | return mdp->reg_offset == sh_eth_offset_gigabit; |
@@ -1120,6 +1123,7 @@ static void sh_eth_ring_format(struct net_device *ndev) | |||
1120 | int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring; | 1123 | int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring; |
1121 | int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; | 1124 | int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; |
1122 | int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; | 1125 | int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; |
1126 | dma_addr_t dma_addr; | ||
1123 | 1127 | ||
1124 | mdp->cur_rx = 0; | 1128 | mdp->cur_rx = 0; |
1125 | mdp->cur_tx = 0; | 1129 | mdp->cur_tx = 0; |
@@ -1133,7 +1137,6 @@ static void sh_eth_ring_format(struct net_device *ndev) | |||
1133 | /* skb */ | 1137 | /* skb */ |
1134 | mdp->rx_skbuff[i] = NULL; | 1138 | mdp->rx_skbuff[i] = NULL; |
1135 | skb = netdev_alloc_skb(ndev, skbuff_size); | 1139 | skb = netdev_alloc_skb(ndev, skbuff_size); |
1136 | mdp->rx_skbuff[i] = skb; | ||
1137 | if (skb == NULL) | 1140 | if (skb == NULL) |
1138 | break; | 1141 | break; |
1139 | sh_eth_set_receive_align(skb); | 1142 | sh_eth_set_receive_align(skb); |
@@ -1142,9 +1145,15 @@ static void sh_eth_ring_format(struct net_device *ndev) | |||
1142 | rxdesc = &mdp->rx_ring[i]; | 1145 | rxdesc = &mdp->rx_ring[i]; |
1143 | /* The size of the buffer is a multiple of 16 bytes. */ | 1146 | /* The size of the buffer is a multiple of 16 bytes. */ |
1144 | rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); | 1147 | rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); |
1145 | dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length, | 1148 | dma_addr = dma_map_single(&ndev->dev, skb->data, |
1146 | DMA_FROM_DEVICE); | 1149 | rxdesc->buffer_length, |
1147 | rxdesc->addr = virt_to_phys(skb->data); | 1150 | DMA_FROM_DEVICE); |
1151 | if (dma_mapping_error(&ndev->dev, dma_addr)) { | ||
1152 | kfree_skb(skb); | ||
1153 | break; | ||
1154 | } | ||
1155 | mdp->rx_skbuff[i] = skb; | ||
1156 | rxdesc->addr = dma_addr; | ||
1148 | rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); | 1157 | rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); |
1149 | 1158 | ||
1150 | /* Rx descriptor address set */ | 1159 | /* Rx descriptor address set */ |
@@ -1316,8 +1325,10 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start) | |||
1316 | RFLR); | 1325 | RFLR); |
1317 | 1326 | ||
1318 | sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR); | 1327 | sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR); |
1319 | if (start) | 1328 | if (start) { |
1329 | mdp->irq_enabled = true; | ||
1320 | sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); | 1330 | sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); |
1331 | } | ||
1321 | 1332 | ||
1322 | /* PAUSE Prohibition */ | 1333 | /* PAUSE Prohibition */ |
1323 | val = (sh_eth_read(ndev, ECMR) & ECMR_DM) | | 1334 | val = (sh_eth_read(ndev, ECMR) & ECMR_DM) | |
@@ -1356,6 +1367,33 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start) | |||
1356 | return ret; | 1367 | return ret; |
1357 | } | 1368 | } |
1358 | 1369 | ||
1370 | static void sh_eth_dev_exit(struct net_device *ndev) | ||
1371 | { | ||
1372 | struct sh_eth_private *mdp = netdev_priv(ndev); | ||
1373 | int i; | ||
1374 | |||
1375 | /* Deactivate all TX descriptors, so DMA should stop at next | ||
1376 | * packet boundary if it's currently running | ||
1377 | */ | ||
1378 | for (i = 0; i < mdp->num_tx_ring; i++) | ||
1379 | mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT); | ||
1380 | |||
1381 | /* Disable TX FIFO egress to MAC */ | ||
1382 | sh_eth_rcv_snd_disable(ndev); | ||
1383 | |||
1384 | /* Stop RX DMA at next packet boundary */ | ||
1385 | sh_eth_write(ndev, 0, EDRRR); | ||
1386 | |||
1387 | /* Aside from TX DMA, we can't tell when the hardware is | ||
1388 | * really stopped, so we need to reset to make sure. | ||
1389 | * Before doing that, wait for long enough to *probably* | ||
1390 | * finish transmitting the last packet and poll stats. | ||
1391 | */ | ||
1392 | msleep(2); /* max frame time at 10 Mbps < 1250 us */ | ||
1393 | sh_eth_get_stats(ndev); | ||
1394 | sh_eth_reset(ndev); | ||
1395 | } | ||
1396 | |||
1359 | /* free Tx skb function */ | 1397 | /* free Tx skb function */ |
1360 | static int sh_eth_txfree(struct net_device *ndev) | 1398 | static int sh_eth_txfree(struct net_device *ndev) |
1361 | { | 1399 | { |
@@ -1400,6 +1438,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) | |||
1400 | u16 pkt_len = 0; | 1438 | u16 pkt_len = 0; |
1401 | u32 desc_status; | 1439 | u32 desc_status; |
1402 | int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; | 1440 | int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; |
1441 | dma_addr_t dma_addr; | ||
1403 | 1442 | ||
1404 | boguscnt = min(boguscnt, *quota); | 1443 | boguscnt = min(boguscnt, *quota); |
1405 | limit = boguscnt; | 1444 | limit = boguscnt; |
@@ -1447,9 +1486,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) | |||
1447 | mdp->rx_skbuff[entry] = NULL; | 1486 | mdp->rx_skbuff[entry] = NULL; |
1448 | if (mdp->cd->rpadir) | 1487 | if (mdp->cd->rpadir) |
1449 | skb_reserve(skb, NET_IP_ALIGN); | 1488 | skb_reserve(skb, NET_IP_ALIGN); |
1450 | dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr, | 1489 | dma_unmap_single(&ndev->dev, rxdesc->addr, |
1451 | ALIGN(mdp->rx_buf_sz, 16), | 1490 | ALIGN(mdp->rx_buf_sz, 16), |
1452 | DMA_FROM_DEVICE); | 1491 | DMA_FROM_DEVICE); |
1453 | skb_put(skb, pkt_len); | 1492 | skb_put(skb, pkt_len); |
1454 | skb->protocol = eth_type_trans(skb, ndev); | 1493 | skb->protocol = eth_type_trans(skb, ndev); |
1455 | netif_receive_skb(skb); | 1494 | netif_receive_skb(skb); |
@@ -1469,15 +1508,20 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) | |||
1469 | 1508 | ||
1470 | if (mdp->rx_skbuff[entry] == NULL) { | 1509 | if (mdp->rx_skbuff[entry] == NULL) { |
1471 | skb = netdev_alloc_skb(ndev, skbuff_size); | 1510 | skb = netdev_alloc_skb(ndev, skbuff_size); |
1472 | mdp->rx_skbuff[entry] = skb; | ||
1473 | if (skb == NULL) | 1511 | if (skb == NULL) |
1474 | break; /* Better luck next round. */ | 1512 | break; /* Better luck next round. */ |
1475 | sh_eth_set_receive_align(skb); | 1513 | sh_eth_set_receive_align(skb); |
1476 | dma_map_single(&ndev->dev, skb->data, | 1514 | dma_addr = dma_map_single(&ndev->dev, skb->data, |
1477 | rxdesc->buffer_length, DMA_FROM_DEVICE); | 1515 | rxdesc->buffer_length, |
1516 | DMA_FROM_DEVICE); | ||
1517 | if (dma_mapping_error(&ndev->dev, dma_addr)) { | ||
1518 | kfree_skb(skb); | ||
1519 | break; | ||
1520 | } | ||
1521 | mdp->rx_skbuff[entry] = skb; | ||
1478 | 1522 | ||
1479 | skb_checksum_none_assert(skb); | 1523 | skb_checksum_none_assert(skb); |
1480 | rxdesc->addr = virt_to_phys(skb->data); | 1524 | rxdesc->addr = dma_addr; |
1481 | } | 1525 | } |
1482 | if (entry >= mdp->num_rx_ring - 1) | 1526 | if (entry >= mdp->num_rx_ring - 1) |
1483 | rxdesc->status |= | 1527 | rxdesc->status |= |
@@ -1573,7 +1617,6 @@ ignore_link: | |||
1573 | if (intr_status & EESR_RFRMER) { | 1617 | if (intr_status & EESR_RFRMER) { |
1574 | /* Receive Frame Overflow int */ | 1618 | /* Receive Frame Overflow int */ |
1575 | ndev->stats.rx_frame_errors++; | 1619 | ndev->stats.rx_frame_errors++; |
1576 | netif_err(mdp, rx_err, ndev, "Receive Abort\n"); | ||
1577 | } | 1620 | } |
1578 | } | 1621 | } |
1579 | 1622 | ||
@@ -1592,13 +1635,11 @@ ignore_link: | |||
1592 | if (intr_status & EESR_RDE) { | 1635 | if (intr_status & EESR_RDE) { |
1593 | /* Receive Descriptor Empty int */ | 1636 | /* Receive Descriptor Empty int */ |
1594 | ndev->stats.rx_over_errors++; | 1637 | ndev->stats.rx_over_errors++; |
1595 | netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n"); | ||
1596 | } | 1638 | } |
1597 | 1639 | ||
1598 | if (intr_status & EESR_RFE) { | 1640 | if (intr_status & EESR_RFE) { |
1599 | /* Receive FIFO Overflow int */ | 1641 | /* Receive FIFO Overflow int */ |
1600 | ndev->stats.rx_fifo_errors++; | 1642 | ndev->stats.rx_fifo_errors++; |
1601 | netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n"); | ||
1602 | } | 1643 | } |
1603 | 1644 | ||
1604 | if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) { | 1645 | if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) { |
@@ -1653,7 +1694,12 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) | |||
1653 | if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check)) | 1694 | if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check)) |
1654 | ret = IRQ_HANDLED; | 1695 | ret = IRQ_HANDLED; |
1655 | else | 1696 | else |
1656 | goto other_irq; | 1697 | goto out; |
1698 | |||
1699 | if (!likely(mdp->irq_enabled)) { | ||
1700 | sh_eth_write(ndev, 0, EESIPR); | ||
1701 | goto out; | ||
1702 | } | ||
1657 | 1703 | ||
1658 | if (intr_status & EESR_RX_CHECK) { | 1704 | if (intr_status & EESR_RX_CHECK) { |
1659 | if (napi_schedule_prep(&mdp->napi)) { | 1705 | if (napi_schedule_prep(&mdp->napi)) { |
@@ -1684,7 +1730,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) | |||
1684 | sh_eth_error(ndev, intr_status); | 1730 | sh_eth_error(ndev, intr_status); |
1685 | } | 1731 | } |
1686 | 1732 | ||
1687 | other_irq: | 1733 | out: |
1688 | spin_unlock(&mdp->lock); | 1734 | spin_unlock(&mdp->lock); |
1689 | 1735 | ||
1690 | return ret; | 1736 | return ret; |
@@ -1712,7 +1758,8 @@ static int sh_eth_poll(struct napi_struct *napi, int budget) | |||
1712 | napi_complete(napi); | 1758 | napi_complete(napi); |
1713 | 1759 | ||
1714 | /* Reenable Rx interrupts */ | 1760 | /* Reenable Rx interrupts */ |
1715 | sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); | 1761 | if (mdp->irq_enabled) |
1762 | sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); | ||
1716 | out: | 1763 | out: |
1717 | return budget - quota; | 1764 | return budget - quota; |
1718 | } | 1765 | } |
@@ -1968,40 +2015,50 @@ static int sh_eth_set_ringparam(struct net_device *ndev, | |||
1968 | return -EINVAL; | 2015 | return -EINVAL; |
1969 | 2016 | ||
1970 | if (netif_running(ndev)) { | 2017 | if (netif_running(ndev)) { |
2018 | netif_device_detach(ndev); | ||
1971 | netif_tx_disable(ndev); | 2019 | netif_tx_disable(ndev); |
1972 | /* Disable interrupts by clearing the interrupt mask. */ | 2020 | |
1973 | sh_eth_write(ndev, 0x0000, EESIPR); | 2021 | /* Serialise with the interrupt handler and NAPI, then |
1974 | /* Stop the chip's Tx and Rx processes. */ | 2022 | * disable interrupts. We have to clear the |
1975 | sh_eth_write(ndev, 0, EDTRR); | 2023 | * irq_enabled flag first to ensure that interrupts |
1976 | sh_eth_write(ndev, 0, EDRRR); | 2024 | * won't be re-enabled. |
2025 | */ | ||
2026 | mdp->irq_enabled = false; | ||
1977 | synchronize_irq(ndev->irq); | 2027 | synchronize_irq(ndev->irq); |
1978 | } | 2028 | napi_synchronize(&mdp->napi); |
2029 | sh_eth_write(ndev, 0x0000, EESIPR); | ||
1979 | 2030 | ||
1980 | /* Free all the skbuffs in the Rx queue. */ | 2031 | sh_eth_dev_exit(ndev); |
1981 | sh_eth_ring_free(ndev); | 2032 | |
1982 | /* Free DMA buffer */ | 2033 | /* Free all the skbuffs in the Rx queue. */ |
1983 | sh_eth_free_dma_buffer(mdp); | 2034 | sh_eth_ring_free(ndev); |
2035 | /* Free DMA buffer */ | ||
2036 | sh_eth_free_dma_buffer(mdp); | ||
2037 | } | ||
1984 | 2038 | ||
1985 | /* Set new parameters */ | 2039 | /* Set new parameters */ |
1986 | mdp->num_rx_ring = ring->rx_pending; | 2040 | mdp->num_rx_ring = ring->rx_pending; |
1987 | mdp->num_tx_ring = ring->tx_pending; | 2041 | mdp->num_tx_ring = ring->tx_pending; |
1988 | 2042 | ||
1989 | ret = sh_eth_ring_init(ndev); | ||
1990 | if (ret < 0) { | ||
1991 | netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", __func__); | ||
1992 | return ret; | ||
1993 | } | ||
1994 | ret = sh_eth_dev_init(ndev, false); | ||
1995 | if (ret < 0) { | ||
1996 | netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__); | ||
1997 | return ret; | ||
1998 | } | ||
1999 | |||
2000 | if (netif_running(ndev)) { | 2043 | if (netif_running(ndev)) { |
2044 | ret = sh_eth_ring_init(ndev); | ||
2045 | if (ret < 0) { | ||
2046 | netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", | ||
2047 | __func__); | ||
2048 | return ret; | ||
2049 | } | ||
2050 | ret = sh_eth_dev_init(ndev, false); | ||
2051 | if (ret < 0) { | ||
2052 | netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", | ||
2053 | __func__); | ||
2054 | return ret; | ||
2055 | } | ||
2056 | |||
2057 | mdp->irq_enabled = true; | ||
2001 | sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); | 2058 | sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); |
2002 | /* Setting the Rx mode will start the Rx process. */ | 2059 | /* Setting the Rx mode will start the Rx process. */ |
2003 | sh_eth_write(ndev, EDRRR_R, EDRRR); | 2060 | sh_eth_write(ndev, EDRRR_R, EDRRR); |
2004 | netif_wake_queue(ndev); | 2061 | netif_device_attach(ndev); |
2005 | } | 2062 | } |
2006 | 2063 | ||
2007 | return 0; | 2064 | return 0; |
@@ -2117,6 +2174,9 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
2117 | } | 2174 | } |
2118 | spin_unlock_irqrestore(&mdp->lock, flags); | 2175 | spin_unlock_irqrestore(&mdp->lock, flags); |
2119 | 2176 | ||
2177 | if (skb_padto(skb, ETH_ZLEN)) | ||
2178 | return NETDEV_TX_OK; | ||
2179 | |||
2120 | entry = mdp->cur_tx % mdp->num_tx_ring; | 2180 | entry = mdp->cur_tx % mdp->num_tx_ring; |
2121 | mdp->tx_skbuff[entry] = skb; | 2181 | mdp->tx_skbuff[entry] = skb; |
2122 | txdesc = &mdp->tx_ring[entry]; | 2182 | txdesc = &mdp->tx_ring[entry]; |
@@ -2126,10 +2186,11 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
2126 | skb->len + 2); | 2186 | skb->len + 2); |
2127 | txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len, | 2187 | txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len, |
2128 | DMA_TO_DEVICE); | 2188 | DMA_TO_DEVICE); |
2129 | if (skb->len < ETH_ZLEN) | 2189 | if (dma_mapping_error(&ndev->dev, txdesc->addr)) { |
2130 | txdesc->buffer_length = ETH_ZLEN; | 2190 | kfree_skb(skb); |
2131 | else | 2191 | return NETDEV_TX_OK; |
2132 | txdesc->buffer_length = skb->len; | 2192 | } |
2193 | txdesc->buffer_length = skb->len; | ||
2133 | 2194 | ||
2134 | if (entry >= mdp->num_tx_ring - 1) | 2195 | if (entry >= mdp->num_tx_ring - 1) |
2135 | txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); | 2196 | txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); |
@@ -2181,14 +2242,17 @@ static int sh_eth_close(struct net_device *ndev) | |||
2181 | 2242 | ||
2182 | netif_stop_queue(ndev); | 2243 | netif_stop_queue(ndev); |
2183 | 2244 | ||
2184 | /* Disable interrupts by clearing the interrupt mask. */ | 2245 | /* Serialise with the interrupt handler and NAPI, then disable |
2246 | * interrupts. We have to clear the irq_enabled flag first to | ||
2247 | * ensure that interrupts won't be re-enabled. | ||
2248 | */ | ||
2249 | mdp->irq_enabled = false; | ||
2250 | synchronize_irq(ndev->irq); | ||
2251 | napi_disable(&mdp->napi); | ||
2185 | sh_eth_write(ndev, 0x0000, EESIPR); | 2252 | sh_eth_write(ndev, 0x0000, EESIPR); |
2186 | 2253 | ||
2187 | /* Stop the chip's Tx and Rx processes. */ | 2254 | sh_eth_dev_exit(ndev); |
2188 | sh_eth_write(ndev, 0, EDTRR); | ||
2189 | sh_eth_write(ndev, 0, EDRRR); | ||
2190 | 2255 | ||
2191 | sh_eth_get_stats(ndev); | ||
2192 | /* PHY Disconnect */ | 2256 | /* PHY Disconnect */ |
2193 | if (mdp->phydev) { | 2257 | if (mdp->phydev) { |
2194 | phy_stop(mdp->phydev); | 2258 | phy_stop(mdp->phydev); |
@@ -2198,8 +2262,6 @@ static int sh_eth_close(struct net_device *ndev) | |||
2198 | 2262 | ||
2199 | free_irq(ndev->irq, ndev); | 2263 | free_irq(ndev->irq, ndev); |
2200 | 2264 | ||
2201 | napi_disable(&mdp->napi); | ||
2202 | |||
2203 | /* Free all the skbuffs in the Rx queue. */ | 2265 | /* Free all the skbuffs in the Rx queue. */ |
2204 | sh_eth_ring_free(ndev); | 2266 | sh_eth_ring_free(ndev); |
2205 | 2267 | ||
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 71f5de1171bd..332d3c16d483 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h | |||
@@ -513,6 +513,7 @@ struct sh_eth_private { | |||
513 | u32 rx_buf_sz; /* Based on MTU+slack. */ | 513 | u32 rx_buf_sz; /* Based on MTU+slack. */ |
514 | int edmac_endian; | 514 | int edmac_endian; |
515 | struct napi_struct napi; | 515 | struct napi_struct napi; |
516 | bool irq_enabled; | ||
516 | /* MII transceiver section. */ | 517 | /* MII transceiver section. */ |
517 | u32 phy_id; /* PHY ID */ | 518 | u32 phy_id; /* PHY ID */ |
518 | struct mii_bus *mii_bus; /* MDIO bus control */ | 519 | struct mii_bus *mii_bus; /* MDIO bus control */ |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 8c6b7c1651e5..cf62ff4c8c56 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -2778,6 +2778,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv) | |||
2778 | * @addr: iobase memory address | 2778 | * @addr: iobase memory address |
2779 | * Description: this is the main probe function used to | 2779 | * Description: this is the main probe function used to |
2780 | * call the alloc_etherdev, allocate the priv structure. | 2780 | * call the alloc_etherdev, allocate the priv structure. |
2781 | * Return: | ||
2782 | * on success the new private structure is returned, otherwise the error | ||
2783 | * pointer. | ||
2781 | */ | 2784 | */ |
2782 | struct stmmac_priv *stmmac_dvr_probe(struct device *device, | 2785 | struct stmmac_priv *stmmac_dvr_probe(struct device *device, |
2783 | struct plat_stmmacenet_data *plat_dat, | 2786 | struct plat_stmmacenet_data *plat_dat, |
@@ -2789,7 +2792,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, | |||
2789 | 2792 | ||
2790 | ndev = alloc_etherdev(sizeof(struct stmmac_priv)); | 2793 | ndev = alloc_etherdev(sizeof(struct stmmac_priv)); |
2791 | if (!ndev) | 2794 | if (!ndev) |
2792 | return NULL; | 2795 | return ERR_PTR(-ENOMEM); |
2793 | 2796 | ||
2794 | SET_NETDEV_DEV(ndev, device); | 2797 | SET_NETDEV_DEV(ndev, device); |
2795 | 2798 | ||
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index d2835bf7b4fb..3699b98d5b2c 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c | |||
@@ -1119,6 +1119,7 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies) | |||
1119 | skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size; | 1119 | skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size; |
1120 | skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type; | 1120 | skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type; |
1121 | } | 1121 | } |
1122 | nskb->queue_mapping = skb->queue_mapping; | ||
1122 | dev_kfree_skb(skb); | 1123 | dev_kfree_skb(skb); |
1123 | skb = nskb; | 1124 | skb = nskb; |
1124 | } | 1125 | } |
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index e068d48b0f21..a39131f494ec 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
@@ -1683,6 +1683,19 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, | |||
1683 | if (vid == priv->data.default_vlan) | 1683 | if (vid == priv->data.default_vlan) |
1684 | return 0; | 1684 | return 0; |
1685 | 1685 | ||
1686 | if (priv->data.dual_emac) { | ||
1687 | /* In dual EMAC, reserved VLAN id should not be used for | ||
1688 | * creating VLAN interfaces as this can break the dual | ||
1689 | * EMAC port separation | ||
1690 | */ | ||
1691 | int i; | ||
1692 | |||
1693 | for (i = 0; i < priv->data.slaves; i++) { | ||
1694 | if (vid == priv->slaves[i].port_vlan) | ||
1695 | return -EINVAL; | ||
1696 | } | ||
1697 | } | ||
1698 | |||
1686 | dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid); | 1699 | dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid); |
1687 | return cpsw_add_vlan_ale_entry(priv, vid); | 1700 | return cpsw_add_vlan_ale_entry(priv, vid); |
1688 | } | 1701 | } |
@@ -1696,6 +1709,15 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, | |||
1696 | if (vid == priv->data.default_vlan) | 1709 | if (vid == priv->data.default_vlan) |
1697 | return 0; | 1710 | return 0; |
1698 | 1711 | ||
1712 | if (priv->data.dual_emac) { | ||
1713 | int i; | ||
1714 | |||
1715 | for (i = 0; i < priv->data.slaves; i++) { | ||
1716 | if (vid == priv->slaves[i].port_vlan) | ||
1717 | return -EINVAL; | ||
1718 | } | ||
1719 | } | ||
1720 | |||
1699 | dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid); | 1721 | dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid); |
1700 | ret = cpsw_ale_del_vlan(priv->ale, vid, 0); | 1722 | ret = cpsw_ale_del_vlan(priv->ale, vid, 0); |
1701 | if (ret != 0) | 1723 | if (ret != 0) |
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 9f49c0129a78..7cd4eb38abfa 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c | |||
@@ -716,7 +716,7 @@ int netvsc_send(struct hv_device *device, | |||
716 | u64 req_id; | 716 | u64 req_id; |
717 | unsigned int section_index = NETVSC_INVALID_INDEX; | 717 | unsigned int section_index = NETVSC_INVALID_INDEX; |
718 | u32 msg_size = 0; | 718 | u32 msg_size = 0; |
719 | struct sk_buff *skb; | 719 | struct sk_buff *skb = NULL; |
720 | u16 q_idx = packet->q_idx; | 720 | u16 q_idx = packet->q_idx; |
721 | 721 | ||
722 | 722 | ||
@@ -743,8 +743,6 @@ int netvsc_send(struct hv_device *device, | |||
743 | packet); | 743 | packet); |
744 | skb = (struct sk_buff *) | 744 | skb = (struct sk_buff *) |
745 | (unsigned long)packet->send_completion_tid; | 745 | (unsigned long)packet->send_completion_tid; |
746 | if (skb) | ||
747 | dev_kfree_skb_any(skb); | ||
748 | packet->page_buf_cnt = 0; | 746 | packet->page_buf_cnt = 0; |
749 | } | 747 | } |
750 | } | 748 | } |
@@ -810,6 +808,13 @@ int netvsc_send(struct hv_device *device, | |||
810 | packet, ret); | 808 | packet, ret); |
811 | } | 809 | } |
812 | 810 | ||
811 | if (ret != 0) { | ||
812 | if (section_index != NETVSC_INVALID_INDEX) | ||
813 | netvsc_free_send_slot(net_device, section_index); | ||
814 | } else if (skb) { | ||
815 | dev_kfree_skb_any(skb); | ||
816 | } | ||
817 | |||
813 | return ret; | 818 | return ret; |
814 | } | 819 | } |
815 | 820 | ||
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index a14d87783245..2e195289ddf4 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c | |||
@@ -377,9 +377,11 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb) | |||
377 | }; | 377 | }; |
378 | 378 | ||
379 | dst = ip6_route_output(dev_net(dev), NULL, &fl6); | 379 | dst = ip6_route_output(dev_net(dev), NULL, &fl6); |
380 | if (IS_ERR(dst)) | 380 | if (dst->error) { |
381 | ret = dst->error; | ||
382 | dst_release(dst); | ||
381 | goto err; | 383 | goto err; |
382 | 384 | } | |
383 | skb_dst_drop(skb); | 385 | skb_dst_drop(skb); |
384 | skb_dst_set(skb, dst); | 386 | skb_dst_set(skb, dst); |
385 | err = ip6_local_out(skb); | 387 | err = ip6_local_out(skb); |
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 7df221788cd4..919f4fccc322 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
18 | #include <linux/uio.h> | 18 | #include <linux/uio.h> |
19 | 19 | ||
20 | #include <net/ipv6.h> | ||
21 | #include <net/net_namespace.h> | 20 | #include <net/net_namespace.h> |
22 | #include <net/rtnetlink.h> | 21 | #include <net/rtnetlink.h> |
23 | #include <net/sock.h> | 22 | #include <net/sock.h> |
@@ -81,7 +80,7 @@ static struct cdev macvtap_cdev; | |||
81 | static const struct proto_ops macvtap_socket_ops; | 80 | static const struct proto_ops macvtap_socket_ops; |
82 | 81 | ||
83 | #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ | 82 | #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ |
84 | NETIF_F_TSO6) | 83 | NETIF_F_TSO6 | NETIF_F_UFO) |
85 | #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) | 84 | #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) |
86 | #define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG) | 85 | #define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG) |
87 | 86 | ||
@@ -586,11 +585,7 @@ static int macvtap_skb_from_vnet_hdr(struct macvtap_queue *q, | |||
586 | gso_type = SKB_GSO_TCPV6; | 585 | gso_type = SKB_GSO_TCPV6; |
587 | break; | 586 | break; |
588 | case VIRTIO_NET_HDR_GSO_UDP: | 587 | case VIRTIO_NET_HDR_GSO_UDP: |
589 | pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n", | ||
590 | current->comm); | ||
591 | gso_type = SKB_GSO_UDP; | 588 | gso_type = SKB_GSO_UDP; |
592 | if (skb->protocol == htons(ETH_P_IPV6)) | ||
593 | ipv6_proxy_select_ident(skb); | ||
594 | break; | 589 | break; |
595 | default: | 590 | default: |
596 | return -EINVAL; | 591 | return -EINVAL; |
@@ -636,6 +631,8 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q, | |||
636 | vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4; | 631 | vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4; |
637 | else if (sinfo->gso_type & SKB_GSO_TCPV6) | 632 | else if (sinfo->gso_type & SKB_GSO_TCPV6) |
638 | vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6; | 633 | vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6; |
634 | else if (sinfo->gso_type & SKB_GSO_UDP) | ||
635 | vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP; | ||
639 | else | 636 | else |
640 | BUG(); | 637 | BUG(); |
641 | if (sinfo->gso_type & SKB_GSO_TCP_ECN) | 638 | if (sinfo->gso_type & SKB_GSO_TCP_ECN) |
@@ -965,6 +962,9 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg) | |||
965 | if (arg & TUN_F_TSO6) | 962 | if (arg & TUN_F_TSO6) |
966 | feature_mask |= NETIF_F_TSO6; | 963 | feature_mask |= NETIF_F_TSO6; |
967 | } | 964 | } |
965 | |||
966 | if (arg & TUN_F_UFO) | ||
967 | feature_mask |= NETIF_F_UFO; | ||
968 | } | 968 | } |
969 | 969 | ||
970 | /* tun/tap driver inverts the usage for TSO offloads, where | 970 | /* tun/tap driver inverts the usage for TSO offloads, where |
@@ -975,7 +975,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg) | |||
975 | * When user space turns off TSO, we turn off GSO/LRO so that | 975 | * When user space turns off TSO, we turn off GSO/LRO so that |
976 | * user-space will not receive TSO frames. | 976 | * user-space will not receive TSO frames. |
977 | */ | 977 | */ |
978 | if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6)) | 978 | if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO)) |
979 | features |= RX_OFFLOADS; | 979 | features |= RX_OFFLOADS; |
980 | else | 980 | else |
981 | features &= ~RX_OFFLOADS; | 981 | features &= ~RX_OFFLOADS; |
@@ -1090,7 +1090,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, | |||
1090 | case TUNSETOFFLOAD: | 1090 | case TUNSETOFFLOAD: |
1091 | /* let the user check for future flags */ | 1091 | /* let the user check for future flags */ |
1092 | if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | | 1092 | if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | |
1093 | TUN_F_TSO_ECN)) | 1093 | TUN_F_TSO_ECN | TUN_F_UFO)) |
1094 | return -EINVAL; | 1094 | return -EINVAL; |
1095 | 1095 | ||
1096 | rtnl_lock(); | 1096 | rtnl_lock(); |
diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c index 602c625d95d5..b5edc7f96a39 100644 --- a/drivers/net/ppp/ppp_deflate.c +++ b/drivers/net/ppp/ppp_deflate.c | |||
@@ -246,7 +246,7 @@ static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf, | |||
246 | /* | 246 | /* |
247 | * See if we managed to reduce the size of the packet. | 247 | * See if we managed to reduce the size of the packet. |
248 | */ | 248 | */ |
249 | if (olen < isize) { | 249 | if (olen < isize && olen <= osize) { |
250 | state->stats.comp_bytes += olen; | 250 | state->stats.comp_bytes += olen; |
251 | state->stats.comp_packets++; | 251 | state->stats.comp_packets++; |
252 | } else { | 252 | } else { |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 8c8dc16839a7..10f9e4021b5a 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -65,7 +65,6 @@ | |||
65 | #include <linux/nsproxy.h> | 65 | #include <linux/nsproxy.h> |
66 | #include <linux/virtio_net.h> | 66 | #include <linux/virtio_net.h> |
67 | #include <linux/rcupdate.h> | 67 | #include <linux/rcupdate.h> |
68 | #include <net/ipv6.h> | ||
69 | #include <net/net_namespace.h> | 68 | #include <net/net_namespace.h> |
70 | #include <net/netns/generic.h> | 69 | #include <net/netns/generic.h> |
71 | #include <net/rtnetlink.h> | 70 | #include <net/rtnetlink.h> |
@@ -187,7 +186,7 @@ struct tun_struct { | |||
187 | struct net_device *dev; | 186 | struct net_device *dev; |
188 | netdev_features_t set_features; | 187 | netdev_features_t set_features; |
189 | #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \ | 188 | #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \ |
190 | NETIF_F_TSO6) | 189 | NETIF_F_TSO6|NETIF_F_UFO) |
191 | 190 | ||
192 | int vnet_hdr_sz; | 191 | int vnet_hdr_sz; |
193 | int sndbuf; | 192 | int sndbuf; |
@@ -1167,8 +1166,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, | |||
1167 | break; | 1166 | break; |
1168 | } | 1167 | } |
1169 | 1168 | ||
1170 | skb_reset_network_header(skb); | ||
1171 | |||
1172 | if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) { | 1169 | if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) { |
1173 | pr_debug("GSO!\n"); | 1170 | pr_debug("GSO!\n"); |
1174 | switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { | 1171 | switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { |
@@ -1179,20 +1176,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, | |||
1179 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; | 1176 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; |
1180 | break; | 1177 | break; |
1181 | case VIRTIO_NET_HDR_GSO_UDP: | 1178 | case VIRTIO_NET_HDR_GSO_UDP: |
1182 | { | ||
1183 | static bool warned; | ||
1184 | |||
1185 | if (!warned) { | ||
1186 | warned = true; | ||
1187 | netdev_warn(tun->dev, | ||
1188 | "%s: using disabled UFO feature; please fix this program\n", | ||
1189 | current->comm); | ||
1190 | } | ||
1191 | skb_shinfo(skb)->gso_type = SKB_GSO_UDP; | 1179 | skb_shinfo(skb)->gso_type = SKB_GSO_UDP; |
1192 | if (skb->protocol == htons(ETH_P_IPV6)) | ||
1193 | ipv6_proxy_select_ident(skb); | ||
1194 | break; | 1180 | break; |
1195 | } | ||
1196 | default: | 1181 | default: |
1197 | tun->dev->stats.rx_frame_errors++; | 1182 | tun->dev->stats.rx_frame_errors++; |
1198 | kfree_skb(skb); | 1183 | kfree_skb(skb); |
@@ -1221,6 +1206,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, | |||
1221 | skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; | 1206 | skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; |
1222 | } | 1207 | } |
1223 | 1208 | ||
1209 | skb_reset_network_header(skb); | ||
1224 | skb_probe_transport_header(skb, 0); | 1210 | skb_probe_transport_header(skb, 0); |
1225 | 1211 | ||
1226 | rxhash = skb_get_hash(skb); | 1212 | rxhash = skb_get_hash(skb); |
@@ -1298,6 +1284,8 @@ static ssize_t tun_put_user(struct tun_struct *tun, | |||
1298 | gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; | 1284 | gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; |
1299 | else if (sinfo->gso_type & SKB_GSO_TCPV6) | 1285 | else if (sinfo->gso_type & SKB_GSO_TCPV6) |
1300 | gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; | 1286 | gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; |
1287 | else if (sinfo->gso_type & SKB_GSO_UDP) | ||
1288 | gso.gso_type = VIRTIO_NET_HDR_GSO_UDP; | ||
1301 | else { | 1289 | else { |
1302 | pr_err("unexpected GSO type: " | 1290 | pr_err("unexpected GSO type: " |
1303 | "0x%x, gso_size %d, hdr_len %d\n", | 1291 | "0x%x, gso_size %d, hdr_len %d\n", |
@@ -1746,6 +1734,11 @@ static int set_offload(struct tun_struct *tun, unsigned long arg) | |||
1746 | features |= NETIF_F_TSO6; | 1734 | features |= NETIF_F_TSO6; |
1747 | arg &= ~(TUN_F_TSO4|TUN_F_TSO6); | 1735 | arg &= ~(TUN_F_TSO4|TUN_F_TSO6); |
1748 | } | 1736 | } |
1737 | |||
1738 | if (arg & TUN_F_UFO) { | ||
1739 | features |= NETIF_F_UFO; | ||
1740 | arg &= ~TUN_F_UFO; | ||
1741 | } | ||
1749 | } | 1742 | } |
1750 | 1743 | ||
1751 | /* This gives the user a way to test for new features in future by | 1744 | /* This gives the user a way to test for new features in future by |
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c index 99b69af14274..4a1e9c489f1f 100644 --- a/drivers/net/usb/sr9700.c +++ b/drivers/net/usb/sr9700.c | |||
@@ -77,7 +77,7 @@ static int wait_phy_eeprom_ready(struct usbnet *dev, int phy) | |||
77 | int ret; | 77 | int ret; |
78 | 78 | ||
79 | udelay(1); | 79 | udelay(1); |
80 | ret = sr_read_reg(dev, EPCR, &tmp); | 80 | ret = sr_read_reg(dev, SR_EPCR, &tmp); |
81 | if (ret < 0) | 81 | if (ret < 0) |
82 | return ret; | 82 | return ret; |
83 | 83 | ||
@@ -98,15 +98,15 @@ static int sr_share_read_word(struct usbnet *dev, int phy, u8 reg, | |||
98 | 98 | ||
99 | mutex_lock(&dev->phy_mutex); | 99 | mutex_lock(&dev->phy_mutex); |
100 | 100 | ||
101 | sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg); | 101 | sr_write_reg(dev, SR_EPAR, phy ? (reg | EPAR_PHY_ADR) : reg); |
102 | sr_write_reg(dev, EPCR, phy ? (EPCR_EPOS | EPCR_ERPRR) : EPCR_ERPRR); | 102 | sr_write_reg(dev, SR_EPCR, phy ? (EPCR_EPOS | EPCR_ERPRR) : EPCR_ERPRR); |
103 | 103 | ||
104 | ret = wait_phy_eeprom_ready(dev, phy); | 104 | ret = wait_phy_eeprom_ready(dev, phy); |
105 | if (ret < 0) | 105 | if (ret < 0) |
106 | goto out_unlock; | 106 | goto out_unlock; |
107 | 107 | ||
108 | sr_write_reg(dev, EPCR, 0x0); | 108 | sr_write_reg(dev, SR_EPCR, 0x0); |
109 | ret = sr_read(dev, EPDR, 2, value); | 109 | ret = sr_read(dev, SR_EPDR, 2, value); |
110 | 110 | ||
111 | netdev_dbg(dev->net, "read shared %d 0x%02x returned 0x%04x, %d\n", | 111 | netdev_dbg(dev->net, "read shared %d 0x%02x returned 0x%04x, %d\n", |
112 | phy, reg, *value, ret); | 112 | phy, reg, *value, ret); |
@@ -123,19 +123,19 @@ static int sr_share_write_word(struct usbnet *dev, int phy, u8 reg, | |||
123 | 123 | ||
124 | mutex_lock(&dev->phy_mutex); | 124 | mutex_lock(&dev->phy_mutex); |
125 | 125 | ||
126 | ret = sr_write(dev, EPDR, 2, &value); | 126 | ret = sr_write(dev, SR_EPDR, 2, &value); |
127 | if (ret < 0) | 127 | if (ret < 0) |
128 | goto out_unlock; | 128 | goto out_unlock; |
129 | 129 | ||
130 | sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg); | 130 | sr_write_reg(dev, SR_EPAR, phy ? (reg | EPAR_PHY_ADR) : reg); |
131 | sr_write_reg(dev, EPCR, phy ? (EPCR_WEP | EPCR_EPOS | EPCR_ERPRW) : | 131 | sr_write_reg(dev, SR_EPCR, phy ? (EPCR_WEP | EPCR_EPOS | EPCR_ERPRW) : |
132 | (EPCR_WEP | EPCR_ERPRW)); | 132 | (EPCR_WEP | EPCR_ERPRW)); |
133 | 133 | ||
134 | ret = wait_phy_eeprom_ready(dev, phy); | 134 | ret = wait_phy_eeprom_ready(dev, phy); |
135 | if (ret < 0) | 135 | if (ret < 0) |
136 | goto out_unlock; | 136 | goto out_unlock; |
137 | 137 | ||
138 | sr_write_reg(dev, EPCR, 0x0); | 138 | sr_write_reg(dev, SR_EPCR, 0x0); |
139 | 139 | ||
140 | out_unlock: | 140 | out_unlock: |
141 | mutex_unlock(&dev->phy_mutex); | 141 | mutex_unlock(&dev->phy_mutex); |
@@ -188,7 +188,7 @@ static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc) | |||
188 | if (loc == MII_BMSR) { | 188 | if (loc == MII_BMSR) { |
189 | u8 value; | 189 | u8 value; |
190 | 190 | ||
191 | sr_read_reg(dev, NSR, &value); | 191 | sr_read_reg(dev, SR_NSR, &value); |
192 | if (value & NSR_LINKST) | 192 | if (value & NSR_LINKST) |
193 | rc = 1; | 193 | rc = 1; |
194 | } | 194 | } |
@@ -228,7 +228,7 @@ static u32 sr9700_get_link(struct net_device *netdev) | |||
228 | int rc = 0; | 228 | int rc = 0; |
229 | 229 | ||
230 | /* Get the Link Status directly */ | 230 | /* Get the Link Status directly */ |
231 | sr_read_reg(dev, NSR, &value); | 231 | sr_read_reg(dev, SR_NSR, &value); |
232 | if (value & NSR_LINKST) | 232 | if (value & NSR_LINKST) |
233 | rc = 1; | 233 | rc = 1; |
234 | 234 | ||
@@ -281,8 +281,8 @@ static void sr9700_set_multicast(struct net_device *netdev) | |||
281 | } | 281 | } |
282 | } | 282 | } |
283 | 283 | ||
284 | sr_write_async(dev, MAR, SR_MCAST_SIZE, hashes); | 284 | sr_write_async(dev, SR_MAR, SR_MCAST_SIZE, hashes); |
285 | sr_write_reg_async(dev, RCR, rx_ctl); | 285 | sr_write_reg_async(dev, SR_RCR, rx_ctl); |
286 | } | 286 | } |
287 | 287 | ||
288 | static int sr9700_set_mac_address(struct net_device *netdev, void *p) | 288 | static int sr9700_set_mac_address(struct net_device *netdev, void *p) |
@@ -297,7 +297,7 @@ static int sr9700_set_mac_address(struct net_device *netdev, void *p) | |||
297 | } | 297 | } |
298 | 298 | ||
299 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | 299 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); |
300 | sr_write_async(dev, PAR, 6, netdev->dev_addr); | 300 | sr_write_async(dev, SR_PAR, 6, netdev->dev_addr); |
301 | 301 | ||
302 | return 0; | 302 | return 0; |
303 | } | 303 | } |
@@ -340,7 +340,7 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf) | |||
340 | mii->phy_id_mask = 0x1f; | 340 | mii->phy_id_mask = 0x1f; |
341 | mii->reg_num_mask = 0x1f; | 341 | mii->reg_num_mask = 0x1f; |
342 | 342 | ||
343 | sr_write_reg(dev, NCR, NCR_RST); | 343 | sr_write_reg(dev, SR_NCR, NCR_RST); |
344 | udelay(20); | 344 | udelay(20); |
345 | 345 | ||
346 | /* read MAC | 346 | /* read MAC |
@@ -348,17 +348,17 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf) | |||
348 | * EEPROM automatically to PAR. In case there is no EEPROM externally, | 348 | * EEPROM automatically to PAR. In case there is no EEPROM externally, |
349 | * a default MAC address is stored in PAR for making chip work properly. | 349 | * a default MAC address is stored in PAR for making chip work properly. |
350 | */ | 350 | */ |
351 | if (sr_read(dev, PAR, ETH_ALEN, netdev->dev_addr) < 0) { | 351 | if (sr_read(dev, SR_PAR, ETH_ALEN, netdev->dev_addr) < 0) { |
352 | netdev_err(netdev, "Error reading MAC address\n"); | 352 | netdev_err(netdev, "Error reading MAC address\n"); |
353 | ret = -ENODEV; | 353 | ret = -ENODEV; |
354 | goto out; | 354 | goto out; |
355 | } | 355 | } |
356 | 356 | ||
357 | /* power up and reset phy */ | 357 | /* power up and reset phy */ |
358 | sr_write_reg(dev, PRR, PRR_PHY_RST); | 358 | sr_write_reg(dev, SR_PRR, PRR_PHY_RST); |
359 | /* at least 10ms, here 20ms for safe */ | 359 | /* at least 10ms, here 20ms for safe */ |
360 | mdelay(20); | 360 | mdelay(20); |
361 | sr_write_reg(dev, PRR, 0); | 361 | sr_write_reg(dev, SR_PRR, 0); |
362 | /* at least 1ms, here 2ms for reading right register */ | 362 | /* at least 1ms, here 2ms for reading right register */ |
363 | udelay(2 * 1000); | 363 | udelay(2 * 1000); |
364 | 364 | ||
diff --git a/drivers/net/usb/sr9700.h b/drivers/net/usb/sr9700.h index fd687c575e74..258b030277e7 100644 --- a/drivers/net/usb/sr9700.h +++ b/drivers/net/usb/sr9700.h | |||
@@ -14,13 +14,13 @@ | |||
14 | /* sr9700 spec. register table on Linux platform */ | 14 | /* sr9700 spec. register table on Linux platform */ |
15 | 15 | ||
16 | /* Network Control Reg */ | 16 | /* Network Control Reg */ |
17 | #define NCR 0x00 | 17 | #define SR_NCR 0x00 |
18 | #define NCR_RST (1 << 0) | 18 | #define NCR_RST (1 << 0) |
19 | #define NCR_LBK (3 << 1) | 19 | #define NCR_LBK (3 << 1) |
20 | #define NCR_FDX (1 << 3) | 20 | #define NCR_FDX (1 << 3) |
21 | #define NCR_WAKEEN (1 << 6) | 21 | #define NCR_WAKEEN (1 << 6) |
22 | /* Network Status Reg */ | 22 | /* Network Status Reg */ |
23 | #define NSR 0x01 | 23 | #define SR_NSR 0x01 |
24 | #define NSR_RXRDY (1 << 0) | 24 | #define NSR_RXRDY (1 << 0) |
25 | #define NSR_RXOV (1 << 1) | 25 | #define NSR_RXOV (1 << 1) |
26 | #define NSR_TX1END (1 << 2) | 26 | #define NSR_TX1END (1 << 2) |
@@ -30,7 +30,7 @@ | |||
30 | #define NSR_LINKST (1 << 6) | 30 | #define NSR_LINKST (1 << 6) |
31 | #define NSR_SPEED (1 << 7) | 31 | #define NSR_SPEED (1 << 7) |
32 | /* Tx Control Reg */ | 32 | /* Tx Control Reg */ |
33 | #define TCR 0x02 | 33 | #define SR_TCR 0x02 |
34 | #define TCR_CRC_DIS (1 << 1) | 34 | #define TCR_CRC_DIS (1 << 1) |
35 | #define TCR_PAD_DIS (1 << 2) | 35 | #define TCR_PAD_DIS (1 << 2) |
36 | #define TCR_LC_CARE (1 << 3) | 36 | #define TCR_LC_CARE (1 << 3) |
@@ -38,7 +38,7 @@ | |||
38 | #define TCR_EXCECM (1 << 5) | 38 | #define TCR_EXCECM (1 << 5) |
39 | #define TCR_LF_EN (1 << 6) | 39 | #define TCR_LF_EN (1 << 6) |
40 | /* Tx Status Reg for Packet Index 1 */ | 40 | /* Tx Status Reg for Packet Index 1 */ |
41 | #define TSR1 0x03 | 41 | #define SR_TSR1 0x03 |
42 | #define TSR1_EC (1 << 2) | 42 | #define TSR1_EC (1 << 2) |
43 | #define TSR1_COL (1 << 3) | 43 | #define TSR1_COL (1 << 3) |
44 | #define TSR1_LC (1 << 4) | 44 | #define TSR1_LC (1 << 4) |
@@ -46,7 +46,7 @@ | |||
46 | #define TSR1_LOC (1 << 6) | 46 | #define TSR1_LOC (1 << 6) |
47 | #define TSR1_TLF (1 << 7) | 47 | #define TSR1_TLF (1 << 7) |
48 | /* Tx Status Reg for Packet Index 2 */ | 48 | /* Tx Status Reg for Packet Index 2 */ |
49 | #define TSR2 0x04 | 49 | #define SR_TSR2 0x04 |
50 | #define TSR2_EC (1 << 2) | 50 | #define TSR2_EC (1 << 2) |
51 | #define TSR2_COL (1 << 3) | 51 | #define TSR2_COL (1 << 3) |
52 | #define TSR2_LC (1 << 4) | 52 | #define TSR2_LC (1 << 4) |
@@ -54,7 +54,7 @@ | |||
54 | #define TSR2_LOC (1 << 6) | 54 | #define TSR2_LOC (1 << 6) |
55 | #define TSR2_TLF (1 << 7) | 55 | #define TSR2_TLF (1 << 7) |
56 | /* Rx Control Reg*/ | 56 | /* Rx Control Reg*/ |
57 | #define RCR 0x05 | 57 | #define SR_RCR 0x05 |
58 | #define RCR_RXEN (1 << 0) | 58 | #define RCR_RXEN (1 << 0) |
59 | #define RCR_PRMSC (1 << 1) | 59 | #define RCR_PRMSC (1 << 1) |
60 | #define RCR_RUNT (1 << 2) | 60 | #define RCR_RUNT (1 << 2) |
@@ -62,87 +62,87 @@ | |||
62 | #define RCR_DIS_CRC (1 << 4) | 62 | #define RCR_DIS_CRC (1 << 4) |
63 | #define RCR_DIS_LONG (1 << 5) | 63 | #define RCR_DIS_LONG (1 << 5) |
64 | /* Rx Status Reg */ | 64 | /* Rx Status Reg */ |
65 | #define RSR 0x06 | 65 | #define SR_RSR 0x06 |
66 | #define RSR_AE (1 << 2) | 66 | #define RSR_AE (1 << 2) |
67 | #define RSR_MF (1 << 6) | 67 | #define RSR_MF (1 << 6) |
68 | #define RSR_RF (1 << 7) | 68 | #define RSR_RF (1 << 7) |
69 | /* Rx Overflow Counter Reg */ | 69 | /* Rx Overflow Counter Reg */ |
70 | #define ROCR 0x07 | 70 | #define SR_ROCR 0x07 |
71 | #define ROCR_ROC (0x7F << 0) | 71 | #define ROCR_ROC (0x7F << 0) |
72 | #define ROCR_RXFU (1 << 7) | 72 | #define ROCR_RXFU (1 << 7) |
73 | /* Back Pressure Threshold Reg */ | 73 | /* Back Pressure Threshold Reg */ |
74 | #define BPTR 0x08 | 74 | #define SR_BPTR 0x08 |
75 | #define BPTR_JPT (0x0F << 0) | 75 | #define BPTR_JPT (0x0F << 0) |
76 | #define BPTR_BPHW (0x0F << 4) | 76 | #define BPTR_BPHW (0x0F << 4) |
77 | /* Flow Control Threshold Reg */ | 77 | /* Flow Control Threshold Reg */ |
78 | #define FCTR 0x09 | 78 | #define SR_FCTR 0x09 |
79 | #define FCTR_LWOT (0x0F << 0) | 79 | #define FCTR_LWOT (0x0F << 0) |
80 | #define FCTR_HWOT (0x0F << 4) | 80 | #define FCTR_HWOT (0x0F << 4) |
81 | /* rx/tx Flow Control Reg */ | 81 | /* rx/tx Flow Control Reg */ |
82 | #define FCR 0x0A | 82 | #define SR_FCR 0x0A |
83 | #define FCR_FLCE (1 << 0) | 83 | #define FCR_FLCE (1 << 0) |
84 | #define FCR_BKPA (1 << 4) | 84 | #define FCR_BKPA (1 << 4) |
85 | #define FCR_TXPEN (1 << 5) | 85 | #define FCR_TXPEN (1 << 5) |
86 | #define FCR_TXPF (1 << 6) | 86 | #define FCR_TXPF (1 << 6) |
87 | #define FCR_TXP0 (1 << 7) | 87 | #define FCR_TXP0 (1 << 7) |
88 | /* Eeprom & Phy Control Reg */ | 88 | /* Eeprom & Phy Control Reg */ |
89 | #define EPCR 0x0B | 89 | #define SR_EPCR 0x0B |
90 | #define EPCR_ERRE (1 << 0) | 90 | #define EPCR_ERRE (1 << 0) |
91 | #define EPCR_ERPRW (1 << 1) | 91 | #define EPCR_ERPRW (1 << 1) |
92 | #define EPCR_ERPRR (1 << 2) | 92 | #define EPCR_ERPRR (1 << 2) |
93 | #define EPCR_EPOS (1 << 3) | 93 | #define EPCR_EPOS (1 << 3) |
94 | #define EPCR_WEP (1 << 4) | 94 | #define EPCR_WEP (1 << 4) |
95 | /* Eeprom & Phy Address Reg */ | 95 | /* Eeprom & Phy Address Reg */ |
96 | #define EPAR 0x0C | 96 | #define SR_EPAR 0x0C |
97 | #define EPAR_EROA (0x3F << 0) | 97 | #define EPAR_EROA (0x3F << 0) |
98 | #define EPAR_PHY_ADR_MASK (0x03 << 6) | 98 | #define EPAR_PHY_ADR_MASK (0x03 << 6) |
99 | #define EPAR_PHY_ADR (0x01 << 6) | 99 | #define EPAR_PHY_ADR (0x01 << 6) |
100 | /* Eeprom & Phy Data Reg */ | 100 | /* Eeprom & Phy Data Reg */ |
101 | #define EPDR 0x0D /* 0x0D ~ 0x0E for Data Reg Low & High */ | 101 | #define SR_EPDR 0x0D /* 0x0D ~ 0x0E for Data Reg Low & High */ |
102 | /* Wakeup Control Reg */ | 102 | /* Wakeup Control Reg */ |
103 | #define WCR 0x0F | 103 | #define SR_WCR 0x0F |
104 | #define WCR_MAGICST (1 << 0) | 104 | #define WCR_MAGICST (1 << 0) |
105 | #define WCR_LINKST (1 << 2) | 105 | #define WCR_LINKST (1 << 2) |
106 | #define WCR_MAGICEN (1 << 3) | 106 | #define WCR_MAGICEN (1 << 3) |
107 | #define WCR_LINKEN (1 << 5) | 107 | #define WCR_LINKEN (1 << 5) |
108 | /* Physical Address Reg */ | 108 | /* Physical Address Reg */ |
109 | #define PAR 0x10 /* 0x10 ~ 0x15 6 bytes for PAR */ | 109 | #define SR_PAR 0x10 /* 0x10 ~ 0x15 6 bytes for PAR */ |
110 | /* Multicast Address Reg */ | 110 | /* Multicast Address Reg */ |
111 | #define MAR 0x16 /* 0x16 ~ 0x1D 8 bytes for MAR */ | 111 | #define SR_MAR 0x16 /* 0x16 ~ 0x1D 8 bytes for MAR */ |
112 | /* 0x1e unused */ | 112 | /* 0x1e unused */ |
113 | /* Phy Reset Reg */ | 113 | /* Phy Reset Reg */ |
114 | #define PRR 0x1F | 114 | #define SR_PRR 0x1F |
115 | #define PRR_PHY_RST (1 << 0) | 115 | #define PRR_PHY_RST (1 << 0) |
116 | /* Tx sdram Write Pointer Address Low */ | 116 | /* Tx sdram Write Pointer Address Low */ |
117 | #define TWPAL 0x20 | 117 | #define SR_TWPAL 0x20 |
118 | /* Tx sdram Write Pointer Address High */ | 118 | /* Tx sdram Write Pointer Address High */ |
119 | #define TWPAH 0x21 | 119 | #define SR_TWPAH 0x21 |
120 | /* Tx sdram Read Pointer Address Low */ | 120 | /* Tx sdram Read Pointer Address Low */ |
121 | #define TRPAL 0x22 | 121 | #define SR_TRPAL 0x22 |
122 | /* Tx sdram Read Pointer Address High */ | 122 | /* Tx sdram Read Pointer Address High */ |
123 | #define TRPAH 0x23 | 123 | #define SR_TRPAH 0x23 |
124 | /* Rx sdram Write Pointer Address Low */ | 124 | /* Rx sdram Write Pointer Address Low */ |
125 | #define RWPAL 0x24 | 125 | #define SR_RWPAL 0x24 |
126 | /* Rx sdram Write Pointer Address High */ | 126 | /* Rx sdram Write Pointer Address High */ |
127 | #define RWPAH 0x25 | 127 | #define SR_RWPAH 0x25 |
128 | /* Rx sdram Read Pointer Address Low */ | 128 | /* Rx sdram Read Pointer Address Low */ |
129 | #define RRPAL 0x26 | 129 | #define SR_RRPAL 0x26 |
130 | /* Rx sdram Read Pointer Address High */ | 130 | /* Rx sdram Read Pointer Address High */ |
131 | #define RRPAH 0x27 | 131 | #define SR_RRPAH 0x27 |
132 | /* Vendor ID register */ | 132 | /* Vendor ID register */ |
133 | #define VID 0x28 /* 0x28 ~ 0x29 2 bytes for VID */ | 133 | #define SR_VID 0x28 /* 0x28 ~ 0x29 2 bytes for VID */ |
134 | /* Product ID register */ | 134 | /* Product ID register */ |
135 | #define PID 0x2A /* 0x2A ~ 0x2B 2 bytes for PID */ | 135 | #define SR_PID 0x2A /* 0x2A ~ 0x2B 2 bytes for PID */ |
136 | /* CHIP Revision register */ | 136 | /* CHIP Revision register */ |
137 | #define CHIPR 0x2C | 137 | #define SR_CHIPR 0x2C |
138 | /* 0x2D --> 0xEF unused */ | 138 | /* 0x2D --> 0xEF unused */ |
139 | /* USB Device Address */ | 139 | /* USB Device Address */ |
140 | #define USBDA 0xF0 | 140 | #define SR_USBDA 0xF0 |
141 | #define USBDA_USBFA (0x7F << 0) | 141 | #define USBDA_USBFA (0x7F << 0) |
142 | /* RX packet Counter Reg */ | 142 | /* RX packet Counter Reg */ |
143 | #define RXC 0xF1 | 143 | #define SR_RXC 0xF1 |
144 | /* Tx packet Counter & USB Status Reg */ | 144 | /* Tx packet Counter & USB Status Reg */ |
145 | #define TXC_USBS 0xF2 | 145 | #define SR_TXC_USBS 0xF2 |
146 | #define TXC_USBS_TXC0 (1 << 0) | 146 | #define TXC_USBS_TXC0 (1 << 0) |
147 | #define TXC_USBS_TXC1 (1 << 1) | 147 | #define TXC_USBS_TXC1 (1 << 1) |
148 | #define TXC_USBS_TXC2 (1 << 2) | 148 | #define TXC_USBS_TXC2 (1 << 2) |
@@ -150,7 +150,7 @@ | |||
150 | #define TXC_USBS_SUSFLAG (1 << 6) | 150 | #define TXC_USBS_SUSFLAG (1 << 6) |
151 | #define TXC_USBS_RXFAULT (1 << 7) | 151 | #define TXC_USBS_RXFAULT (1 << 7) |
152 | /* USB Control register */ | 152 | /* USB Control register */ |
153 | #define USBC 0xF4 | 153 | #define SR_USBC 0xF4 |
154 | #define USBC_EP3NAK (1 << 4) | 154 | #define USBC_EP3NAK (1 << 4) |
155 | #define USBC_EP3ACK (1 << 5) | 155 | #define USBC_EP3ACK (1 << 5) |
156 | 156 | ||
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 5ca97713bfb3..059fdf1bf5ee 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -490,17 +490,8 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, | |||
490 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; | 490 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; |
491 | break; | 491 | break; |
492 | case VIRTIO_NET_HDR_GSO_UDP: | 492 | case VIRTIO_NET_HDR_GSO_UDP: |
493 | { | ||
494 | static bool warned; | ||
495 | |||
496 | if (!warned) { | ||
497 | warned = true; | ||
498 | netdev_warn(dev, | ||
499 | "host using disabled UFO feature; please fix it\n"); | ||
500 | } | ||
501 | skb_shinfo(skb)->gso_type = SKB_GSO_UDP; | 493 | skb_shinfo(skb)->gso_type = SKB_GSO_UDP; |
502 | break; | 494 | break; |
503 | } | ||
504 | case VIRTIO_NET_HDR_GSO_TCPV6: | 495 | case VIRTIO_NET_HDR_GSO_TCPV6: |
505 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; | 496 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; |
506 | break; | 497 | break; |
@@ -888,6 +879,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) | |||
888 | hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; | 879 | hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; |
889 | else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) | 880 | else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) |
890 | hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; | 881 | hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; |
882 | else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) | ||
883 | hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP; | ||
891 | else | 884 | else |
892 | BUG(); | 885 | BUG(); |
893 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN) | 886 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN) |
@@ -1748,7 +1741,7 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
1748 | dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; | 1741 | dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; |
1749 | 1742 | ||
1750 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { | 1743 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { |
1751 | dev->hw_features |= NETIF_F_TSO | 1744 | dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO |
1752 | | NETIF_F_TSO_ECN | NETIF_F_TSO6; | 1745 | | NETIF_F_TSO_ECN | NETIF_F_TSO6; |
1753 | } | 1746 | } |
1754 | /* Individual feature bits: what can host handle? */ | 1747 | /* Individual feature bits: what can host handle? */ |
@@ -1758,9 +1751,11 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
1758 | dev->hw_features |= NETIF_F_TSO6; | 1751 | dev->hw_features |= NETIF_F_TSO6; |
1759 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) | 1752 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) |
1760 | dev->hw_features |= NETIF_F_TSO_ECN; | 1753 | dev->hw_features |= NETIF_F_TSO_ECN; |
1754 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO)) | ||
1755 | dev->hw_features |= NETIF_F_UFO; | ||
1761 | 1756 | ||
1762 | if (gso) | 1757 | if (gso) |
1763 | dev->features |= dev->hw_features & NETIF_F_ALL_TSO; | 1758 | dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO); |
1764 | /* (!csum && gso) case will be fixed by register_netdev() */ | 1759 | /* (!csum && gso) case will be fixed by register_netdev() */ |
1765 | } | 1760 | } |
1766 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) | 1761 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) |
@@ -1798,7 +1793,8 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
1798 | /* If we can receive ANY GSO packets, we must allocate large ones. */ | 1793 | /* If we can receive ANY GSO packets, we must allocate large ones. */ |
1799 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || | 1794 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || |
1800 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || | 1795 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || |
1801 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN)) | 1796 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) || |
1797 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO)) | ||
1802 | vi->big_packets = true; | 1798 | vi->big_packets = true; |
1803 | 1799 | ||
1804 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) | 1800 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) |
@@ -1994,9 +1990,9 @@ static struct virtio_device_id id_table[] = { | |||
1994 | static unsigned int features[] = { | 1990 | static unsigned int features[] = { |
1995 | VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, | 1991 | VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, |
1996 | VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, | 1992 | VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, |
1997 | VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_TSO6, | 1993 | VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, |
1998 | VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, | 1994 | VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, |
1999 | VIRTIO_NET_F_GUEST_ECN, | 1995 | VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, |
2000 | VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, | 1996 | VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, |
2001 | VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, | 1997 | VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, |
2002 | VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, | 1998 | VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, |
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 7fbd89fbe107..a8c755dcab14 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -2432,10 +2432,10 @@ static void vxlan_sock_work(struct work_struct *work) | |||
2432 | dev_put(vxlan->dev); | 2432 | dev_put(vxlan->dev); |
2433 | } | 2433 | } |
2434 | 2434 | ||
2435 | static int vxlan_newlink(struct net *net, struct net_device *dev, | 2435 | static int vxlan_newlink(struct net *src_net, struct net_device *dev, |
2436 | struct nlattr *tb[], struct nlattr *data[]) | 2436 | struct nlattr *tb[], struct nlattr *data[]) |
2437 | { | 2437 | { |
2438 | struct vxlan_net *vn = net_generic(net, vxlan_net_id); | 2438 | struct vxlan_net *vn = net_generic(src_net, vxlan_net_id); |
2439 | struct vxlan_dev *vxlan = netdev_priv(dev); | 2439 | struct vxlan_dev *vxlan = netdev_priv(dev); |
2440 | struct vxlan_rdst *dst = &vxlan->default_dst; | 2440 | struct vxlan_rdst *dst = &vxlan->default_dst; |
2441 | __u32 vni; | 2441 | __u32 vni; |
@@ -2445,7 +2445,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, | |||
2445 | if (!data[IFLA_VXLAN_ID]) | 2445 | if (!data[IFLA_VXLAN_ID]) |
2446 | return -EINVAL; | 2446 | return -EINVAL; |
2447 | 2447 | ||
2448 | vxlan->net = dev_net(dev); | 2448 | vxlan->net = src_net; |
2449 | 2449 | ||
2450 | vni = nla_get_u32(data[IFLA_VXLAN_ID]); | 2450 | vni = nla_get_u32(data[IFLA_VXLAN_ID]); |
2451 | dst->remote_vni = vni; | 2451 | dst->remote_vni = vni; |
@@ -2481,7 +2481,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, | |||
2481 | if (data[IFLA_VXLAN_LINK] && | 2481 | if (data[IFLA_VXLAN_LINK] && |
2482 | (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) { | 2482 | (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) { |
2483 | struct net_device *lowerdev | 2483 | struct net_device *lowerdev |
2484 | = __dev_get_by_index(net, dst->remote_ifindex); | 2484 | = __dev_get_by_index(src_net, dst->remote_ifindex); |
2485 | 2485 | ||
2486 | if (!lowerdev) { | 2486 | if (!lowerdev) { |
2487 | pr_info("ifindex %d does not exist\n", dst->remote_ifindex); | 2487 | pr_info("ifindex %d does not exist\n", dst->remote_ifindex); |
@@ -2557,7 +2557,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, | |||
2557 | nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX])) | 2557 | nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX])) |
2558 | vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX; | 2558 | vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX; |
2559 | 2559 | ||
2560 | if (vxlan_find_vni(net, vni, use_ipv6 ? AF_INET6 : AF_INET, | 2560 | if (vxlan_find_vni(src_net, vni, use_ipv6 ? AF_INET6 : AF_INET, |
2561 | vxlan->dst_port)) { | 2561 | vxlan->dst_port)) { |
2562 | pr_info("duplicate VNI %u\n", vni); | 2562 | pr_info("duplicate VNI %u\n", vni); |
2563 | return -EEXIST; | 2563 | return -EEXIST; |
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig index 94e234975c61..a2fdd15f285a 100644 --- a/drivers/net/wan/Kconfig +++ b/drivers/net/wan/Kconfig | |||
@@ -25,7 +25,7 @@ if WAN | |||
25 | # There is no way to detect a comtrol sv11 - force it modular for now. | 25 | # There is no way to detect a comtrol sv11 - force it modular for now. |
26 | config HOSTESS_SV11 | 26 | config HOSTESS_SV11 |
27 | tristate "Comtrol Hostess SV-11 support" | 27 | tristate "Comtrol Hostess SV-11 support" |
28 | depends on ISA && m && ISA_DMA_API && INET && HDLC | 28 | depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS |
29 | help | 29 | help |
30 | Driver for Comtrol Hostess SV-11 network card which | 30 | Driver for Comtrol Hostess SV-11 network card which |
31 | operates on low speed synchronous serial links at up to | 31 | operates on low speed synchronous serial links at up to |
@@ -37,7 +37,7 @@ config HOSTESS_SV11 | |||
37 | # The COSA/SRP driver has not been tested as non-modular yet. | 37 | # The COSA/SRP driver has not been tested as non-modular yet. |
38 | config COSA | 38 | config COSA |
39 | tristate "COSA/SRP sync serial boards support" | 39 | tristate "COSA/SRP sync serial boards support" |
40 | depends on ISA && m && ISA_DMA_API && HDLC | 40 | depends on ISA && m && ISA_DMA_API && HDLC && VIRT_TO_BUS |
41 | ---help--- | 41 | ---help--- |
42 | Driver for COSA and SRP synchronous serial boards. | 42 | Driver for COSA and SRP synchronous serial boards. |
43 | 43 | ||
@@ -87,7 +87,7 @@ config LANMEDIA | |||
87 | # There is no way to detect a Sealevel board. Force it modular | 87 | # There is no way to detect a Sealevel board. Force it modular |
88 | config SEALEVEL_4021 | 88 | config SEALEVEL_4021 |
89 | tristate "Sealevel Systems 4021 support" | 89 | tristate "Sealevel Systems 4021 support" |
90 | depends on ISA && m && ISA_DMA_API && INET && HDLC | 90 | depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS |
91 | help | 91 | help |
92 | This is a driver for the Sealevel Systems ACB 56 serial I/O adapter. | 92 | This is a driver for the Sealevel Systems ACB 56 serial I/O adapter. |
93 | 93 | ||
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 9a72640237cb..62b0bf4fdf6b 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
@@ -285,6 +285,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan) | |||
285 | 285 | ||
286 | __ath_cancel_work(sc); | 286 | __ath_cancel_work(sc); |
287 | 287 | ||
288 | disable_irq(sc->irq); | ||
288 | tasklet_disable(&sc->intr_tq); | 289 | tasklet_disable(&sc->intr_tq); |
289 | tasklet_disable(&sc->bcon_tasklet); | 290 | tasklet_disable(&sc->bcon_tasklet); |
290 | spin_lock_bh(&sc->sc_pcu_lock); | 291 | spin_lock_bh(&sc->sc_pcu_lock); |
@@ -331,6 +332,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan) | |||
331 | r = -EIO; | 332 | r = -EIO; |
332 | 333 | ||
333 | out: | 334 | out: |
335 | enable_irq(sc->irq); | ||
334 | spin_unlock_bh(&sc->sc_pcu_lock); | 336 | spin_unlock_bh(&sc->sc_pcu_lock); |
335 | tasklet_enable(&sc->bcon_tasklet); | 337 | tasklet_enable(&sc->bcon_tasklet); |
336 | tasklet_enable(&sc->intr_tq); | 338 | tasklet_enable(&sc->intr_tq); |
@@ -512,9 +514,6 @@ irqreturn_t ath_isr(int irq, void *dev) | |||
512 | if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags)) | 514 | if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags)) |
513 | return IRQ_NONE; | 515 | return IRQ_NONE; |
514 | 516 | ||
515 | if (!AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags)) | ||
516 | return IRQ_NONE; | ||
517 | |||
518 | /* shared irq, not for us */ | 517 | /* shared irq, not for us */ |
519 | if (!ath9k_hw_intrpend(ah)) | 518 | if (!ath9k_hw_intrpend(ah)) |
520 | return IRQ_NONE; | 519 | return IRQ_NONE; |
@@ -529,7 +528,7 @@ irqreturn_t ath_isr(int irq, void *dev) | |||
529 | ath9k_debug_sync_cause(sc, sync_cause); | 528 | ath9k_debug_sync_cause(sc, sync_cause); |
530 | status &= ah->imask; /* discard unasked-for bits */ | 529 | status &= ah->imask; /* discard unasked-for bits */ |
531 | 530 | ||
532 | if (AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags)) | 531 | if (test_bit(ATH_OP_HW_RESET, &common->op_flags)) |
533 | return IRQ_HANDLED; | 532 | return IRQ_HANDLED; |
534 | 533 | ||
535 | /* | 534 | /* |
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h index 1bbe4fc47b97..660ddb1b7d8a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h | |||
@@ -246,6 +246,7 @@ enum iwl_ucode_tlv_flag { | |||
246 | * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command, | 246 | * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command, |
247 | * regardless of the band or the number of the probes. FW will calculate | 247 | * regardless of the band or the number of the probes. FW will calculate |
248 | * the actual dwell time. | 248 | * the actual dwell time. |
249 | * @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too. | ||
249 | */ | 250 | */ |
250 | enum iwl_ucode_tlv_api { | 251 | enum iwl_ucode_tlv_api { |
251 | IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0), | 252 | IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0), |
@@ -257,6 +258,7 @@ enum iwl_ucode_tlv_api { | |||
257 | IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7), | 258 | IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7), |
258 | IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8), | 259 | IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8), |
259 | IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13), | 260 | IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13), |
261 | IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = BIT(16), | ||
260 | }; | 262 | }; |
261 | 263 | ||
262 | /** | 264 | /** |
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h index 201846de94e7..cfc0e65b34a5 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | |||
@@ -653,8 +653,11 @@ enum iwl_scan_channel_flags { | |||
653 | }; | 653 | }; |
654 | 654 | ||
655 | /* iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S | 655 | /* iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S |
656 | * @flags: enum iwl_scan_channel_flgs | 656 | * @flags: enum iwl_scan_channel_flags |
657 | * @non_ebs_ratio: how many regular scan iteration before EBS | 657 | * @non_ebs_ratio: defines the ratio of number of scan iterations where EBS is |
658 | * involved. | ||
659 | * 1 - EBS is disabled. | ||
660 | * 2 - every second scan will be full scan(and so on). | ||
658 | */ | 661 | */ |
659 | struct iwl_scan_channel_opt { | 662 | struct iwl_scan_channel_opt { |
660 | __le16 flags; | 663 | __le16 flags; |
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index e880f9d4717b..20915587c820 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c | |||
@@ -3343,18 +3343,16 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, | |||
3343 | msk |= mvmsta->tfd_queue_msk; | 3343 | msk |= mvmsta->tfd_queue_msk; |
3344 | } | 3344 | } |
3345 | 3345 | ||
3346 | if (drop) { | 3346 | msk &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]); |
3347 | if (iwl_mvm_flush_tx_path(mvm, msk, true)) | ||
3348 | IWL_ERR(mvm, "flush request fail\n"); | ||
3349 | mutex_unlock(&mvm->mutex); | ||
3350 | } else { | ||
3351 | mutex_unlock(&mvm->mutex); | ||
3352 | 3347 | ||
3353 | /* this can take a while, and we may need/want other operations | 3348 | if (iwl_mvm_flush_tx_path(mvm, msk, true)) |
3354 | * to succeed while doing this, so do it without the mutex held | 3349 | IWL_ERR(mvm, "flush request fail\n"); |
3355 | */ | 3350 | mutex_unlock(&mvm->mutex); |
3356 | iwl_trans_wait_tx_queue_empty(mvm->trans, msk); | 3351 | |
3357 | } | 3352 | /* this can take a while, and we may need/want other operations |
3353 | * to succeed while doing this, so do it without the mutex held | ||
3354 | */ | ||
3355 | iwl_trans_wait_tx_queue_empty(mvm->trans, msk); | ||
3358 | } | 3356 | } |
3359 | 3357 | ||
3360 | const struct ieee80211_ops iwl_mvm_hw_ops = { | 3358 | const struct ieee80211_ops iwl_mvm_hw_ops = { |
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index ec9a8e7bae1d..844bf7c4c8de 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c | |||
@@ -72,6 +72,8 @@ | |||
72 | 72 | ||
73 | #define IWL_PLCP_QUIET_THRESH 1 | 73 | #define IWL_PLCP_QUIET_THRESH 1 |
74 | #define IWL_ACTIVE_QUIET_TIME 10 | 74 | #define IWL_ACTIVE_QUIET_TIME 10 |
75 | #define IWL_DENSE_EBS_SCAN_RATIO 5 | ||
76 | #define IWL_SPARSE_EBS_SCAN_RATIO 1 | ||
75 | 77 | ||
76 | struct iwl_mvm_scan_params { | 78 | struct iwl_mvm_scan_params { |
77 | u32 max_out_time; | 79 | u32 max_out_time; |
@@ -1105,6 +1107,12 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) | |||
1105 | return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN, | 1107 | return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN, |
1106 | notify); | 1108 | notify); |
1107 | 1109 | ||
1110 | if (mvm->scan_status == IWL_MVM_SCAN_NONE) | ||
1111 | return 0; | ||
1112 | |||
1113 | if (iwl_mvm_is_radio_killed(mvm)) | ||
1114 | goto out; | ||
1115 | |||
1108 | if (mvm->scan_status != IWL_MVM_SCAN_SCHED && | 1116 | if (mvm->scan_status != IWL_MVM_SCAN_SCHED && |
1109 | (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) || | 1117 | (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) || |
1110 | mvm->scan_status != IWL_MVM_SCAN_OS)) { | 1118 | mvm->scan_status != IWL_MVM_SCAN_OS)) { |
@@ -1141,6 +1149,7 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) | |||
1141 | if (mvm->scan_status == IWL_MVM_SCAN_OS) | 1149 | if (mvm->scan_status == IWL_MVM_SCAN_OS) |
1142 | iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); | 1150 | iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); |
1143 | 1151 | ||
1152 | out: | ||
1144 | mvm->scan_status = IWL_MVM_SCAN_NONE; | 1153 | mvm->scan_status = IWL_MVM_SCAN_NONE; |
1145 | 1154 | ||
1146 | if (notify) { | 1155 | if (notify) { |
@@ -1297,18 +1306,6 @@ iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm, | |||
1297 | cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH); | 1306 | cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH); |
1298 | cmd->iter_num = cpu_to_le32(1); | 1307 | cmd->iter_num = cpu_to_le32(1); |
1299 | 1308 | ||
1300 | if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT && | ||
1301 | mvm->last_ebs_successful) { | ||
1302 | cmd->channel_opt[0].flags = | ||
1303 | cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | | ||
1304 | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | | ||
1305 | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); | ||
1306 | cmd->channel_opt[1].flags = | ||
1307 | cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | | ||
1308 | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | | ||
1309 | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); | ||
1310 | } | ||
1311 | |||
1312 | if (iwl_mvm_rrm_scan_needed(mvm)) | 1309 | if (iwl_mvm_rrm_scan_needed(mvm)) |
1313 | cmd->scan_flags |= | 1310 | cmd->scan_flags |= |
1314 | cpu_to_le32(IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED); | 1311 | cpu_to_le32(IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED); |
@@ -1383,6 +1380,22 @@ int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm, | |||
1383 | cmd->schedule[1].iterations = 0; | 1380 | cmd->schedule[1].iterations = 0; |
1384 | cmd->schedule[1].full_scan_mul = 0; | 1381 | cmd->schedule[1].full_scan_mul = 0; |
1385 | 1382 | ||
1383 | if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS && | ||
1384 | mvm->last_ebs_successful) { | ||
1385 | cmd->channel_opt[0].flags = | ||
1386 | cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | | ||
1387 | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | | ||
1388 | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); | ||
1389 | cmd->channel_opt[0].non_ebs_ratio = | ||
1390 | cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO); | ||
1391 | cmd->channel_opt[1].flags = | ||
1392 | cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | | ||
1393 | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | | ||
1394 | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); | ||
1395 | cmd->channel_opt[1].non_ebs_ratio = | ||
1396 | cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO); | ||
1397 | } | ||
1398 | |||
1386 | for (i = 1; i <= req->req.n_ssids; i++) | 1399 | for (i = 1; i <= req->req.n_ssids; i++) |
1387 | ssid_bitmap |= BIT(i); | 1400 | ssid_bitmap |= BIT(i); |
1388 | 1401 | ||
@@ -1483,6 +1496,22 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm, | |||
1483 | cmd->schedule[1].iterations = 0xff; | 1496 | cmd->schedule[1].iterations = 0xff; |
1484 | cmd->schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER; | 1497 | cmd->schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER; |
1485 | 1498 | ||
1499 | if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT && | ||
1500 | mvm->last_ebs_successful) { | ||
1501 | cmd->channel_opt[0].flags = | ||
1502 | cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | | ||
1503 | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | | ||
1504 | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); | ||
1505 | cmd->channel_opt[0].non_ebs_ratio = | ||
1506 | cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO); | ||
1507 | cmd->channel_opt[1].flags = | ||
1508 | cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | | ||
1509 | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | | ||
1510 | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); | ||
1511 | cmd->channel_opt[1].non_ebs_ratio = | ||
1512 | cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO); | ||
1513 | } | ||
1514 | |||
1486 | iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels, | 1515 | iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels, |
1487 | ssid_bitmap, cmd); | 1516 | ssid_bitmap, cmd); |
1488 | 1517 | ||
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 4333306ccdee..c59d07567d90 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c | |||
@@ -90,8 +90,6 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, | |||
90 | 90 | ||
91 | if (ieee80211_is_probe_resp(fc)) | 91 | if (ieee80211_is_probe_resp(fc)) |
92 | tx_flags |= TX_CMD_FLG_TSF; | 92 | tx_flags |= TX_CMD_FLG_TSF; |
93 | else if (ieee80211_is_back_req(fc)) | ||
94 | tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR; | ||
95 | 93 | ||
96 | if (ieee80211_has_morefrags(fc)) | 94 | if (ieee80211_has_morefrags(fc)) |
97 | tx_flags |= TX_CMD_FLG_MORE_FRAG; | 95 | tx_flags |= TX_CMD_FLG_MORE_FRAG; |
@@ -100,6 +98,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, | |||
100 | u8 *qc = ieee80211_get_qos_ctl(hdr); | 98 | u8 *qc = ieee80211_get_qos_ctl(hdr); |
101 | tx_cmd->tid_tspec = qc[0] & 0xf; | 99 | tx_cmd->tid_tspec = qc[0] & 0xf; |
102 | tx_flags &= ~TX_CMD_FLG_SEQ_CTL; | 100 | tx_flags &= ~TX_CMD_FLG_SEQ_CTL; |
101 | } else if (ieee80211_is_back_req(fc)) { | ||
102 | struct ieee80211_bar *bar = (void *)skb->data; | ||
103 | u16 control = le16_to_cpu(bar->control); | ||
104 | |||
105 | tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR; | ||
106 | tx_cmd->tid_tspec = (control & | ||
107 | IEEE80211_BAR_CTRL_TID_INFO_MASK) >> | ||
108 | IEEE80211_BAR_CTRL_TID_INFO_SHIFT; | ||
109 | WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT); | ||
103 | } else { | 110 | } else { |
104 | tx_cmd->tid_tspec = IWL_TID_NON_QOS; | 111 | tx_cmd->tid_tspec = IWL_TID_NON_QOS; |
105 | if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) | 112 | if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) |
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 9259a732e8a4..12f9e2708afb 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c | |||
@@ -483,9 +483,8 @@ int xenvif_init_queue(struct xenvif_queue *queue) | |||
483 | * better enable it. The long term solution would be to use just a | 483 | * better enable it. The long term solution would be to use just a |
484 | * bunch of valid page descriptors, without dependency on ballooning | 484 | * bunch of valid page descriptors, without dependency on ballooning |
485 | */ | 485 | */ |
486 | err = alloc_xenballooned_pages(MAX_PENDING_REQS, | 486 | err = gnttab_alloc_pages(MAX_PENDING_REQS, |
487 | queue->mmap_pages, | 487 | queue->mmap_pages); |
488 | false); | ||
489 | if (err) { | 488 | if (err) { |
490 | netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n"); | 489 | netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n"); |
491 | return -ENOMEM; | 490 | return -ENOMEM; |
@@ -578,6 +577,7 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref, | |||
578 | goto err_rx_unbind; | 577 | goto err_rx_unbind; |
579 | } | 578 | } |
580 | queue->task = task; | 579 | queue->task = task; |
580 | get_task_struct(task); | ||
581 | 581 | ||
582 | task = kthread_create(xenvif_dealloc_kthread, | 582 | task = kthread_create(xenvif_dealloc_kthread, |
583 | (void *)queue, "%s-dealloc", queue->name); | 583 | (void *)queue, "%s-dealloc", queue->name); |
@@ -634,6 +634,7 @@ void xenvif_disconnect(struct xenvif *vif) | |||
634 | 634 | ||
635 | if (queue->task) { | 635 | if (queue->task) { |
636 | kthread_stop(queue->task); | 636 | kthread_stop(queue->task); |
637 | put_task_struct(queue->task); | ||
637 | queue->task = NULL; | 638 | queue->task = NULL; |
638 | } | 639 | } |
639 | 640 | ||
@@ -662,7 +663,7 @@ void xenvif_disconnect(struct xenvif *vif) | |||
662 | */ | 663 | */ |
663 | void xenvif_deinit_queue(struct xenvif_queue *queue) | 664 | void xenvif_deinit_queue(struct xenvif_queue *queue) |
664 | { | 665 | { |
665 | free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages); | 666 | gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages); |
666 | } | 667 | } |
667 | 668 | ||
668 | void xenvif_free(struct xenvif *vif) | 669 | void xenvif_free(struct xenvif *vif) |
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 908e65e9b821..7dc2d64db3cb 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c | |||
@@ -314,9 +314,7 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue, | |||
314 | static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb, | 314 | static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb, |
315 | struct netrx_pending_operations *npo, | 315 | struct netrx_pending_operations *npo, |
316 | struct page *page, unsigned long size, | 316 | struct page *page, unsigned long size, |
317 | unsigned long offset, int *head, | 317 | unsigned long offset, int *head) |
318 | struct xenvif_queue *foreign_queue, | ||
319 | grant_ref_t foreign_gref) | ||
320 | { | 318 | { |
321 | struct gnttab_copy *copy_gop; | 319 | struct gnttab_copy *copy_gop; |
322 | struct xenvif_rx_meta *meta; | 320 | struct xenvif_rx_meta *meta; |
@@ -333,6 +331,8 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb | |||
333 | offset &= ~PAGE_MASK; | 331 | offset &= ~PAGE_MASK; |
334 | 332 | ||
335 | while (size > 0) { | 333 | while (size > 0) { |
334 | struct xen_page_foreign *foreign; | ||
335 | |||
336 | BUG_ON(offset >= PAGE_SIZE); | 336 | BUG_ON(offset >= PAGE_SIZE); |
337 | BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET); | 337 | BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET); |
338 | 338 | ||
@@ -361,9 +361,10 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb | |||
361 | copy_gop->flags = GNTCOPY_dest_gref; | 361 | copy_gop->flags = GNTCOPY_dest_gref; |
362 | copy_gop->len = bytes; | 362 | copy_gop->len = bytes; |
363 | 363 | ||
364 | if (foreign_queue) { | 364 | foreign = xen_page_foreign(page); |
365 | copy_gop->source.domid = foreign_queue->vif->domid; | 365 | if (foreign) { |
366 | copy_gop->source.u.ref = foreign_gref; | 366 | copy_gop->source.domid = foreign->domid; |
367 | copy_gop->source.u.ref = foreign->gref; | ||
367 | copy_gop->flags |= GNTCOPY_source_gref; | 368 | copy_gop->flags |= GNTCOPY_source_gref; |
368 | } else { | 369 | } else { |
369 | copy_gop->source.domid = DOMID_SELF; | 370 | copy_gop->source.domid = DOMID_SELF; |
@@ -406,35 +407,6 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb | |||
406 | } | 407 | } |
407 | 408 | ||
408 | /* | 409 | /* |
409 | * Find the grant ref for a given frag in a chain of struct ubuf_info's | ||
410 | * skb: the skb itself | ||
411 | * i: the frag's number | ||
412 | * ubuf: a pointer to an element in the chain. It should not be NULL | ||
413 | * | ||
414 | * Returns a pointer to the element in the chain where the page were found. If | ||
415 | * not found, returns NULL. | ||
416 | * See the definition of callback_struct in common.h for more details about | ||
417 | * the chain. | ||
418 | */ | ||
419 | static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb, | ||
420 | const int i, | ||
421 | const struct ubuf_info *ubuf) | ||
422 | { | ||
423 | struct xenvif_queue *foreign_queue = ubuf_to_queue(ubuf); | ||
424 | |||
425 | do { | ||
426 | u16 pending_idx = ubuf->desc; | ||
427 | |||
428 | if (skb_shinfo(skb)->frags[i].page.p == | ||
429 | foreign_queue->mmap_pages[pending_idx]) | ||
430 | break; | ||
431 | ubuf = (struct ubuf_info *) ubuf->ctx; | ||
432 | } while (ubuf); | ||
433 | |||
434 | return ubuf; | ||
435 | } | ||
436 | |||
437 | /* | ||
438 | * Prepare an SKB to be transmitted to the frontend. | 410 | * Prepare an SKB to be transmitted to the frontend. |
439 | * | 411 | * |
440 | * This function is responsible for allocating grant operations, meta | 412 | * This function is responsible for allocating grant operations, meta |
@@ -459,8 +431,6 @@ static int xenvif_gop_skb(struct sk_buff *skb, | |||
459 | int head = 1; | 431 | int head = 1; |
460 | int old_meta_prod; | 432 | int old_meta_prod; |
461 | int gso_type; | 433 | int gso_type; |
462 | const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg; | ||
463 | const struct ubuf_info *const head_ubuf = ubuf; | ||
464 | 434 | ||
465 | old_meta_prod = npo->meta_prod; | 435 | old_meta_prod = npo->meta_prod; |
466 | 436 | ||
@@ -507,68 +477,16 @@ static int xenvif_gop_skb(struct sk_buff *skb, | |||
507 | len = skb_tail_pointer(skb) - data; | 477 | len = skb_tail_pointer(skb) - data; |
508 | 478 | ||
509 | xenvif_gop_frag_copy(queue, skb, npo, | 479 | xenvif_gop_frag_copy(queue, skb, npo, |
510 | virt_to_page(data), len, offset, &head, | 480 | virt_to_page(data), len, offset, &head); |
511 | NULL, | ||
512 | 0); | ||
513 | data += len; | 481 | data += len; |
514 | } | 482 | } |
515 | 483 | ||
516 | for (i = 0; i < nr_frags; i++) { | 484 | for (i = 0; i < nr_frags; i++) { |
517 | /* This variable also signals whether foreign_gref has a real | ||
518 | * value or not. | ||
519 | */ | ||
520 | struct xenvif_queue *foreign_queue = NULL; | ||
521 | grant_ref_t foreign_gref; | ||
522 | |||
523 | if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) && | ||
524 | (ubuf->callback == &xenvif_zerocopy_callback)) { | ||
525 | const struct ubuf_info *const startpoint = ubuf; | ||
526 | |||
527 | /* Ideally ubuf points to the chain element which | ||
528 | * belongs to this frag. Or if frags were removed from | ||
529 | * the beginning, then shortly before it. | ||
530 | */ | ||
531 | ubuf = xenvif_find_gref(skb, i, ubuf); | ||
532 | |||
533 | /* Try again from the beginning of the list, if we | ||
534 | * haven't tried from there. This only makes sense in | ||
535 | * the unlikely event of reordering the original frags. | ||
536 | * For injected local pages it's an unnecessary second | ||
537 | * run. | ||
538 | */ | ||
539 | if (unlikely(!ubuf) && startpoint != head_ubuf) | ||
540 | ubuf = xenvif_find_gref(skb, i, head_ubuf); | ||
541 | |||
542 | if (likely(ubuf)) { | ||
543 | u16 pending_idx = ubuf->desc; | ||
544 | |||
545 | foreign_queue = ubuf_to_queue(ubuf); | ||
546 | foreign_gref = | ||
547 | foreign_queue->pending_tx_info[pending_idx].req.gref; | ||
548 | /* Just a safety measure. If this was the last | ||
549 | * element on the list, the for loop will | ||
550 | * iterate again if a local page were added to | ||
551 | * the end. Using head_ubuf here prevents the | ||
552 | * second search on the chain. Or the original | ||
553 | * frags changed order, but that's less likely. | ||
554 | * In any way, ubuf shouldn't be NULL. | ||
555 | */ | ||
556 | ubuf = ubuf->ctx ? | ||
557 | (struct ubuf_info *) ubuf->ctx : | ||
558 | head_ubuf; | ||
559 | } else | ||
560 | /* This frag was a local page, added to the | ||
561 | * array after the skb left netback. | ||
562 | */ | ||
563 | ubuf = head_ubuf; | ||
564 | } | ||
565 | xenvif_gop_frag_copy(queue, skb, npo, | 485 | xenvif_gop_frag_copy(queue, skb, npo, |
566 | skb_frag_page(&skb_shinfo(skb)->frags[i]), | 486 | skb_frag_page(&skb_shinfo(skb)->frags[i]), |
567 | skb_frag_size(&skb_shinfo(skb)->frags[i]), | 487 | skb_frag_size(&skb_shinfo(skb)->frags[i]), |
568 | skb_shinfo(skb)->frags[i].page_offset, | 488 | skb_shinfo(skb)->frags[i].page_offset, |
569 | &head, | 489 | &head); |
570 | foreign_queue, | ||
571 | foreign_queue ? foreign_gref : UINT_MAX); | ||
572 | } | 490 | } |
573 | 491 | ||
574 | return npo->meta_prod - old_meta_prod; | 492 | return npo->meta_prod - old_meta_prod; |
@@ -1241,12 +1159,6 @@ static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb) | |||
1241 | /* Take an extra reference to offset network stack's put_page */ | 1159 | /* Take an extra reference to offset network stack's put_page */ |
1242 | get_page(queue->mmap_pages[pending_idx]); | 1160 | get_page(queue->mmap_pages[pending_idx]); |
1243 | } | 1161 | } |
1244 | /* FIXME: __skb_fill_page_desc set this to true because page->pfmemalloc | ||
1245 | * overlaps with "index", and "mapping" is not set. I think mapping | ||
1246 | * should be set. If delivered to local stack, it would drop this | ||
1247 | * skb in sk_filter unless the socket has the right to use it. | ||
1248 | */ | ||
1249 | skb->pfmemalloc = false; | ||
1250 | } | 1162 | } |
1251 | 1163 | ||
1252 | static int xenvif_get_extras(struct xenvif_queue *queue, | 1164 | static int xenvif_get_extras(struct xenvif_queue *queue, |
@@ -2109,8 +2021,7 @@ int xenvif_kthread_guest_rx(void *data) | |||
2109 | */ | 2021 | */ |
2110 | if (unlikely(vif->disabled && queue->id == 0)) { | 2022 | if (unlikely(vif->disabled && queue->id == 0)) { |
2111 | xenvif_carrier_off(vif); | 2023 | xenvif_carrier_off(vif); |
2112 | xenvif_rx_queue_purge(queue); | 2024 | break; |
2113 | continue; | ||
2114 | } | 2025 | } |
2115 | 2026 | ||
2116 | if (!skb_queue_empty(&queue->rx_queue)) | 2027 | if (!skb_queue_empty(&queue->rx_queue)) |
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c index 88471d3d98cd..110fece2ff53 100644 --- a/drivers/of/of_pci.c +++ b/drivers/of/of_pci.c | |||
@@ -140,6 +140,7 @@ int of_pci_get_host_bridge_resources(struct device_node *dev, | |||
140 | unsigned char busno, unsigned char bus_max, | 140 | unsigned char busno, unsigned char bus_max, |
141 | struct list_head *resources, resource_size_t *io_base) | 141 | struct list_head *resources, resource_size_t *io_base) |
142 | { | 142 | { |
143 | struct resource_entry *window; | ||
143 | struct resource *res; | 144 | struct resource *res; |
144 | struct resource *bus_range; | 145 | struct resource *bus_range; |
145 | struct of_pci_range range; | 146 | struct of_pci_range range; |
@@ -225,7 +226,10 @@ int of_pci_get_host_bridge_resources(struct device_node *dev, | |||
225 | conversion_failed: | 226 | conversion_failed: |
226 | kfree(res); | 227 | kfree(res); |
227 | parse_failed: | 228 | parse_failed: |
229 | resource_list_for_each_entry(window, resources) | ||
230 | kfree(window->res); | ||
228 | pci_free_resource_list(resources); | 231 | pci_free_resource_list(resources); |
232 | kfree(bus_range); | ||
229 | return err; | 233 | return err; |
230 | } | 234 | } |
231 | EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources); | 235 | EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources); |
diff --git a/drivers/parport/parport_atari.c b/drivers/parport/parport_atari.c index 7ad59ac68cf6..a81cd2a2747f 100644 --- a/drivers/parport/parport_atari.c +++ b/drivers/parport/parport_atari.c | |||
@@ -192,8 +192,8 @@ static int __init parport_atari_init(void) | |||
192 | &parport_atari_ops); | 192 | &parport_atari_ops); |
193 | if (!p) | 193 | if (!p) |
194 | return -ENODEV; | 194 | return -ENODEV; |
195 | if (request_irq(IRQ_MFP_BUSY, parport_irq_handler, | 195 | if (request_irq(IRQ_MFP_BUSY, parport_irq_handler, 0, p->name, |
196 | IRQ_TYPE_SLOW, p->name, p)) { | 196 | p)) { |
197 | parport_put_port (p); | 197 | parport_put_port (p); |
198 | return -ENODEV; | 198 | return -ENODEV; |
199 | } | 199 | } |
diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 49dd766852ba..d9b64a175990 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c | |||
@@ -67,6 +67,93 @@ EXPORT_SYMBOL(pci_bus_write_config_byte); | |||
67 | EXPORT_SYMBOL(pci_bus_write_config_word); | 67 | EXPORT_SYMBOL(pci_bus_write_config_word); |
68 | EXPORT_SYMBOL(pci_bus_write_config_dword); | 68 | EXPORT_SYMBOL(pci_bus_write_config_dword); |
69 | 69 | ||
70 | int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn, | ||
71 | int where, int size, u32 *val) | ||
72 | { | ||
73 | void __iomem *addr; | ||
74 | |||
75 | addr = bus->ops->map_bus(bus, devfn, where); | ||
76 | if (!addr) { | ||
77 | *val = ~0; | ||
78 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
79 | } | ||
80 | |||
81 | if (size == 1) | ||
82 | *val = readb(addr); | ||
83 | else if (size == 2) | ||
84 | *val = readw(addr); | ||
85 | else | ||
86 | *val = readl(addr); | ||
87 | |||
88 | return PCIBIOS_SUCCESSFUL; | ||
89 | } | ||
90 | EXPORT_SYMBOL_GPL(pci_generic_config_read); | ||
91 | |||
92 | int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn, | ||
93 | int where, int size, u32 val) | ||
94 | { | ||
95 | void __iomem *addr; | ||
96 | |||
97 | addr = bus->ops->map_bus(bus, devfn, where); | ||
98 | if (!addr) | ||
99 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
100 | |||
101 | if (size == 1) | ||
102 | writeb(val, addr); | ||
103 | else if (size == 2) | ||
104 | writew(val, addr); | ||
105 | else | ||
106 | writel(val, addr); | ||
107 | |||
108 | return PCIBIOS_SUCCESSFUL; | ||
109 | } | ||
110 | EXPORT_SYMBOL_GPL(pci_generic_config_write); | ||
111 | |||
112 | int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn, | ||
113 | int where, int size, u32 *val) | ||
114 | { | ||
115 | void __iomem *addr; | ||
116 | |||
117 | addr = bus->ops->map_bus(bus, devfn, where & ~0x3); | ||
118 | if (!addr) { | ||
119 | *val = ~0; | ||
120 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
121 | } | ||
122 | |||
123 | *val = readl(addr); | ||
124 | |||
125 | if (size <= 2) | ||
126 | *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1); | ||
127 | |||
128 | return PCIBIOS_SUCCESSFUL; | ||
129 | } | ||
130 | EXPORT_SYMBOL_GPL(pci_generic_config_read32); | ||
131 | |||
132 | int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn, | ||
133 | int where, int size, u32 val) | ||
134 | { | ||
135 | void __iomem *addr; | ||
136 | u32 mask, tmp; | ||
137 | |||
138 | addr = bus->ops->map_bus(bus, devfn, where & ~0x3); | ||
139 | if (!addr) | ||
140 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
141 | |||
142 | if (size == 4) { | ||
143 | writel(val, addr); | ||
144 | return PCIBIOS_SUCCESSFUL; | ||
145 | } else { | ||
146 | mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8)); | ||
147 | } | ||
148 | |||
149 | tmp = readl(addr) & mask; | ||
150 | tmp |= val << ((where & 0x3) * 8); | ||
151 | writel(tmp, addr); | ||
152 | |||
153 | return PCIBIOS_SUCCESSFUL; | ||
154 | } | ||
155 | EXPORT_SYMBOL_GPL(pci_generic_config_write32); | ||
156 | |||
70 | /** | 157 | /** |
71 | * pci_bus_set_ops - Set raw operations of pci bus | 158 | * pci_bus_set_ops - Set raw operations of pci bus |
72 | * @bus: pci bus struct | 159 | * @bus: pci bus struct |
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 8fb16188cd82..90fa3a78fb7c 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c | |||
@@ -20,17 +20,16 @@ | |||
20 | void pci_add_resource_offset(struct list_head *resources, struct resource *res, | 20 | void pci_add_resource_offset(struct list_head *resources, struct resource *res, |
21 | resource_size_t offset) | 21 | resource_size_t offset) |
22 | { | 22 | { |
23 | struct pci_host_bridge_window *window; | 23 | struct resource_entry *entry; |
24 | 24 | ||
25 | window = kzalloc(sizeof(struct pci_host_bridge_window), GFP_KERNEL); | 25 | entry = resource_list_create_entry(res, 0); |
26 | if (!window) { | 26 | if (!entry) { |
27 | printk(KERN_ERR "PCI: can't add host bridge window %pR\n", res); | 27 | printk(KERN_ERR "PCI: can't add host bridge window %pR\n", res); |
28 | return; | 28 | return; |
29 | } | 29 | } |
30 | 30 | ||
31 | window->res = res; | 31 | entry->offset = offset; |
32 | window->offset = offset; | 32 | resource_list_add_tail(entry, resources); |
33 | list_add_tail(&window->list, resources); | ||
34 | } | 33 | } |
35 | EXPORT_SYMBOL(pci_add_resource_offset); | 34 | EXPORT_SYMBOL(pci_add_resource_offset); |
36 | 35 | ||
@@ -42,12 +41,7 @@ EXPORT_SYMBOL(pci_add_resource); | |||
42 | 41 | ||
43 | void pci_free_resource_list(struct list_head *resources) | 42 | void pci_free_resource_list(struct list_head *resources) |
44 | { | 43 | { |
45 | struct pci_host_bridge_window *window, *tmp; | 44 | resource_list_free(resources); |
46 | |||
47 | list_for_each_entry_safe(window, tmp, resources, list) { | ||
48 | list_del(&window->list); | ||
49 | kfree(window); | ||
50 | } | ||
51 | } | 45 | } |
52 | EXPORT_SYMBOL(pci_free_resource_list); | 46 | EXPORT_SYMBOL(pci_free_resource_list); |
53 | 47 | ||
diff --git a/drivers/pci/host-bridge.c b/drivers/pci/host-bridge.c index 0e5f3c95af5b..39b2dbe585aa 100644 --- a/drivers/pci/host-bridge.c +++ b/drivers/pci/host-bridge.c | |||
@@ -35,10 +35,10 @@ void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region, | |||
35 | struct resource *res) | 35 | struct resource *res) |
36 | { | 36 | { |
37 | struct pci_host_bridge *bridge = find_pci_host_bridge(bus); | 37 | struct pci_host_bridge *bridge = find_pci_host_bridge(bus); |
38 | struct pci_host_bridge_window *window; | 38 | struct resource_entry *window; |
39 | resource_size_t offset = 0; | 39 | resource_size_t offset = 0; |
40 | 40 | ||
41 | list_for_each_entry(window, &bridge->windows, list) { | 41 | resource_list_for_each_entry(window, &bridge->windows) { |
42 | if (resource_contains(window->res, res)) { | 42 | if (resource_contains(window->res, res)) { |
43 | offset = window->offset; | 43 | offset = window->offset; |
44 | break; | 44 | break; |
@@ -60,10 +60,10 @@ void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res, | |||
60 | struct pci_bus_region *region) | 60 | struct pci_bus_region *region) |
61 | { | 61 | { |
62 | struct pci_host_bridge *bridge = find_pci_host_bridge(bus); | 62 | struct pci_host_bridge *bridge = find_pci_host_bridge(bus); |
63 | struct pci_host_bridge_window *window; | 63 | struct resource_entry *window; |
64 | resource_size_t offset = 0; | 64 | resource_size_t offset = 0; |
65 | 65 | ||
66 | list_for_each_entry(window, &bridge->windows, list) { | 66 | resource_list_for_each_entry(window, &bridge->windows) { |
67 | struct pci_bus_region bus_region; | 67 | struct pci_bus_region bus_region; |
68 | 68 | ||
69 | if (resource_type(res) != resource_type(window->res)) | 69 | if (resource_type(res) != resource_type(window->res)) |
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig index c4b6568e486d..7b892a9cc4fc 100644 --- a/drivers/pci/host/Kconfig +++ b/drivers/pci/host/Kconfig | |||
@@ -102,4 +102,8 @@ config PCI_LAYERSCAPE | |||
102 | help | 102 | help |
103 | Say Y here if you want PCIe controller support on Layerscape SoCs. | 103 | Say Y here if you want PCIe controller support on Layerscape SoCs. |
104 | 104 | ||
105 | config PCI_VERSATILE | ||
106 | bool "ARM Versatile PB PCI controller" | ||
107 | depends on ARCH_VERSATILE | ||
108 | |||
105 | endmenu | 109 | endmenu |
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile index 44c26998027f..e61d91c92bf1 100644 --- a/drivers/pci/host/Makefile +++ b/drivers/pci/host/Makefile | |||
@@ -12,3 +12,4 @@ obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o | |||
12 | obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o | 12 | obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o |
13 | obj-$(CONFIG_PCI_XGENE) += pci-xgene.o | 13 | obj-$(CONFIG_PCI_XGENE) += pci-xgene.o |
14 | obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o | 14 | obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o |
15 | obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o | ||
diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c index 6eb1aa75bd37..ba46e581db99 100644 --- a/drivers/pci/host/pci-host-generic.c +++ b/drivers/pci/host/pci-host-generic.c | |||
@@ -76,55 +76,9 @@ static struct gen_pci_cfg_bus_ops gen_pci_cfg_ecam_bus_ops = { | |||
76 | .map_bus = gen_pci_map_cfg_bus_ecam, | 76 | .map_bus = gen_pci_map_cfg_bus_ecam, |
77 | }; | 77 | }; |
78 | 78 | ||
79 | static int gen_pci_config_read(struct pci_bus *bus, unsigned int devfn, | ||
80 | int where, int size, u32 *val) | ||
81 | { | ||
82 | void __iomem *addr; | ||
83 | struct pci_sys_data *sys = bus->sysdata; | ||
84 | struct gen_pci *pci = sys->private_data; | ||
85 | |||
86 | addr = pci->cfg.ops->map_bus(bus, devfn, where); | ||
87 | |||
88 | switch (size) { | ||
89 | case 1: | ||
90 | *val = readb(addr); | ||
91 | break; | ||
92 | case 2: | ||
93 | *val = readw(addr); | ||
94 | break; | ||
95 | default: | ||
96 | *val = readl(addr); | ||
97 | } | ||
98 | |||
99 | return PCIBIOS_SUCCESSFUL; | ||
100 | } | ||
101 | |||
102 | static int gen_pci_config_write(struct pci_bus *bus, unsigned int devfn, | ||
103 | int where, int size, u32 val) | ||
104 | { | ||
105 | void __iomem *addr; | ||
106 | struct pci_sys_data *sys = bus->sysdata; | ||
107 | struct gen_pci *pci = sys->private_data; | ||
108 | |||
109 | addr = pci->cfg.ops->map_bus(bus, devfn, where); | ||
110 | |||
111 | switch (size) { | ||
112 | case 1: | ||
113 | writeb(val, addr); | ||
114 | break; | ||
115 | case 2: | ||
116 | writew(val, addr); | ||
117 | break; | ||
118 | default: | ||
119 | writel(val, addr); | ||
120 | } | ||
121 | |||
122 | return PCIBIOS_SUCCESSFUL; | ||
123 | } | ||
124 | |||
125 | static struct pci_ops gen_pci_ops = { | 79 | static struct pci_ops gen_pci_ops = { |
126 | .read = gen_pci_config_read, | 80 | .read = pci_generic_config_read, |
127 | .write = gen_pci_config_write, | 81 | .write = pci_generic_config_write, |
128 | }; | 82 | }; |
129 | 83 | ||
130 | static const struct of_device_id gen_pci_of_match[] = { | 84 | static const struct of_device_id gen_pci_of_match[] = { |
@@ -149,14 +103,14 @@ static int gen_pci_parse_request_of_pci_ranges(struct gen_pci *pci) | |||
149 | struct device *dev = pci->host.dev.parent; | 103 | struct device *dev = pci->host.dev.parent; |
150 | struct device_node *np = dev->of_node; | 104 | struct device_node *np = dev->of_node; |
151 | resource_size_t iobase; | 105 | resource_size_t iobase; |
152 | struct pci_host_bridge_window *win; | 106 | struct resource_entry *win; |
153 | 107 | ||
154 | err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources, | 108 | err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources, |
155 | &iobase); | 109 | &iobase); |
156 | if (err) | 110 | if (err) |
157 | return err; | 111 | return err; |
158 | 112 | ||
159 | list_for_each_entry(win, &pci->resources, list) { | 113 | resource_list_for_each_entry(win, &pci->resources) { |
160 | struct resource *parent, *res = win->res; | 114 | struct resource *parent, *res = win->res; |
161 | 115 | ||
162 | switch (resource_type(res)) { | 116 | switch (resource_type(res)) { |
@@ -287,6 +241,7 @@ static int gen_pci_probe(struct platform_device *pdev) | |||
287 | 241 | ||
288 | of_id = of_match_node(gen_pci_of_match, np); | 242 | of_id = of_match_node(gen_pci_of_match, np); |
289 | pci->cfg.ops = of_id->data; | 243 | pci->cfg.ops = of_id->data; |
244 | gen_pci_ops.map_bus = pci->cfg.ops->map_bus; | ||
290 | pci->host.dev.parent = dev; | 245 | pci->host.dev.parent = dev; |
291 | INIT_LIST_HEAD(&pci->host.windows); | 246 | INIT_LIST_HEAD(&pci->host.windows); |
292 | INIT_LIST_HEAD(&pci->resources); | 247 | INIT_LIST_HEAD(&pci->resources); |
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c index 78f79e31ac5c..75333b0c4f0a 100644 --- a/drivers/pci/host/pci-keystone.c +++ b/drivers/pci/host/pci-keystone.c | |||
@@ -119,7 +119,7 @@ static void ks_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc) | |||
119 | struct pcie_port *pp = &ks_pcie->pp; | 119 | struct pcie_port *pp = &ks_pcie->pp; |
120 | struct irq_chip *chip = irq_desc_get_chip(desc); | 120 | struct irq_chip *chip = irq_desc_get_chip(desc); |
121 | 121 | ||
122 | dev_dbg(pp->dev, "ks_pci_msi_irq_handler, irq %d\n", irq); | 122 | dev_dbg(pp->dev, "%s, irq %d\n", __func__, irq); |
123 | 123 | ||
124 | /* | 124 | /* |
125 | * The chained irq handler installation would have replaced normal | 125 | * The chained irq handler installation would have replaced normal |
@@ -197,7 +197,7 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie, | |||
197 | */ | 197 | */ |
198 | for (temp = 0; temp < max_host_irqs; temp++) { | 198 | for (temp = 0; temp < max_host_irqs; temp++) { |
199 | host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp); | 199 | host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp); |
200 | if (host_irqs[temp] < 0) | 200 | if (!host_irqs[temp]) |
201 | break; | 201 | break; |
202 | } | 202 | } |
203 | if (temp) { | 203 | if (temp) { |
diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c index 6697b1a4d4fa..68c9e5e9b0a8 100644 --- a/drivers/pci/host/pci-layerscape.c +++ b/drivers/pci/host/pci-layerscape.c | |||
@@ -167,7 +167,6 @@ MODULE_DEVICE_TABLE(of, ls_pcie_of_match); | |||
167 | static struct platform_driver ls_pcie_driver = { | 167 | static struct platform_driver ls_pcie_driver = { |
168 | .driver = { | 168 | .driver = { |
169 | .name = "layerscape-pcie", | 169 | .name = "layerscape-pcie", |
170 | .owner = THIS_MODULE, | ||
171 | .of_match_table = ls_pcie_of_match, | 170 | .of_match_table = ls_pcie_of_match, |
172 | }, | 171 | }, |
173 | }; | 172 | }; |
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c index 1dd759596b0a..1309cfbaa719 100644 --- a/drivers/pci/host/pci-mvebu.c +++ b/drivers/pci/host/pci-mvebu.c | |||
@@ -101,9 +101,7 @@ struct mvebu_pcie { | |||
101 | struct mvebu_pcie_port *ports; | 101 | struct mvebu_pcie_port *ports; |
102 | struct msi_controller *msi; | 102 | struct msi_controller *msi; |
103 | struct resource io; | 103 | struct resource io; |
104 | char io_name[30]; | ||
105 | struct resource realio; | 104 | struct resource realio; |
106 | char mem_name[30]; | ||
107 | struct resource mem; | 105 | struct resource mem; |
108 | struct resource busn; | 106 | struct resource busn; |
109 | int nports; | 107 | int nports; |
@@ -723,18 +721,9 @@ static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys) | |||
723 | { | 721 | { |
724 | struct mvebu_pcie *pcie = sys_to_pcie(sys); | 722 | struct mvebu_pcie *pcie = sys_to_pcie(sys); |
725 | int i; | 723 | int i; |
726 | int domain = 0; | ||
727 | 724 | ||
728 | #ifdef CONFIG_PCI_DOMAINS | 725 | pcie->mem.name = "PCI MEM"; |
729 | domain = sys->domain; | 726 | pcie->realio.name = "PCI I/O"; |
730 | #endif | ||
731 | |||
732 | snprintf(pcie->mem_name, sizeof(pcie->mem_name), "PCI MEM %04x", | ||
733 | domain); | ||
734 | pcie->mem.name = pcie->mem_name; | ||
735 | |||
736 | snprintf(pcie->io_name, sizeof(pcie->io_name), "PCI I/O %04x", domain); | ||
737 | pcie->realio.name = pcie->io_name; | ||
738 | 727 | ||
739 | if (request_resource(&iomem_resource, &pcie->mem)) | 728 | if (request_resource(&iomem_resource, &pcie->mem)) |
740 | return 0; | 729 | return 0; |
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c index d9c042febb1a..dd6b84e6206c 100644 --- a/drivers/pci/host/pci-rcar-gen2.c +++ b/drivers/pci/host/pci-rcar-gen2.c | |||
@@ -131,52 +131,6 @@ static void __iomem *rcar_pci_cfg_base(struct pci_bus *bus, unsigned int devfn, | |||
131 | return priv->reg + (slot >> 1) * 0x100 + where; | 131 | return priv->reg + (slot >> 1) * 0x100 + where; |
132 | } | 132 | } |
133 | 133 | ||
134 | static int rcar_pci_read_config(struct pci_bus *bus, unsigned int devfn, | ||
135 | int where, int size, u32 *val) | ||
136 | { | ||
137 | void __iomem *reg = rcar_pci_cfg_base(bus, devfn, where); | ||
138 | |||
139 | if (!reg) | ||
140 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
141 | |||
142 | switch (size) { | ||
143 | case 1: | ||
144 | *val = ioread8(reg); | ||
145 | break; | ||
146 | case 2: | ||
147 | *val = ioread16(reg); | ||
148 | break; | ||
149 | default: | ||
150 | *val = ioread32(reg); | ||
151 | break; | ||
152 | } | ||
153 | |||
154 | return PCIBIOS_SUCCESSFUL; | ||
155 | } | ||
156 | |||
157 | static int rcar_pci_write_config(struct pci_bus *bus, unsigned int devfn, | ||
158 | int where, int size, u32 val) | ||
159 | { | ||
160 | void __iomem *reg = rcar_pci_cfg_base(bus, devfn, where); | ||
161 | |||
162 | if (!reg) | ||
163 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
164 | |||
165 | switch (size) { | ||
166 | case 1: | ||
167 | iowrite8(val, reg); | ||
168 | break; | ||
169 | case 2: | ||
170 | iowrite16(val, reg); | ||
171 | break; | ||
172 | default: | ||
173 | iowrite32(val, reg); | ||
174 | break; | ||
175 | } | ||
176 | |||
177 | return PCIBIOS_SUCCESSFUL; | ||
178 | } | ||
179 | |||
180 | /* PCI interrupt mapping */ | 134 | /* PCI interrupt mapping */ |
181 | static int rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 135 | static int rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
182 | { | 136 | { |
@@ -325,8 +279,9 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys) | |||
325 | } | 279 | } |
326 | 280 | ||
327 | static struct pci_ops rcar_pci_ops = { | 281 | static struct pci_ops rcar_pci_ops = { |
328 | .read = rcar_pci_read_config, | 282 | .map_bus = rcar_pci_cfg_base, |
329 | .write = rcar_pci_write_config, | 283 | .read = pci_generic_config_read, |
284 | .write = pci_generic_config_write, | ||
330 | }; | 285 | }; |
331 | 286 | ||
332 | static int rcar_pci_probe(struct platform_device *pdev) | 287 | static int rcar_pci_probe(struct platform_device *pdev) |
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c index a800ae916394..00e92720d7f7 100644 --- a/drivers/pci/host/pci-tegra.c +++ b/drivers/pci/host/pci-tegra.c | |||
@@ -480,59 +480,10 @@ static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus, | |||
480 | return addr; | 480 | return addr; |
481 | } | 481 | } |
482 | 482 | ||
483 | static int tegra_pcie_read_conf(struct pci_bus *bus, unsigned int devfn, | ||
484 | int where, int size, u32 *value) | ||
485 | { | ||
486 | void __iomem *addr; | ||
487 | |||
488 | addr = tegra_pcie_conf_address(bus, devfn, where); | ||
489 | if (!addr) { | ||
490 | *value = 0xffffffff; | ||
491 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
492 | } | ||
493 | |||
494 | *value = readl(addr); | ||
495 | |||
496 | if (size == 1) | ||
497 | *value = (*value >> (8 * (where & 3))) & 0xff; | ||
498 | else if (size == 2) | ||
499 | *value = (*value >> (8 * (where & 3))) & 0xffff; | ||
500 | |||
501 | return PCIBIOS_SUCCESSFUL; | ||
502 | } | ||
503 | |||
504 | static int tegra_pcie_write_conf(struct pci_bus *bus, unsigned int devfn, | ||
505 | int where, int size, u32 value) | ||
506 | { | ||
507 | void __iomem *addr; | ||
508 | u32 mask, tmp; | ||
509 | |||
510 | addr = tegra_pcie_conf_address(bus, devfn, where); | ||
511 | if (!addr) | ||
512 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
513 | |||
514 | if (size == 4) { | ||
515 | writel(value, addr); | ||
516 | return PCIBIOS_SUCCESSFUL; | ||
517 | } | ||
518 | |||
519 | if (size == 2) | ||
520 | mask = ~(0xffff << ((where & 0x3) * 8)); | ||
521 | else if (size == 1) | ||
522 | mask = ~(0xff << ((where & 0x3) * 8)); | ||
523 | else | ||
524 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
525 | |||
526 | tmp = readl(addr) & mask; | ||
527 | tmp |= value << ((where & 0x3) * 8); | ||
528 | writel(tmp, addr); | ||
529 | |||
530 | return PCIBIOS_SUCCESSFUL; | ||
531 | } | ||
532 | |||
533 | static struct pci_ops tegra_pcie_ops = { | 483 | static struct pci_ops tegra_pcie_ops = { |
534 | .read = tegra_pcie_read_conf, | 484 | .map_bus = tegra_pcie_conf_address, |
535 | .write = tegra_pcie_write_conf, | 485 | .read = pci_generic_config_read32, |
486 | .write = pci_generic_config_write32, | ||
536 | }; | 487 | }; |
537 | 488 | ||
538 | static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port) | 489 | static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port) |
@@ -625,19 +576,6 @@ static void tegra_pcie_port_free(struct tegra_pcie_port *port) | |||
625 | devm_kfree(pcie->dev, port); | 576 | devm_kfree(pcie->dev, port); |
626 | } | 577 | } |
627 | 578 | ||
628 | static void tegra_pcie_fixup_bridge(struct pci_dev *dev) | ||
629 | { | ||
630 | u16 reg; | ||
631 | |||
632 | if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) { | ||
633 | pci_read_config_word(dev, PCI_COMMAND, ®); | ||
634 | reg |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY | | ||
635 | PCI_COMMAND_MASTER | PCI_COMMAND_SERR); | ||
636 | pci_write_config_word(dev, PCI_COMMAND, reg); | ||
637 | } | ||
638 | } | ||
639 | DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_fixup_bridge); | ||
640 | |||
641 | /* Tegra PCIE root complex wrongly reports device class */ | 579 | /* Tegra PCIE root complex wrongly reports device class */ |
642 | static void tegra_pcie_fixup_class(struct pci_dev *dev) | 580 | static void tegra_pcie_fixup_class(struct pci_dev *dev) |
643 | { | 581 | { |
diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c new file mode 100644 index 000000000000..1ec694a52379 --- /dev/null +++ b/drivers/pci/host/pci-versatile.c | |||
@@ -0,0 +1,237 @@ | |||
1 | /* | ||
2 | * Copyright 2004 Koninklijke Philips Electronics NV | ||
3 | * | ||
4 | * Conversion to platform driver and DT: | ||
5 | * Copyright 2014 Linaro Ltd. | ||
6 | * | ||
7 | * This software is licensed under the terms of the GNU General Public | ||
8 | * License version 2, as published by the Free Software Foundation, and | ||
9 | * may be copied, distributed, and modified under those terms. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * 14/04/2005 Initial version, colin.king@philips.com | ||
17 | */ | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/of_address.h> | ||
21 | #include <linux/of_pci.h> | ||
22 | #include <linux/of_platform.h> | ||
23 | #include <linux/pci.h> | ||
24 | #include <linux/platform_device.h> | ||
25 | |||
26 | static void __iomem *versatile_pci_base; | ||
27 | static void __iomem *versatile_cfg_base[2]; | ||
28 | |||
29 | #define PCI_IMAP(m) (versatile_pci_base + ((m) * 4)) | ||
30 | #define PCI_SMAP(m) (versatile_pci_base + 0x14 + ((m) * 4)) | ||
31 | #define PCI_SELFID (versatile_pci_base + 0xc) | ||
32 | |||
33 | #define VP_PCI_DEVICE_ID 0x030010ee | ||
34 | #define VP_PCI_CLASS_ID 0x0b400000 | ||
35 | |||
36 | static u32 pci_slot_ignore; | ||
37 | |||
38 | static int __init versatile_pci_slot_ignore(char *str) | ||
39 | { | ||
40 | int retval; | ||
41 | int slot; | ||
42 | |||
43 | while ((retval = get_option(&str, &slot))) { | ||
44 | if ((slot < 0) || (slot > 31)) | ||
45 | pr_err("Illegal slot value: %d\n", slot); | ||
46 | else | ||
47 | pci_slot_ignore |= (1 << slot); | ||
48 | } | ||
49 | return 1; | ||
50 | } | ||
51 | __setup("pci_slot_ignore=", versatile_pci_slot_ignore); | ||
52 | |||
53 | |||
54 | static void __iomem *versatile_map_bus(struct pci_bus *bus, | ||
55 | unsigned int devfn, int offset) | ||
56 | { | ||
57 | unsigned int busnr = bus->number; | ||
58 | |||
59 | if (pci_slot_ignore & (1 << PCI_SLOT(devfn))) | ||
60 | return NULL; | ||
61 | |||
62 | return versatile_cfg_base[1] + ((busnr << 16) | (devfn << 8) | offset); | ||
63 | } | ||
64 | |||
65 | static struct pci_ops pci_versatile_ops = { | ||
66 | .map_bus = versatile_map_bus, | ||
67 | .read = pci_generic_config_read32, | ||
68 | .write = pci_generic_config_write, | ||
69 | }; | ||
70 | |||
71 | static int versatile_pci_parse_request_of_pci_ranges(struct device *dev, | ||
72 | struct list_head *res) | ||
73 | { | ||
74 | int err, mem = 1, res_valid = 0; | ||
75 | struct device_node *np = dev->of_node; | ||
76 | resource_size_t iobase; | ||
77 | struct resource_entry *win; | ||
78 | |||
79 | err = of_pci_get_host_bridge_resources(np, 0, 0xff, res, &iobase); | ||
80 | if (err) | ||
81 | return err; | ||
82 | |||
83 | resource_list_for_each_entry(win, res, list) { | ||
84 | struct resource *parent, *res = win->res; | ||
85 | |||
86 | switch (resource_type(res)) { | ||
87 | case IORESOURCE_IO: | ||
88 | parent = &ioport_resource; | ||
89 | err = pci_remap_iospace(res, iobase); | ||
90 | if (err) { | ||
91 | dev_warn(dev, "error %d: failed to map resource %pR\n", | ||
92 | err, res); | ||
93 | continue; | ||
94 | } | ||
95 | break; | ||
96 | case IORESOURCE_MEM: | ||
97 | parent = &iomem_resource; | ||
98 | res_valid |= !(res->flags & IORESOURCE_PREFETCH); | ||
99 | |||
100 | writel(res->start >> 28, PCI_IMAP(mem)); | ||
101 | writel(PHYS_OFFSET >> 28, PCI_SMAP(mem)); | ||
102 | mem++; | ||
103 | |||
104 | break; | ||
105 | case IORESOURCE_BUS: | ||
106 | default: | ||
107 | continue; | ||
108 | } | ||
109 | |||
110 | err = devm_request_resource(dev, parent, res); | ||
111 | if (err) | ||
112 | goto out_release_res; | ||
113 | } | ||
114 | |||
115 | if (!res_valid) { | ||
116 | dev_err(dev, "non-prefetchable memory resource required\n"); | ||
117 | err = -EINVAL; | ||
118 | goto out_release_res; | ||
119 | } | ||
120 | |||
121 | return 0; | ||
122 | |||
123 | out_release_res: | ||
124 | pci_free_resource_list(res); | ||
125 | return err; | ||
126 | } | ||
127 | |||
128 | /* Unused, temporary to satisfy ARM arch code */ | ||
129 | struct pci_sys_data sys; | ||
130 | |||
131 | static int versatile_pci_probe(struct platform_device *pdev) | ||
132 | { | ||
133 | struct resource *res; | ||
134 | int ret, i, myslot = -1; | ||
135 | u32 val; | ||
136 | void __iomem *local_pci_cfg_base; | ||
137 | struct pci_bus *bus; | ||
138 | LIST_HEAD(pci_res); | ||
139 | |||
140 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
141 | if (!res) | ||
142 | return -ENODEV; | ||
143 | versatile_pci_base = devm_ioremap_resource(&pdev->dev, res); | ||
144 | |||
145 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
146 | if (!res) | ||
147 | return -ENODEV; | ||
148 | versatile_cfg_base[0] = devm_ioremap_resource(&pdev->dev, res); | ||
149 | |||
150 | res = platform_get_resource(pdev, IORESOURCE_MEM, 2); | ||
151 | if (!res) | ||
152 | return -ENODEV; | ||
153 | versatile_cfg_base[1] = devm_ioremap_resource(&pdev->dev, res); | ||
154 | |||
155 | ret = versatile_pci_parse_request_of_pci_ranges(&pdev->dev, &pci_res); | ||
156 | if (ret) | ||
157 | return ret; | ||
158 | |||
159 | /* | ||
160 | * We need to discover the PCI core first to configure itself | ||
161 | * before the main PCI probing is performed | ||
162 | */ | ||
163 | for (i = 0; i < 32; i++) { | ||
164 | if ((readl(versatile_cfg_base[0] + (i << 11) + PCI_VENDOR_ID) == VP_PCI_DEVICE_ID) && | ||
165 | (readl(versatile_cfg_base[0] + (i << 11) + PCI_CLASS_REVISION) == VP_PCI_CLASS_ID)) { | ||
166 | myslot = i; | ||
167 | break; | ||
168 | } | ||
169 | } | ||
170 | if (myslot == -1) { | ||
171 | dev_err(&pdev->dev, "Cannot find PCI core!\n"); | ||
172 | return -EIO; | ||
173 | } | ||
174 | /* | ||
175 | * Do not to map Versatile FPGA PCI device into memory space | ||
176 | */ | ||
177 | pci_slot_ignore |= (1 << myslot); | ||
178 | |||
179 | dev_info(&pdev->dev, "PCI core found (slot %d)\n", myslot); | ||
180 | |||
181 | writel(myslot, PCI_SELFID); | ||
182 | local_pci_cfg_base = versatile_cfg_base[1] + (myslot << 11); | ||
183 | |||
184 | val = readl(local_pci_cfg_base + PCI_COMMAND); | ||
185 | val |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE; | ||
186 | writel(val, local_pci_cfg_base + PCI_COMMAND); | ||
187 | |||
188 | /* | ||
189 | * Configure the PCI inbound memory windows to be 1:1 mapped to SDRAM | ||
190 | */ | ||
191 | writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_0); | ||
192 | writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_1); | ||
193 | writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_2); | ||
194 | |||
195 | /* | ||
196 | * For many years the kernel and QEMU were symbiotically buggy | ||
197 | * in that they both assumed the same broken IRQ mapping. | ||
198 | * QEMU therefore attempts to auto-detect old broken kernels | ||
199 | * so that they still work on newer QEMU as they did on old | ||
200 | * QEMU. Since we now use the correct (ie matching-hardware) | ||
201 | * IRQ mapping we write a definitely different value to a | ||
202 | * PCI_INTERRUPT_LINE register to tell QEMU that we expect | ||
203 | * real hardware behaviour and it need not be backwards | ||
204 | * compatible for us. This write is harmless on real hardware. | ||
205 | */ | ||
206 | writel(0, versatile_cfg_base[0] + PCI_INTERRUPT_LINE); | ||
207 | |||
208 | pci_add_flags(PCI_ENABLE_PROC_DOMAINS); | ||
209 | pci_add_flags(PCI_REASSIGN_ALL_BUS | PCI_REASSIGN_ALL_RSRC); | ||
210 | |||
211 | bus = pci_scan_root_bus(&pdev->dev, 0, &pci_versatile_ops, &sys, &pci_res); | ||
212 | if (!bus) | ||
213 | return -ENOMEM; | ||
214 | |||
215 | pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); | ||
216 | pci_assign_unassigned_bus_resources(bus); | ||
217 | |||
218 | return 0; | ||
219 | } | ||
220 | |||
221 | static const struct of_device_id versatile_pci_of_match[] = { | ||
222 | { .compatible = "arm,versatile-pci", }, | ||
223 | { }, | ||
224 | }; | ||
225 | MODULE_DEVICE_TABLE(of, versatile_pci_of_match); | ||
226 | |||
227 | static struct platform_driver versatile_pci_driver = { | ||
228 | .driver = { | ||
229 | .name = "versatile-pci", | ||
230 | .of_match_table = versatile_pci_of_match, | ||
231 | }, | ||
232 | .probe = versatile_pci_probe, | ||
233 | }; | ||
234 | module_platform_driver(versatile_pci_driver); | ||
235 | |||
236 | MODULE_DESCRIPTION("Versatile PCI driver"); | ||
237 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c index b1d0596457c5..aab55474dd0d 100644 --- a/drivers/pci/host/pci-xgene.c +++ b/drivers/pci/host/pci-xgene.c | |||
@@ -16,7 +16,7 @@ | |||
16 | * GNU General Public License for more details. | 16 | * GNU General Public License for more details. |
17 | * | 17 | * |
18 | */ | 18 | */ |
19 | #include <linux/clk-private.h> | 19 | #include <linux/clk.h> |
20 | #include <linux/delay.h> | 20 | #include <linux/delay.h> |
21 | #include <linux/io.h> | 21 | #include <linux/io.h> |
22 | #include <linux/jiffies.h> | 22 | #include <linux/jiffies.h> |
@@ -74,92 +74,6 @@ static inline u32 pcie_bar_low_val(u32 addr, u32 flags) | |||
74 | return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags; | 74 | return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags; |
75 | } | 75 | } |
76 | 76 | ||
77 | /* PCIe Configuration Out/In */ | ||
78 | static inline void xgene_pcie_cfg_out32(void __iomem *addr, int offset, u32 val) | ||
79 | { | ||
80 | writel(val, addr + offset); | ||
81 | } | ||
82 | |||
83 | static inline void xgene_pcie_cfg_out16(void __iomem *addr, int offset, u16 val) | ||
84 | { | ||
85 | u32 val32 = readl(addr + (offset & ~0x3)); | ||
86 | |||
87 | switch (offset & 0x3) { | ||
88 | case 2: | ||
89 | val32 &= ~0xFFFF0000; | ||
90 | val32 |= (u32)val << 16; | ||
91 | break; | ||
92 | case 0: | ||
93 | default: | ||
94 | val32 &= ~0xFFFF; | ||
95 | val32 |= val; | ||
96 | break; | ||
97 | } | ||
98 | writel(val32, addr + (offset & ~0x3)); | ||
99 | } | ||
100 | |||
101 | static inline void xgene_pcie_cfg_out8(void __iomem *addr, int offset, u8 val) | ||
102 | { | ||
103 | u32 val32 = readl(addr + (offset & ~0x3)); | ||
104 | |||
105 | switch (offset & 0x3) { | ||
106 | case 0: | ||
107 | val32 &= ~0xFF; | ||
108 | val32 |= val; | ||
109 | break; | ||
110 | case 1: | ||
111 | val32 &= ~0xFF00; | ||
112 | val32 |= (u32)val << 8; | ||
113 | break; | ||
114 | case 2: | ||
115 | val32 &= ~0xFF0000; | ||
116 | val32 |= (u32)val << 16; | ||
117 | break; | ||
118 | case 3: | ||
119 | default: | ||
120 | val32 &= ~0xFF000000; | ||
121 | val32 |= (u32)val << 24; | ||
122 | break; | ||
123 | } | ||
124 | writel(val32, addr + (offset & ~0x3)); | ||
125 | } | ||
126 | |||
127 | static inline void xgene_pcie_cfg_in32(void __iomem *addr, int offset, u32 *val) | ||
128 | { | ||
129 | *val = readl(addr + offset); | ||
130 | } | ||
131 | |||
132 | static inline void xgene_pcie_cfg_in16(void __iomem *addr, int offset, u32 *val) | ||
133 | { | ||
134 | *val = readl(addr + (offset & ~0x3)); | ||
135 | |||
136 | switch (offset & 0x3) { | ||
137 | case 2: | ||
138 | *val >>= 16; | ||
139 | break; | ||
140 | } | ||
141 | |||
142 | *val &= 0xFFFF; | ||
143 | } | ||
144 | |||
145 | static inline void xgene_pcie_cfg_in8(void __iomem *addr, int offset, u32 *val) | ||
146 | { | ||
147 | *val = readl(addr + (offset & ~0x3)); | ||
148 | |||
149 | switch (offset & 0x3) { | ||
150 | case 3: | ||
151 | *val = *val >> 24; | ||
152 | break; | ||
153 | case 2: | ||
154 | *val = *val >> 16; | ||
155 | break; | ||
156 | case 1: | ||
157 | *val = *val >> 8; | ||
158 | break; | ||
159 | } | ||
160 | *val &= 0xFF; | ||
161 | } | ||
162 | |||
163 | /* | 77 | /* |
164 | * When the address bit [17:16] is 2'b01, the Configuration access will be | 78 | * When the address bit [17:16] is 2'b01, the Configuration access will be |
165 | * treated as Type 1 and it will be forwarded to external PCIe device. | 79 | * treated as Type 1 and it will be forwarded to external PCIe device. |
@@ -213,69 +127,23 @@ static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset) | |||
213 | return false; | 127 | return false; |
214 | } | 128 | } |
215 | 129 | ||
216 | static int xgene_pcie_read_config(struct pci_bus *bus, unsigned int devfn, | 130 | static int xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, |
217 | int offset, int len, u32 *val) | 131 | int offset) |
218 | { | ||
219 | struct xgene_pcie_port *port = bus->sysdata; | ||
220 | void __iomem *addr; | ||
221 | |||
222 | if ((pci_is_root_bus(bus) && devfn != 0) || !port->link_up) | ||
223 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
224 | |||
225 | if (xgene_pcie_hide_rc_bars(bus, offset)) { | ||
226 | *val = 0; | ||
227 | return PCIBIOS_SUCCESSFUL; | ||
228 | } | ||
229 | |||
230 | xgene_pcie_set_rtdid_reg(bus, devfn); | ||
231 | addr = xgene_pcie_get_cfg_base(bus); | ||
232 | switch (len) { | ||
233 | case 1: | ||
234 | xgene_pcie_cfg_in8(addr, offset, val); | ||
235 | break; | ||
236 | case 2: | ||
237 | xgene_pcie_cfg_in16(addr, offset, val); | ||
238 | break; | ||
239 | default: | ||
240 | xgene_pcie_cfg_in32(addr, offset, val); | ||
241 | break; | ||
242 | } | ||
243 | |||
244 | return PCIBIOS_SUCCESSFUL; | ||
245 | } | ||
246 | |||
247 | static int xgene_pcie_write_config(struct pci_bus *bus, unsigned int devfn, | ||
248 | int offset, int len, u32 val) | ||
249 | { | 132 | { |
250 | struct xgene_pcie_port *port = bus->sysdata; | 133 | struct xgene_pcie_port *port = bus->sysdata; |
251 | void __iomem *addr; | ||
252 | 134 | ||
253 | if ((pci_is_root_bus(bus) && devfn != 0) || !port->link_up) | 135 | if ((pci_is_root_bus(bus) && devfn != 0) || !port->link_up || |
254 | return PCIBIOS_DEVICE_NOT_FOUND; | 136 | xgene_pcie_hide_rc_bars(bus, offset)) |
255 | 137 | return NULL; | |
256 | if (xgene_pcie_hide_rc_bars(bus, offset)) | ||
257 | return PCIBIOS_SUCCESSFUL; | ||
258 | 138 | ||
259 | xgene_pcie_set_rtdid_reg(bus, devfn); | 139 | xgene_pcie_set_rtdid_reg(bus, devfn); |
260 | addr = xgene_pcie_get_cfg_base(bus); | 140 | return xgene_pcie_get_cfg_base(bus); |
261 | switch (len) { | ||
262 | case 1: | ||
263 | xgene_pcie_cfg_out8(addr, offset, (u8)val); | ||
264 | break; | ||
265 | case 2: | ||
266 | xgene_pcie_cfg_out16(addr, offset, (u16)val); | ||
267 | break; | ||
268 | default: | ||
269 | xgene_pcie_cfg_out32(addr, offset, val); | ||
270 | break; | ||
271 | } | ||
272 | |||
273 | return PCIBIOS_SUCCESSFUL; | ||
274 | } | 141 | } |
275 | 142 | ||
276 | static struct pci_ops xgene_pcie_ops = { | 143 | static struct pci_ops xgene_pcie_ops = { |
277 | .read = xgene_pcie_read_config, | 144 | .map_bus = xgene_pcie_map_bus, |
278 | .write = xgene_pcie_write_config | 145 | .read = pci_generic_config_read32, |
146 | .write = pci_generic_config_write32, | ||
279 | }; | 147 | }; |
280 | 148 | ||
281 | static u64 xgene_pcie_set_ib_mask(void __iomem *csr_base, u32 addr, | 149 | static u64 xgene_pcie_set_ib_mask(void __iomem *csr_base, u32 addr, |
@@ -401,11 +269,11 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port, | |||
401 | struct list_head *res, | 269 | struct list_head *res, |
402 | resource_size_t io_base) | 270 | resource_size_t io_base) |
403 | { | 271 | { |
404 | struct pci_host_bridge_window *window; | 272 | struct resource_entry *window; |
405 | struct device *dev = port->dev; | 273 | struct device *dev = port->dev; |
406 | int ret; | 274 | int ret; |
407 | 275 | ||
408 | list_for_each_entry(window, res, list) { | 276 | resource_list_for_each_entry(window, res) { |
409 | struct resource *res = window->res; | 277 | struct resource *res = window->res; |
410 | u64 restype = resource_type(res); | 278 | u64 restype = resource_type(res); |
411 | 279 | ||
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c index df781cdf13c1..1f4ea6f2d910 100644 --- a/drivers/pci/host/pcie-designware.c +++ b/drivers/pci/host/pcie-designware.c | |||
@@ -283,6 +283,9 @@ static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev, | |||
283 | struct msi_msg msg; | 283 | struct msi_msg msg; |
284 | struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata); | 284 | struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata); |
285 | 285 | ||
286 | if (desc->msi_attrib.is_msix) | ||
287 | return -EINVAL; | ||
288 | |||
286 | irq = assign_irq(1, desc, &pos); | 289 | irq = assign_irq(1, desc, &pos); |
287 | if (irq < 0) | 290 | if (irq < 0) |
288 | return irq; | 291 | return irq; |
@@ -508,9 +511,6 @@ int __init dw_pcie_host_init(struct pcie_port *pp) | |||
508 | dw_pci.private_data = (void **)&pp; | 511 | dw_pci.private_data = (void **)&pp; |
509 | 512 | ||
510 | pci_common_init_dev(pp->dev, &dw_pci); | 513 | pci_common_init_dev(pp->dev, &dw_pci); |
511 | #ifdef CONFIG_PCI_DOMAINS | ||
512 | dw_pci.domain++; | ||
513 | #endif | ||
514 | 514 | ||
515 | return 0; | 515 | return 0; |
516 | } | 516 | } |
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c index 748786c402fc..c57bd0ac39a0 100644 --- a/drivers/pci/host/pcie-rcar.c +++ b/drivers/pci/host/pcie-rcar.c | |||
@@ -397,9 +397,6 @@ static void rcar_pcie_enable(struct rcar_pcie *pcie) | |||
397 | #endif | 397 | #endif |
398 | 398 | ||
399 | pci_common_init_dev(&pdev->dev, &rcar_pci); | 399 | pci_common_init_dev(&pdev->dev, &rcar_pci); |
400 | #ifdef CONFIG_PCI_DOMAINS | ||
401 | rcar_pci.domain++; | ||
402 | #endif | ||
403 | } | 400 | } |
404 | 401 | ||
405 | static int phy_wait_for_ack(struct rcar_pcie *pcie) | 402 | static int phy_wait_for_ack(struct rcar_pcie *pcie) |
@@ -757,7 +754,7 @@ static int rcar_pcie_get_resources(struct platform_device *pdev, | |||
757 | goto err_map_reg; | 754 | goto err_map_reg; |
758 | 755 | ||
759 | i = irq_of_parse_and_map(pdev->dev.of_node, 0); | 756 | i = irq_of_parse_and_map(pdev->dev.of_node, 0); |
760 | if (i < 0) { | 757 | if (!i) { |
761 | dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n"); | 758 | dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n"); |
762 | err = -ENOENT; | 759 | err = -ENOENT; |
763 | goto err_map_reg; | 760 | goto err_map_reg; |
@@ -765,7 +762,7 @@ static int rcar_pcie_get_resources(struct platform_device *pdev, | |||
765 | pcie->msi.irq1 = i; | 762 | pcie->msi.irq1 = i; |
766 | 763 | ||
767 | i = irq_of_parse_and_map(pdev->dev.of_node, 1); | 764 | i = irq_of_parse_and_map(pdev->dev.of_node, 1); |
768 | if (i < 0) { | 765 | if (!i) { |
769 | dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n"); | 766 | dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n"); |
770 | err = -ENOENT; | 767 | err = -ENOENT; |
771 | goto err_map_reg; | 768 | goto err_map_reg; |
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c index ef3ebaf9a738..f1a06a091ccb 100644 --- a/drivers/pci/host/pcie-xilinx.c +++ b/drivers/pci/host/pcie-xilinx.c | |||
@@ -148,10 +148,10 @@ static inline bool xilinx_pcie_link_is_up(struct xilinx_pcie_port *port) | |||
148 | */ | 148 | */ |
149 | static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port) | 149 | static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port) |
150 | { | 150 | { |
151 | u32 val = pcie_read(port, XILINX_PCIE_REG_RPEFR); | 151 | unsigned long val = pcie_read(port, XILINX_PCIE_REG_RPEFR); |
152 | 152 | ||
153 | if (val & XILINX_PCIE_RPEFR_ERR_VALID) { | 153 | if (val & XILINX_PCIE_RPEFR_ERR_VALID) { |
154 | dev_dbg(port->dev, "Requester ID %d\n", | 154 | dev_dbg(port->dev, "Requester ID %lu\n", |
155 | val & XILINX_PCIE_RPEFR_REQ_ID); | 155 | val & XILINX_PCIE_RPEFR_REQ_ID); |
156 | pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK, | 156 | pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK, |
157 | XILINX_PCIE_REG_RPEFR); | 157 | XILINX_PCIE_REG_RPEFR); |
@@ -189,7 +189,7 @@ static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) | |||
189 | } | 189 | } |
190 | 190 | ||
191 | /** | 191 | /** |
192 | * xilinx_pcie_config_base - Get configuration base | 192 | * xilinx_pcie_map_bus - Get configuration base |
193 | * @bus: PCI Bus structure | 193 | * @bus: PCI Bus structure |
194 | * @devfn: Device/function | 194 | * @devfn: Device/function |
195 | * @where: Offset from base | 195 | * @where: Offset from base |
@@ -197,96 +197,26 @@ static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) | |||
197 | * Return: Base address of the configuration space needed to be | 197 | * Return: Base address of the configuration space needed to be |
198 | * accessed. | 198 | * accessed. |
199 | */ | 199 | */ |
200 | static void __iomem *xilinx_pcie_config_base(struct pci_bus *bus, | 200 | static void __iomem *xilinx_pcie_map_bus(struct pci_bus *bus, |
201 | unsigned int devfn, int where) | 201 | unsigned int devfn, int where) |
202 | { | 202 | { |
203 | struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata); | 203 | struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata); |
204 | int relbus; | 204 | int relbus; |
205 | 205 | ||
206 | if (!xilinx_pcie_valid_device(bus, devfn)) | ||
207 | return NULL; | ||
208 | |||
206 | relbus = (bus->number << ECAM_BUS_NUM_SHIFT) | | 209 | relbus = (bus->number << ECAM_BUS_NUM_SHIFT) | |
207 | (devfn << ECAM_DEV_NUM_SHIFT); | 210 | (devfn << ECAM_DEV_NUM_SHIFT); |
208 | 211 | ||
209 | return port->reg_base + relbus + where; | 212 | return port->reg_base + relbus + where; |
210 | } | 213 | } |
211 | 214 | ||
212 | /** | ||
213 | * xilinx_pcie_read_config - Read configuration space | ||
214 | * @bus: PCI Bus structure | ||
215 | * @devfn: Device/function | ||
216 | * @where: Offset from base | ||
217 | * @size: Byte/word/dword | ||
218 | * @val: Value to be read | ||
219 | * | ||
220 | * Return: PCIBIOS_SUCCESSFUL on success | ||
221 | * PCIBIOS_DEVICE_NOT_FOUND on failure | ||
222 | */ | ||
223 | static int xilinx_pcie_read_config(struct pci_bus *bus, unsigned int devfn, | ||
224 | int where, int size, u32 *val) | ||
225 | { | ||
226 | void __iomem *addr; | ||
227 | |||
228 | if (!xilinx_pcie_valid_device(bus, devfn)) { | ||
229 | *val = 0xFFFFFFFF; | ||
230 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
231 | } | ||
232 | |||
233 | addr = xilinx_pcie_config_base(bus, devfn, where); | ||
234 | |||
235 | switch (size) { | ||
236 | case 1: | ||
237 | *val = readb(addr); | ||
238 | break; | ||
239 | case 2: | ||
240 | *val = readw(addr); | ||
241 | break; | ||
242 | default: | ||
243 | *val = readl(addr); | ||
244 | break; | ||
245 | } | ||
246 | |||
247 | return PCIBIOS_SUCCESSFUL; | ||
248 | } | ||
249 | |||
250 | /** | ||
251 | * xilinx_pcie_write_config - Write configuration space | ||
252 | * @bus: PCI Bus structure | ||
253 | * @devfn: Device/function | ||
254 | * @where: Offset from base | ||
255 | * @size: Byte/word/dword | ||
256 | * @val: Value to be written to device | ||
257 | * | ||
258 | * Return: PCIBIOS_SUCCESSFUL on success | ||
259 | * PCIBIOS_DEVICE_NOT_FOUND on failure | ||
260 | */ | ||
261 | static int xilinx_pcie_write_config(struct pci_bus *bus, unsigned int devfn, | ||
262 | int where, int size, u32 val) | ||
263 | { | ||
264 | void __iomem *addr; | ||
265 | |||
266 | if (!xilinx_pcie_valid_device(bus, devfn)) | ||
267 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
268 | |||
269 | addr = xilinx_pcie_config_base(bus, devfn, where); | ||
270 | |||
271 | switch (size) { | ||
272 | case 1: | ||
273 | writeb(val, addr); | ||
274 | break; | ||
275 | case 2: | ||
276 | writew(val, addr); | ||
277 | break; | ||
278 | default: | ||
279 | writel(val, addr); | ||
280 | break; | ||
281 | } | ||
282 | |||
283 | return PCIBIOS_SUCCESSFUL; | ||
284 | } | ||
285 | |||
286 | /* PCIe operations */ | 215 | /* PCIe operations */ |
287 | static struct pci_ops xilinx_pcie_ops = { | 216 | static struct pci_ops xilinx_pcie_ops = { |
288 | .read = xilinx_pcie_read_config, | 217 | .map_bus = xilinx_pcie_map_bus, |
289 | .write = xilinx_pcie_write_config, | 218 | .read = pci_generic_config_read, |
219 | .write = pci_generic_config_write, | ||
290 | }; | 220 | }; |
291 | 221 | ||
292 | /* MSI functions */ | 222 | /* MSI functions */ |
@@ -737,7 +667,7 @@ static int xilinx_pcie_parse_and_add_res(struct xilinx_pcie_port *port) | |||
737 | resource_size_t offset; | 667 | resource_size_t offset; |
738 | struct of_pci_range_parser parser; | 668 | struct of_pci_range_parser parser; |
739 | struct of_pci_range range; | 669 | struct of_pci_range range; |
740 | struct pci_host_bridge_window *win; | 670 | struct resource_entry *win; |
741 | int err = 0, mem_resno = 0; | 671 | int err = 0, mem_resno = 0; |
742 | 672 | ||
743 | /* Get the ranges */ | 673 | /* Get the ranges */ |
@@ -807,7 +737,7 @@ static int xilinx_pcie_parse_and_add_res(struct xilinx_pcie_port *port) | |||
807 | 737 | ||
808 | free_resources: | 738 | free_resources: |
809 | release_child_resources(&iomem_resource); | 739 | release_child_resources(&iomem_resource); |
810 | list_for_each_entry(win, &port->resources, list) | 740 | resource_list_for_each_entry(win, &port->resources) |
811 | devm_kfree(dev, win->res); | 741 | devm_kfree(dev, win->res); |
812 | pci_free_resource_list(&port->resources); | 742 | pci_free_resource_list(&port->resources); |
813 | 743 | ||
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c index a5a7fd8332ac..46db29395a62 100644 --- a/drivers/pci/hotplug/cpci_hotplug_core.c +++ b/drivers/pci/hotplug/cpci_hotplug_core.c | |||
@@ -214,8 +214,7 @@ static void release_slot(struct hotplug_slot *hotplug_slot) | |||
214 | 214 | ||
215 | kfree(slot->hotplug_slot->info); | 215 | kfree(slot->hotplug_slot->info); |
216 | kfree(slot->hotplug_slot); | 216 | kfree(slot->hotplug_slot); |
217 | if (slot->dev) | 217 | pci_dev_put(slot->dev); |
218 | pci_dev_put(slot->dev); | ||
219 | kfree(slot); | 218 | kfree(slot); |
220 | } | 219 | } |
221 | 220 | ||
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index ff32e85e1de6..f052e951b23e 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c | |||
@@ -532,8 +532,6 @@ static void interrupt_event_handler(struct work_struct *work) | |||
532 | pciehp_green_led_off(p_slot); | 532 | pciehp_green_led_off(p_slot); |
533 | break; | 533 | break; |
534 | case INT_PRESENCE_ON: | 534 | case INT_PRESENCE_ON: |
535 | if (!HP_SUPR_RM(ctrl)) | ||
536 | break; | ||
537 | ctrl_dbg(ctrl, "Surprise Insertion\n"); | 535 | ctrl_dbg(ctrl, "Surprise Insertion\n"); |
538 | handle_surprise_event(p_slot); | 536 | handle_surprise_event(p_slot); |
539 | break; | 537 | break; |
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c index bada20999870..c32fb786d48e 100644 --- a/drivers/pci/hotplug/sgi_hotplug.c +++ b/drivers/pci/hotplug/sgi_hotplug.c | |||
@@ -475,7 +475,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot) | |||
475 | struct slot *slot = bss_hotplug_slot->private; | 475 | struct slot *slot = bss_hotplug_slot->private; |
476 | struct pci_dev *dev, *temp; | 476 | struct pci_dev *dev, *temp; |
477 | int rc; | 477 | int rc; |
478 | acpi_owner_id ssdt_id = 0; | 478 | acpi_handle ssdt_hdl = NULL; |
479 | 479 | ||
480 | /* Acquire update access to the bus */ | 480 | /* Acquire update access to the bus */ |
481 | mutex_lock(&sn_hotplug_mutex); | 481 | mutex_lock(&sn_hotplug_mutex); |
@@ -522,7 +522,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot) | |||
522 | if (ACPI_SUCCESS(ret) && | 522 | if (ACPI_SUCCESS(ret) && |
523 | (adr>>16) == (slot->device_num + 1)) { | 523 | (adr>>16) == (slot->device_num + 1)) { |
524 | /* retain the owner id */ | 524 | /* retain the owner id */ |
525 | acpi_get_id(chandle, &ssdt_id); | 525 | ssdt_hdl = chandle; |
526 | 526 | ||
527 | ret = acpi_bus_get_device(chandle, | 527 | ret = acpi_bus_get_device(chandle, |
528 | &device); | 528 | &device); |
@@ -547,12 +547,13 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot) | |||
547 | pci_unlock_rescan_remove(); | 547 | pci_unlock_rescan_remove(); |
548 | 548 | ||
549 | /* Remove the SSDT for the slot from the ACPI namespace */ | 549 | /* Remove the SSDT for the slot from the ACPI namespace */ |
550 | if (SN_ACPI_BASE_SUPPORT() && ssdt_id) { | 550 | if (SN_ACPI_BASE_SUPPORT() && ssdt_hdl) { |
551 | acpi_status ret; | 551 | acpi_status ret; |
552 | ret = acpi_unload_table_id(ssdt_id); | 552 | ret = acpi_unload_parent_table(ssdt_hdl); |
553 | if (ACPI_FAILURE(ret)) { | 553 | if (ACPI_FAILURE(ret)) { |
554 | printk(KERN_ERR "%s: acpi_unload_table_id failed (0x%x) for id %d\n", | 554 | acpi_handle_err(ssdt_hdl, |
555 | __func__, ret, ssdt_id); | 555 | "%s: acpi_unload_parent_table failed (0x%x)\n", |
556 | __func__, ret); | ||
556 | /* try to continue on */ | 557 | /* try to continue on */ |
557 | } | 558 | } |
558 | } | 559 | } |
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index fd60806d3fd0..c3e7dfcf9ff5 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c | |||
@@ -694,11 +694,16 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries) | |||
694 | { | 694 | { |
695 | resource_size_t phys_addr; | 695 | resource_size_t phys_addr; |
696 | u32 table_offset; | 696 | u32 table_offset; |
697 | unsigned long flags; | ||
697 | u8 bir; | 698 | u8 bir; |
698 | 699 | ||
699 | pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_TABLE, | 700 | pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_TABLE, |
700 | &table_offset); | 701 | &table_offset); |
701 | bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR); | 702 | bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR); |
703 | flags = pci_resource_flags(dev, bir); | ||
704 | if (!flags || (flags & IORESOURCE_UNSET)) | ||
705 | return NULL; | ||
706 | |||
702 | table_offset &= PCI_MSIX_TABLE_OFFSET; | 707 | table_offset &= PCI_MSIX_TABLE_OFFSET; |
703 | phys_addr = pci_resource_start(dev, bir) + table_offset; | 708 | phys_addr = pci_resource_start(dev, bir) + table_offset; |
704 | 709 | ||
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 3542150fc8a3..489063987325 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c | |||
@@ -501,12 +501,29 @@ static int acpi_pci_run_wake(struct pci_dev *dev, bool enable) | |||
501 | return 0; | 501 | return 0; |
502 | } | 502 | } |
503 | 503 | ||
504 | static bool acpi_pci_need_resume(struct pci_dev *dev) | ||
505 | { | ||
506 | struct acpi_device *adev = ACPI_COMPANION(&dev->dev); | ||
507 | |||
508 | if (!adev || !acpi_device_power_manageable(adev)) | ||
509 | return false; | ||
510 | |||
511 | if (device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count) | ||
512 | return true; | ||
513 | |||
514 | if (acpi_target_system_state() == ACPI_STATE_S0) | ||
515 | return false; | ||
516 | |||
517 | return !!adev->power.flags.dsw_present; | ||
518 | } | ||
519 | |||
504 | static struct pci_platform_pm_ops acpi_pci_platform_pm = { | 520 | static struct pci_platform_pm_ops acpi_pci_platform_pm = { |
505 | .is_manageable = acpi_pci_power_manageable, | 521 | .is_manageable = acpi_pci_power_manageable, |
506 | .set_state = acpi_pci_set_power_state, | 522 | .set_state = acpi_pci_set_power_state, |
507 | .choose_state = acpi_pci_choose_state, | 523 | .choose_state = acpi_pci_choose_state, |
508 | .sleep_wake = acpi_pci_sleep_wake, | 524 | .sleep_wake = acpi_pci_sleep_wake, |
509 | .run_wake = acpi_pci_run_wake, | 525 | .run_wake = acpi_pci_run_wake, |
526 | .need_resume = acpi_pci_need_resume, | ||
510 | }; | 527 | }; |
511 | 528 | ||
512 | void acpi_pci_add_bus(struct pci_bus *bus) | 529 | void acpi_pci_add_bus(struct pci_bus *bus) |
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 887e6bd95af7..3cb2210de553 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c | |||
@@ -653,7 +653,6 @@ static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev) | |||
653 | static int pci_pm_prepare(struct device *dev) | 653 | static int pci_pm_prepare(struct device *dev) |
654 | { | 654 | { |
655 | struct device_driver *drv = dev->driver; | 655 | struct device_driver *drv = dev->driver; |
656 | int error = 0; | ||
657 | 656 | ||
658 | /* | 657 | /* |
659 | * Devices having power.ignore_children set may still be necessary for | 658 | * Devices having power.ignore_children set may still be necessary for |
@@ -662,10 +661,12 @@ static int pci_pm_prepare(struct device *dev) | |||
662 | if (dev->power.ignore_children) | 661 | if (dev->power.ignore_children) |
663 | pm_runtime_resume(dev); | 662 | pm_runtime_resume(dev); |
664 | 663 | ||
665 | if (drv && drv->pm && drv->pm->prepare) | 664 | if (drv && drv->pm && drv->pm->prepare) { |
666 | error = drv->pm->prepare(dev); | 665 | int error = drv->pm->prepare(dev); |
667 | 666 | if (error) | |
668 | return error; | 667 | return error; |
668 | } | ||
669 | return pci_dev_keep_suspended(to_pci_dev(dev)); | ||
669 | } | 670 | } |
670 | 671 | ||
671 | 672 | ||
@@ -1383,7 +1384,7 @@ static int pci_uevent(struct device *dev, struct kobj_uevent_env *env) | |||
1383 | if (add_uevent_var(env, "PCI_SLOT_NAME=%s", pci_name(pdev))) | 1384 | if (add_uevent_var(env, "PCI_SLOT_NAME=%s", pci_name(pdev))) |
1384 | return -ENOMEM; | 1385 | return -ENOMEM; |
1385 | 1386 | ||
1386 | if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x", | 1387 | if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X", |
1387 | pdev->vendor, pdev->device, | 1388 | pdev->vendor, pdev->device, |
1388 | pdev->subsystem_vendor, pdev->subsystem_device, | 1389 | pdev->subsystem_vendor, pdev->subsystem_device, |
1389 | (u8)(pdev->class >> 16), (u8)(pdev->class >> 8), | 1390 | (u8)(pdev->class >> 16), (u8)(pdev->class >> 8), |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index e9d4fd861ba1..81f06e8dcc04 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -10,6 +10,8 @@ | |||
10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <linux/delay.h> | 11 | #include <linux/delay.h> |
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/of.h> | ||
14 | #include <linux/of_pci.h> | ||
13 | #include <linux/pci.h> | 15 | #include <linux/pci.h> |
14 | #include <linux/pm.h> | 16 | #include <linux/pm.h> |
15 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
@@ -521,6 +523,11 @@ static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable) | |||
521 | pci_platform_pm->run_wake(dev, enable) : -ENODEV; | 523 | pci_platform_pm->run_wake(dev, enable) : -ENODEV; |
522 | } | 524 | } |
523 | 525 | ||
526 | static inline bool platform_pci_need_resume(struct pci_dev *dev) | ||
527 | { | ||
528 | return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false; | ||
529 | } | ||
530 | |||
524 | /** | 531 | /** |
525 | * pci_raw_set_power_state - Use PCI PM registers to set the power state of | 532 | * pci_raw_set_power_state - Use PCI PM registers to set the power state of |
526 | * given PCI device | 533 | * given PCI device |
@@ -1999,6 +2006,27 @@ bool pci_dev_run_wake(struct pci_dev *dev) | |||
1999 | } | 2006 | } |
2000 | EXPORT_SYMBOL_GPL(pci_dev_run_wake); | 2007 | EXPORT_SYMBOL_GPL(pci_dev_run_wake); |
2001 | 2008 | ||
2009 | /** | ||
2010 | * pci_dev_keep_suspended - Check if the device can stay in the suspended state. | ||
2011 | * @pci_dev: Device to check. | ||
2012 | * | ||
2013 | * Return 'true' if the device is runtime-suspended, it doesn't have to be | ||
2014 | * reconfigured due to wakeup settings difference between system and runtime | ||
2015 | * suspend and the current power state of it is suitable for the upcoming | ||
2016 | * (system) transition. | ||
2017 | */ | ||
2018 | bool pci_dev_keep_suspended(struct pci_dev *pci_dev) | ||
2019 | { | ||
2020 | struct device *dev = &pci_dev->dev; | ||
2021 | |||
2022 | if (!pm_runtime_suspended(dev) | ||
2023 | || (device_can_wakeup(dev) && !device_may_wakeup(dev)) | ||
2024 | || platform_pci_need_resume(pci_dev)) | ||
2025 | return false; | ||
2026 | |||
2027 | return pci_target_state(pci_dev) == pci_dev->current_state; | ||
2028 | } | ||
2029 | |||
2002 | void pci_config_pm_runtime_get(struct pci_dev *pdev) | 2030 | void pci_config_pm_runtime_get(struct pci_dev *pdev) |
2003 | { | 2031 | { |
2004 | struct device *dev = &pdev->dev; | 2032 | struct device *dev = &pdev->dev; |
@@ -3197,7 +3225,7 @@ static int pci_pm_reset(struct pci_dev *dev, int probe) | |||
3197 | { | 3225 | { |
3198 | u16 csr; | 3226 | u16 csr; |
3199 | 3227 | ||
3200 | if (!dev->pm_cap) | 3228 | if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET) |
3201 | return -ENOTTY; | 3229 | return -ENOTTY; |
3202 | 3230 | ||
3203 | pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr); | 3231 | pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr); |
@@ -4471,6 +4499,53 @@ int pci_get_new_domain_nr(void) | |||
4471 | { | 4499 | { |
4472 | return atomic_inc_return(&__domain_nr); | 4500 | return atomic_inc_return(&__domain_nr); |
4473 | } | 4501 | } |
4502 | |||
4503 | #ifdef CONFIG_PCI_DOMAINS_GENERIC | ||
4504 | void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent) | ||
4505 | { | ||
4506 | static int use_dt_domains = -1; | ||
4507 | int domain = of_get_pci_domain_nr(parent->of_node); | ||
4508 | |||
4509 | /* | ||
4510 | * Check DT domain and use_dt_domains values. | ||
4511 | * | ||
4512 | * If DT domain property is valid (domain >= 0) and | ||
4513 | * use_dt_domains != 0, the DT assignment is valid since this means | ||
4514 | * we have not previously allocated a domain number by using | ||
4515 | * pci_get_new_domain_nr(); we should also update use_dt_domains to | ||
4516 | * 1, to indicate that we have just assigned a domain number from | ||
4517 | * DT. | ||
4518 | * | ||
4519 | * If DT domain property value is not valid (ie domain < 0), and we | ||
4520 | * have not previously assigned a domain number from DT | ||
4521 | * (use_dt_domains != 1) we should assign a domain number by | ||
4522 | * using the: | ||
4523 | * | ||
4524 | * pci_get_new_domain_nr() | ||
4525 | * | ||
4526 | * API and update the use_dt_domains value to keep track of method we | ||
4527 | * are using to assign domain numbers (use_dt_domains = 0). | ||
4528 | * | ||
4529 | * All other combinations imply we have a platform that is trying | ||
4530 | * to mix domain numbers obtained from DT and pci_get_new_domain_nr(), | ||
4531 | * which is a recipe for domain mishandling and it is prevented by | ||
4532 | * invalidating the domain value (domain = -1) and printing a | ||
4533 | * corresponding error. | ||
4534 | */ | ||
4535 | if (domain >= 0 && use_dt_domains) { | ||
4536 | use_dt_domains = 1; | ||
4537 | } else if (domain < 0 && use_dt_domains != 1) { | ||
4538 | use_dt_domains = 0; | ||
4539 | domain = pci_get_new_domain_nr(); | ||
4540 | } else { | ||
4541 | dev_err(parent, "Node %s has inconsistent \"linux,pci-domain\" property in DT\n", | ||
4542 | parent->of_node->full_name); | ||
4543 | domain = -1; | ||
4544 | } | ||
4545 | |||
4546 | bus->domain_nr = domain; | ||
4547 | } | ||
4548 | #endif | ||
4474 | #endif | 4549 | #endif |
4475 | 4550 | ||
4476 | /** | 4551 | /** |
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index d54632a1db43..4091f82239cd 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h | |||
@@ -50,6 +50,10 @@ int pci_probe_reset_function(struct pci_dev *dev); | |||
50 | * for given device (the device's wake-up capability has to be | 50 | * for given device (the device's wake-up capability has to be |
51 | * enabled by @sleep_wake for this feature to work) | 51 | * enabled by @sleep_wake for this feature to work) |
52 | * | 52 | * |
53 | * @need_resume: returns 'true' if the given device (which is currently | ||
54 | * suspended) needs to be resumed to be configured for system | ||
55 | * wakeup. | ||
56 | * | ||
53 | * If given platform is generally capable of power managing PCI devices, all of | 57 | * If given platform is generally capable of power managing PCI devices, all of |
54 | * these callbacks are mandatory. | 58 | * these callbacks are mandatory. |
55 | */ | 59 | */ |
@@ -59,6 +63,7 @@ struct pci_platform_pm_ops { | |||
59 | pci_power_t (*choose_state)(struct pci_dev *dev); | 63 | pci_power_t (*choose_state)(struct pci_dev *dev); |
60 | int (*sleep_wake)(struct pci_dev *dev, bool enable); | 64 | int (*sleep_wake)(struct pci_dev *dev, bool enable); |
61 | int (*run_wake)(struct pci_dev *dev, bool enable); | 65 | int (*run_wake)(struct pci_dev *dev, bool enable); |
66 | bool (*need_resume)(struct pci_dev *dev); | ||
62 | }; | 67 | }; |
63 | 68 | ||
64 | int pci_set_platform_pm(struct pci_platform_pm_ops *ops); | 69 | int pci_set_platform_pm(struct pci_platform_pm_ops *ops); |
@@ -67,6 +72,7 @@ void pci_power_up(struct pci_dev *dev); | |||
67 | void pci_disable_enabled_device(struct pci_dev *dev); | 72 | void pci_disable_enabled_device(struct pci_dev *dev); |
68 | int pci_finish_runtime_suspend(struct pci_dev *dev); | 73 | int pci_finish_runtime_suspend(struct pci_dev *dev); |
69 | int __pci_pme_wakeup(struct pci_dev *dev, void *ign); | 74 | int __pci_pme_wakeup(struct pci_dev *dev, void *ign); |
75 | bool pci_dev_keep_suspended(struct pci_dev *dev); | ||
70 | void pci_config_pm_runtime_get(struct pci_dev *dev); | 76 | void pci_config_pm_runtime_get(struct pci_dev *dev); |
71 | void pci_config_pm_runtime_put(struct pci_dev *dev); | 77 | void pci_config_pm_runtime_put(struct pci_dev *dev); |
72 | void pci_pm_init(struct pci_dev *dev); | 78 | void pci_pm_init(struct pci_dev *dev); |
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index e1e7026b838d..820740a22e94 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c | |||
@@ -859,7 +859,10 @@ static ssize_t link_state_store(struct device *dev, | |||
859 | { | 859 | { |
860 | struct pci_dev *pdev = to_pci_dev(dev); | 860 | struct pci_dev *pdev = to_pci_dev(dev); |
861 | struct pcie_link_state *link, *root = pdev->link_state->root; | 861 | struct pcie_link_state *link, *root = pdev->link_state->root; |
862 | u32 val = buf[0] - '0', state = 0; | 862 | u32 val, state = 0; |
863 | |||
864 | if (kstrtouint(buf, 10, &val)) | ||
865 | return -EINVAL; | ||
863 | 866 | ||
864 | if (aspm_disabled) | 867 | if (aspm_disabled) |
865 | return -EPERM; | 868 | return -EPERM; |
@@ -900,15 +903,14 @@ static ssize_t clk_ctl_store(struct device *dev, | |||
900 | size_t n) | 903 | size_t n) |
901 | { | 904 | { |
902 | struct pci_dev *pdev = to_pci_dev(dev); | 905 | struct pci_dev *pdev = to_pci_dev(dev); |
903 | int state; | 906 | bool state; |
904 | 907 | ||
905 | if (n < 1) | 908 | if (strtobool(buf, &state)) |
906 | return -EINVAL; | 909 | return -EINVAL; |
907 | state = buf[0]-'0'; | ||
908 | 910 | ||
909 | down_read(&pci_bus_sem); | 911 | down_read(&pci_bus_sem); |
910 | mutex_lock(&aspm_lock); | 912 | mutex_lock(&aspm_lock); |
911 | pcie_set_clkpm_nocheck(pdev->link_state, !!state); | 913 | pcie_set_clkpm_nocheck(pdev->link_state, state); |
912 | mutex_unlock(&aspm_lock); | 914 | mutex_unlock(&aspm_lock); |
913 | up_read(&pci_bus_sem); | 915 | up_read(&pci_bus_sem); |
914 | 916 | ||
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 23212f8ae09b..8d2f400e96cb 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -1895,7 +1895,7 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus, | |||
1895 | int error; | 1895 | int error; |
1896 | struct pci_host_bridge *bridge; | 1896 | struct pci_host_bridge *bridge; |
1897 | struct pci_bus *b, *b2; | 1897 | struct pci_bus *b, *b2; |
1898 | struct pci_host_bridge_window *window, *n; | 1898 | struct resource_entry *window, *n; |
1899 | struct resource *res; | 1899 | struct resource *res; |
1900 | resource_size_t offset; | 1900 | resource_size_t offset; |
1901 | char bus_addr[64]; | 1901 | char bus_addr[64]; |
@@ -1959,8 +1959,8 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus, | |||
1959 | printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev)); | 1959 | printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev)); |
1960 | 1960 | ||
1961 | /* Add initial resources to the bus */ | 1961 | /* Add initial resources to the bus */ |
1962 | list_for_each_entry_safe(window, n, resources, list) { | 1962 | resource_list_for_each_entry_safe(window, n, resources) { |
1963 | list_move_tail(&window->list, &bridge->windows); | 1963 | list_move_tail(&window->node, &bridge->windows); |
1964 | res = window->res; | 1964 | res = window->res; |
1965 | offset = window->offset; | 1965 | offset = window->offset; |
1966 | if (res->flags & IORESOURCE_BUS) | 1966 | if (res->flags & IORESOURCE_BUS) |
@@ -2060,12 +2060,12 @@ void pci_bus_release_busn_res(struct pci_bus *b) | |||
2060 | struct pci_bus *pci_scan_root_bus(struct device *parent, int bus, | 2060 | struct pci_bus *pci_scan_root_bus(struct device *parent, int bus, |
2061 | struct pci_ops *ops, void *sysdata, struct list_head *resources) | 2061 | struct pci_ops *ops, void *sysdata, struct list_head *resources) |
2062 | { | 2062 | { |
2063 | struct pci_host_bridge_window *window; | 2063 | struct resource_entry *window; |
2064 | bool found = false; | 2064 | bool found = false; |
2065 | struct pci_bus *b; | 2065 | struct pci_bus *b; |
2066 | int max; | 2066 | int max; |
2067 | 2067 | ||
2068 | list_for_each_entry(window, resources, list) | 2068 | resource_list_for_each_entry(window, resources) |
2069 | if (window->res->flags & IORESOURCE_BUS) { | 2069 | if (window->res->flags & IORESOURCE_BUS) { |
2070 | found = true; | 2070 | found = true; |
2071 | break; | 2071 | break; |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index e52356aa09b8..85f247e28a80 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -324,18 +324,52 @@ static void quirk_s3_64M(struct pci_dev *dev) | |||
324 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); | 324 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); |
325 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M); | 325 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M); |
326 | 326 | ||
327 | static void quirk_io(struct pci_dev *dev, int pos, unsigned size, | ||
328 | const char *name) | ||
329 | { | ||
330 | u32 region; | ||
331 | struct pci_bus_region bus_region; | ||
332 | struct resource *res = dev->resource + pos; | ||
333 | |||
334 | pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), ®ion); | ||
335 | |||
336 | if (!region) | ||
337 | return; | ||
338 | |||
339 | res->name = pci_name(dev); | ||
340 | res->flags = region & ~PCI_BASE_ADDRESS_IO_MASK; | ||
341 | res->flags |= | ||
342 | (IORESOURCE_IO | IORESOURCE_PCI_FIXED | IORESOURCE_SIZEALIGN); | ||
343 | region &= ~(size - 1); | ||
344 | |||
345 | /* Convert from PCI bus to resource space */ | ||
346 | bus_region.start = region; | ||
347 | bus_region.end = region + size - 1; | ||
348 | pcibios_bus_to_resource(dev->bus, res, &bus_region); | ||
349 | |||
350 | dev_info(&dev->dev, FW_BUG "%s quirk: reg 0x%x: %pR\n", | ||
351 | name, PCI_BASE_ADDRESS_0 + (pos << 2), res); | ||
352 | } | ||
353 | |||
327 | /* | 354 | /* |
328 | * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS | 355 | * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS |
329 | * ver. 1.33 20070103) don't set the correct ISA PCI region header info. | 356 | * ver. 1.33 20070103) don't set the correct ISA PCI region header info. |
330 | * BAR0 should be 8 bytes; instead, it may be set to something like 8k | 357 | * BAR0 should be 8 bytes; instead, it may be set to something like 8k |
331 | * (which conflicts w/ BAR1's memory range). | 358 | * (which conflicts w/ BAR1's memory range). |
359 | * | ||
360 | * CS553x's ISA PCI BARs may also be read-only (ref: | ||
361 | * https://bugzilla.kernel.org/show_bug.cgi?id=85991 - Comment #4 forward). | ||
332 | */ | 362 | */ |
333 | static void quirk_cs5536_vsa(struct pci_dev *dev) | 363 | static void quirk_cs5536_vsa(struct pci_dev *dev) |
334 | { | 364 | { |
365 | static char *name = "CS5536 ISA bridge"; | ||
366 | |||
335 | if (pci_resource_len(dev, 0) != 8) { | 367 | if (pci_resource_len(dev, 0) != 8) { |
336 | struct resource *res = &dev->resource[0]; | 368 | quirk_io(dev, 0, 8, name); /* SMB */ |
337 | res->end = res->start + 8 - 1; | 369 | quirk_io(dev, 1, 256, name); /* GPIO */ |
338 | dev_info(&dev->dev, "CS5536 ISA bridge bug detected (incorrect header); workaround applied\n"); | 370 | quirk_io(dev, 2, 64, name); /* MFGPT */ |
371 | dev_info(&dev->dev, "%s bug detected (incorrect header); workaround applied\n", | ||
372 | name); | ||
339 | } | 373 | } |
340 | } | 374 | } |
341 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa); | 375 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa); |
@@ -3042,6 +3076,27 @@ static void quirk_no_bus_reset(struct pci_dev *dev) | |||
3042 | */ | 3076 | */ |
3043 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset); | 3077 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset); |
3044 | 3078 | ||
3079 | static void quirk_no_pm_reset(struct pci_dev *dev) | ||
3080 | { | ||
3081 | /* | ||
3082 | * We can't do a bus reset on root bus devices, but an ineffective | ||
3083 | * PM reset may be better than nothing. | ||
3084 | */ | ||
3085 | if (!pci_is_root_bus(dev->bus)) | ||
3086 | dev->dev_flags |= PCI_DEV_FLAGS_NO_PM_RESET; | ||
3087 | } | ||
3088 | |||
3089 | /* | ||
3090 | * Some AMD/ATI GPUS (HD8570 - Oland) report that a D3hot->D0 transition | ||
3091 | * causes a reset (i.e., they advertise NoSoftRst-). This transition seems | ||
3092 | * to have no effect on the device: it retains the framebuffer contents and | ||
3093 | * monitor sync. Advertising this support makes other layers, like VFIO, | ||
3094 | * assume pci_reset_function() is viable for this device. Mark it as | ||
3095 | * unavailable to skip it when testing reset methods. | ||
3096 | */ | ||
3097 | DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, PCI_ANY_ID, | ||
3098 | PCI_CLASS_DISPLAY_VGA, 8, quirk_no_pm_reset); | ||
3099 | |||
3045 | #ifdef CONFIG_ACPI | 3100 | #ifdef CONFIG_ACPI |
3046 | /* | 3101 | /* |
3047 | * Apple: Shutdown Cactus Ridge Thunderbolt controller. | 3102 | * Apple: Shutdown Cactus Ridge Thunderbolt controller. |
@@ -3542,6 +3597,44 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON, | |||
3542 | quirk_dma_func1_alias); | 3597 | quirk_dma_func1_alias); |
3543 | 3598 | ||
3544 | /* | 3599 | /* |
3600 | * Some devices DMA with the wrong devfn, not just the wrong function. | ||
3601 | * quirk_fixed_dma_alias() uses this table to create fixed aliases, where | ||
3602 | * the alias is "fixed" and independent of the device devfn. | ||
3603 | * | ||
3604 | * For example, the Adaptec 3405 is a PCIe card with an Intel 80333 I/O | ||
3605 | * processor. To software, this appears as a PCIe-to-PCI/X bridge with a | ||
3606 | * single device on the secondary bus. In reality, the single exposed | ||
3607 | * device at 0e.0 is the Address Translation Unit (ATU) of the controller | ||
3608 | * that provides a bridge to the internal bus of the I/O processor. The | ||
3609 | * controller supports private devices, which can be hidden from PCI config | ||
3610 | * space. In the case of the Adaptec 3405, a private device at 01.0 | ||
3611 | * appears to be the DMA engine, which therefore needs to become a DMA | ||
3612 | * alias for the device. | ||
3613 | */ | ||
3614 | static const struct pci_device_id fixed_dma_alias_tbl[] = { | ||
3615 | { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285, | ||
3616 | PCI_VENDOR_ID_ADAPTEC2, 0x02bb), /* Adaptec 3405 */ | ||
3617 | .driver_data = PCI_DEVFN(1, 0) }, | ||
3618 | { 0 } | ||
3619 | }; | ||
3620 | |||
3621 | static void quirk_fixed_dma_alias(struct pci_dev *dev) | ||
3622 | { | ||
3623 | const struct pci_device_id *id; | ||
3624 | |||
3625 | id = pci_match_id(fixed_dma_alias_tbl, dev); | ||
3626 | if (id) { | ||
3627 | dev->dma_alias_devfn = id->driver_data; | ||
3628 | dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN; | ||
3629 | dev_info(&dev->dev, "Enabling fixed DMA alias to %02x.%d\n", | ||
3630 | PCI_SLOT(dev->dma_alias_devfn), | ||
3631 | PCI_FUNC(dev->dma_alias_devfn)); | ||
3632 | } | ||
3633 | } | ||
3634 | |||
3635 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ADAPTEC2, 0x0285, quirk_fixed_dma_alias); | ||
3636 | |||
3637 | /* | ||
3545 | * A few PCIe-to-PCI bridges fail to expose a PCIe capability, resulting in | 3638 | * A few PCIe-to-PCI bridges fail to expose a PCIe capability, resulting in |
3546 | * using the wrong DMA alias for the device. Some of these devices can be | 3639 | * using the wrong DMA alias for the device. Some of these devices can be |
3547 | * used as either forward or reverse bridges, so we need to test whether the | 3640 | * used as either forward or reverse bridges, so we need to test whether the |
@@ -3644,6 +3737,9 @@ static const u16 pci_quirk_intel_pch_acs_ids[] = { | |||
3644 | 0x9c98, 0x9c99, 0x9c9a, 0x9c9b, | 3737 | 0x9c98, 0x9c99, 0x9c9a, 0x9c9b, |
3645 | /* Patsburg (X79) PCH */ | 3738 | /* Patsburg (X79) PCH */ |
3646 | 0x1d10, 0x1d12, 0x1d14, 0x1d16, 0x1d18, 0x1d1a, 0x1d1c, 0x1d1e, | 3739 | 0x1d10, 0x1d12, 0x1d14, 0x1d16, 0x1d18, 0x1d1a, 0x1d1c, 0x1d1e, |
3740 | /* Wellsburg (X99) PCH */ | ||
3741 | 0x8d10, 0x8d11, 0x8d12, 0x8d13, 0x8d14, 0x8d15, 0x8d16, 0x8d17, | ||
3742 | 0x8d18, 0x8d19, 0x8d1a, 0x8d1b, 0x8d1c, 0x8d1d, 0x8d1e, | ||
3647 | }; | 3743 | }; |
3648 | 3744 | ||
3649 | static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev) | 3745 | static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev) |
@@ -3727,6 +3823,8 @@ static const struct pci_dev_acs_enabled { | |||
3727 | { PCI_VENDOR_ID_INTEL, 0x1551, pci_quirk_mf_endpoint_acs }, | 3823 | { PCI_VENDOR_ID_INTEL, 0x1551, pci_quirk_mf_endpoint_acs }, |
3728 | { PCI_VENDOR_ID_INTEL, 0x1558, pci_quirk_mf_endpoint_acs }, | 3824 | { PCI_VENDOR_ID_INTEL, 0x1558, pci_quirk_mf_endpoint_acs }, |
3729 | { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs }, | 3825 | { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs }, |
3826 | { 0x19a2, 0x710, pci_quirk_mf_endpoint_acs }, /* Emulex BE3-R */ | ||
3827 | { 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */ | ||
3730 | { 0 } | 3828 | { 0 } |
3731 | }; | 3829 | }; |
3732 | 3830 | ||
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c index f955edb9bea7..eb0ad530dc43 100644 --- a/drivers/pci/rom.c +++ b/drivers/pci/rom.c | |||
@@ -71,6 +71,7 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size) | |||
71 | { | 71 | { |
72 | void __iomem *image; | 72 | void __iomem *image; |
73 | int last_image; | 73 | int last_image; |
74 | unsigned length; | ||
74 | 75 | ||
75 | image = rom; | 76 | image = rom; |
76 | do { | 77 | do { |
@@ -93,9 +94,9 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size) | |||
93 | if (readb(pds + 3) != 'R') | 94 | if (readb(pds + 3) != 'R') |
94 | break; | 95 | break; |
95 | last_image = readb(pds + 21) & 0x80; | 96 | last_image = readb(pds + 21) & 0x80; |
96 | /* this length is reliable */ | 97 | length = readw(pds + 16); |
97 | image += readw(pds + 16) * 512; | 98 | image += length * 512; |
98 | } while (!last_image); | 99 | } while (length && !last_image); |
99 | 100 | ||
100 | /* never return a size larger than the PCI resource window */ | 101 | /* never return a size larger than the PCI resource window */ |
101 | /* there are known ROMs that get the size wrong */ | 102 | /* there are known ROMs that get the size wrong */ |
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c index dfd021e8268f..f4cd0b9b2438 100644 --- a/drivers/pinctrl/pinctrl-at91.c +++ b/drivers/pinctrl/pinctrl-at91.c | |||
@@ -177,7 +177,7 @@ struct at91_pinctrl { | |||
177 | struct device *dev; | 177 | struct device *dev; |
178 | struct pinctrl_dev *pctl; | 178 | struct pinctrl_dev *pctl; |
179 | 179 | ||
180 | int nbanks; | 180 | int nactive_banks; |
181 | 181 | ||
182 | uint32_t *mux_mask; | 182 | uint32_t *mux_mask; |
183 | int nmux; | 183 | int nmux; |
@@ -653,12 +653,18 @@ static int pin_check_config(struct at91_pinctrl *info, const char *name, | |||
653 | int mux; | 653 | int mux; |
654 | 654 | ||
655 | /* check if it's a valid config */ | 655 | /* check if it's a valid config */ |
656 | if (pin->bank >= info->nbanks) { | 656 | if (pin->bank >= gpio_banks) { |
657 | dev_err(info->dev, "%s: pin conf %d bank_id %d >= nbanks %d\n", | 657 | dev_err(info->dev, "%s: pin conf %d bank_id %d >= nbanks %d\n", |
658 | name, index, pin->bank, info->nbanks); | 658 | name, index, pin->bank, gpio_banks); |
659 | return -EINVAL; | 659 | return -EINVAL; |
660 | } | 660 | } |
661 | 661 | ||
662 | if (!gpio_chips[pin->bank]) { | ||
663 | dev_err(info->dev, "%s: pin conf %d bank_id %d not enabled\n", | ||
664 | name, index, pin->bank); | ||
665 | return -ENXIO; | ||
666 | } | ||
667 | |||
662 | if (pin->pin >= MAX_NB_GPIO_PER_BANK) { | 668 | if (pin->pin >= MAX_NB_GPIO_PER_BANK) { |
663 | dev_err(info->dev, "%s: pin conf %d pin_bank_id %d >= %d\n", | 669 | dev_err(info->dev, "%s: pin conf %d pin_bank_id %d >= %d\n", |
664 | name, index, pin->pin, MAX_NB_GPIO_PER_BANK); | 670 | name, index, pin->pin, MAX_NB_GPIO_PER_BANK); |
@@ -981,7 +987,8 @@ static void at91_pinctrl_child_count(struct at91_pinctrl *info, | |||
981 | 987 | ||
982 | for_each_child_of_node(np, child) { | 988 | for_each_child_of_node(np, child) { |
983 | if (of_device_is_compatible(child, gpio_compat)) { | 989 | if (of_device_is_compatible(child, gpio_compat)) { |
984 | info->nbanks++; | 990 | if (of_device_is_available(child)) |
991 | info->nactive_banks++; | ||
985 | } else { | 992 | } else { |
986 | info->nfunctions++; | 993 | info->nfunctions++; |
987 | info->ngroups += of_get_child_count(child); | 994 | info->ngroups += of_get_child_count(child); |
@@ -1003,11 +1010,11 @@ static int at91_pinctrl_mux_mask(struct at91_pinctrl *info, | |||
1003 | } | 1010 | } |
1004 | 1011 | ||
1005 | size /= sizeof(*list); | 1012 | size /= sizeof(*list); |
1006 | if (!size || size % info->nbanks) { | 1013 | if (!size || size % gpio_banks) { |
1007 | dev_err(info->dev, "wrong mux mask array should be by %d\n", info->nbanks); | 1014 | dev_err(info->dev, "wrong mux mask array should be by %d\n", gpio_banks); |
1008 | return -EINVAL; | 1015 | return -EINVAL; |
1009 | } | 1016 | } |
1010 | info->nmux = size / info->nbanks; | 1017 | info->nmux = size / gpio_banks; |
1011 | 1018 | ||
1012 | info->mux_mask = devm_kzalloc(info->dev, sizeof(u32) * size, GFP_KERNEL); | 1019 | info->mux_mask = devm_kzalloc(info->dev, sizeof(u32) * size, GFP_KERNEL); |
1013 | if (!info->mux_mask) { | 1020 | if (!info->mux_mask) { |
@@ -1131,7 +1138,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev, | |||
1131 | of_match_device(at91_pinctrl_of_match, &pdev->dev)->data; | 1138 | of_match_device(at91_pinctrl_of_match, &pdev->dev)->data; |
1132 | at91_pinctrl_child_count(info, np); | 1139 | at91_pinctrl_child_count(info, np); |
1133 | 1140 | ||
1134 | if (info->nbanks < 1) { | 1141 | if (gpio_banks < 1) { |
1135 | dev_err(&pdev->dev, "you need to specify at least one gpio-controller\n"); | 1142 | dev_err(&pdev->dev, "you need to specify at least one gpio-controller\n"); |
1136 | return -EINVAL; | 1143 | return -EINVAL; |
1137 | } | 1144 | } |
@@ -1144,7 +1151,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev, | |||
1144 | 1151 | ||
1145 | dev_dbg(&pdev->dev, "mux-mask\n"); | 1152 | dev_dbg(&pdev->dev, "mux-mask\n"); |
1146 | tmp = info->mux_mask; | 1153 | tmp = info->mux_mask; |
1147 | for (i = 0; i < info->nbanks; i++) { | 1154 | for (i = 0; i < gpio_banks; i++) { |
1148 | for (j = 0; j < info->nmux; j++, tmp++) { | 1155 | for (j = 0; j < info->nmux; j++, tmp++) { |
1149 | dev_dbg(&pdev->dev, "%d:%d\t0x%x\n", i, j, tmp[0]); | 1156 | dev_dbg(&pdev->dev, "%d:%d\t0x%x\n", i, j, tmp[0]); |
1150 | } | 1157 | } |
@@ -1162,7 +1169,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev, | |||
1162 | if (!info->groups) | 1169 | if (!info->groups) |
1163 | return -ENOMEM; | 1170 | return -ENOMEM; |
1164 | 1171 | ||
1165 | dev_dbg(&pdev->dev, "nbanks = %d\n", info->nbanks); | 1172 | dev_dbg(&pdev->dev, "nbanks = %d\n", gpio_banks); |
1166 | dev_dbg(&pdev->dev, "nfunctions = %d\n", info->nfunctions); | 1173 | dev_dbg(&pdev->dev, "nfunctions = %d\n", info->nfunctions); |
1167 | dev_dbg(&pdev->dev, "ngroups = %d\n", info->ngroups); | 1174 | dev_dbg(&pdev->dev, "ngroups = %d\n", info->ngroups); |
1168 | 1175 | ||
@@ -1185,7 +1192,7 @@ static int at91_pinctrl_probe(struct platform_device *pdev) | |||
1185 | { | 1192 | { |
1186 | struct at91_pinctrl *info; | 1193 | struct at91_pinctrl *info; |
1187 | struct pinctrl_pin_desc *pdesc; | 1194 | struct pinctrl_pin_desc *pdesc; |
1188 | int ret, i, j, k; | 1195 | int ret, i, j, k, ngpio_chips_enabled = 0; |
1189 | 1196 | ||
1190 | info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); | 1197 | info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); |
1191 | if (!info) | 1198 | if (!info) |
@@ -1200,23 +1207,27 @@ static int at91_pinctrl_probe(struct platform_device *pdev) | |||
1200 | * to obtain references to the struct gpio_chip * for them, and we | 1207 | * to obtain references to the struct gpio_chip * for them, and we |
1201 | * need this to proceed. | 1208 | * need this to proceed. |
1202 | */ | 1209 | */ |
1203 | for (i = 0; i < info->nbanks; i++) { | 1210 | for (i = 0; i < gpio_banks; i++) |
1204 | if (!gpio_chips[i]) { | 1211 | if (gpio_chips[i]) |
1205 | dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i); | 1212 | ngpio_chips_enabled++; |
1206 | devm_kfree(&pdev->dev, info); | 1213 | |
1207 | return -EPROBE_DEFER; | 1214 | if (ngpio_chips_enabled < info->nactive_banks) { |
1208 | } | 1215 | dev_warn(&pdev->dev, |
1216 | "All GPIO chips are not registered yet (%d/%d)\n", | ||
1217 | ngpio_chips_enabled, info->nactive_banks); | ||
1218 | devm_kfree(&pdev->dev, info); | ||
1219 | return -EPROBE_DEFER; | ||
1209 | } | 1220 | } |
1210 | 1221 | ||
1211 | at91_pinctrl_desc.name = dev_name(&pdev->dev); | 1222 | at91_pinctrl_desc.name = dev_name(&pdev->dev); |
1212 | at91_pinctrl_desc.npins = info->nbanks * MAX_NB_GPIO_PER_BANK; | 1223 | at91_pinctrl_desc.npins = gpio_banks * MAX_NB_GPIO_PER_BANK; |
1213 | at91_pinctrl_desc.pins = pdesc = | 1224 | at91_pinctrl_desc.pins = pdesc = |
1214 | devm_kzalloc(&pdev->dev, sizeof(*pdesc) * at91_pinctrl_desc.npins, GFP_KERNEL); | 1225 | devm_kzalloc(&pdev->dev, sizeof(*pdesc) * at91_pinctrl_desc.npins, GFP_KERNEL); |
1215 | 1226 | ||
1216 | if (!at91_pinctrl_desc.pins) | 1227 | if (!at91_pinctrl_desc.pins) |
1217 | return -ENOMEM; | 1228 | return -ENOMEM; |
1218 | 1229 | ||
1219 | for (i = 0 , k = 0; i < info->nbanks; i++) { | 1230 | for (i = 0, k = 0; i < gpio_banks; i++) { |
1220 | for (j = 0; j < MAX_NB_GPIO_PER_BANK; j++, k++) { | 1231 | for (j = 0; j < MAX_NB_GPIO_PER_BANK; j++, k++) { |
1221 | pdesc->number = k; | 1232 | pdesc->number = k; |
1222 | pdesc->name = kasprintf(GFP_KERNEL, "pio%c%d", i + 'A', j); | 1233 | pdesc->name = kasprintf(GFP_KERNEL, "pio%c%d", i + 'A', j); |
@@ -1234,8 +1245,9 @@ static int at91_pinctrl_probe(struct platform_device *pdev) | |||
1234 | } | 1245 | } |
1235 | 1246 | ||
1236 | /* We will handle a range of GPIO pins */ | 1247 | /* We will handle a range of GPIO pins */ |
1237 | for (i = 0; i < info->nbanks; i++) | 1248 | for (i = 0; i < gpio_banks; i++) |
1238 | pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range); | 1249 | if (gpio_chips[i]) |
1250 | pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range); | ||
1239 | 1251 | ||
1240 | dev_info(&pdev->dev, "initialized AT91 pinctrl driver\n"); | 1252 | dev_info(&pdev->dev, "initialized AT91 pinctrl driver\n"); |
1241 | 1253 | ||
@@ -1613,9 +1625,10 @@ static void gpio_irq_handler(unsigned irq, struct irq_desc *desc) | |||
1613 | static int at91_gpio_of_irq_setup(struct platform_device *pdev, | 1625 | static int at91_gpio_of_irq_setup(struct platform_device *pdev, |
1614 | struct at91_gpio_chip *at91_gpio) | 1626 | struct at91_gpio_chip *at91_gpio) |
1615 | { | 1627 | { |
1628 | struct gpio_chip *gpiochip_prev = NULL; | ||
1616 | struct at91_gpio_chip *prev = NULL; | 1629 | struct at91_gpio_chip *prev = NULL; |
1617 | struct irq_data *d = irq_get_irq_data(at91_gpio->pioc_virq); | 1630 | struct irq_data *d = irq_get_irq_data(at91_gpio->pioc_virq); |
1618 | int ret; | 1631 | int ret, i; |
1619 | 1632 | ||
1620 | at91_gpio->pioc_hwirq = irqd_to_hwirq(d); | 1633 | at91_gpio->pioc_hwirq = irqd_to_hwirq(d); |
1621 | 1634 | ||
@@ -1641,24 +1654,33 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev, | |||
1641 | return ret; | 1654 | return ret; |
1642 | } | 1655 | } |
1643 | 1656 | ||
1644 | /* Setup chained handler */ | ||
1645 | if (at91_gpio->pioc_idx) | ||
1646 | prev = gpio_chips[at91_gpio->pioc_idx - 1]; | ||
1647 | |||
1648 | /* The top level handler handles one bank of GPIOs, except | 1657 | /* The top level handler handles one bank of GPIOs, except |
1649 | * on some SoC it can handle up to three... | 1658 | * on some SoC it can handle up to three... |
1650 | * We only set up the handler for the first of the list. | 1659 | * We only set up the handler for the first of the list. |
1651 | */ | 1660 | */ |
1652 | if (prev && prev->next == at91_gpio) | 1661 | gpiochip_prev = irq_get_handler_data(at91_gpio->pioc_virq); |
1662 | if (!gpiochip_prev) { | ||
1663 | /* Then register the chain on the parent IRQ */ | ||
1664 | gpiochip_set_chained_irqchip(&at91_gpio->chip, | ||
1665 | &gpio_irqchip, | ||
1666 | at91_gpio->pioc_virq, | ||
1667 | gpio_irq_handler); | ||
1653 | return 0; | 1668 | return 0; |
1669 | } | ||
1654 | 1670 | ||
1655 | /* Then register the chain on the parent IRQ */ | 1671 | prev = container_of(gpiochip_prev, struct at91_gpio_chip, chip); |
1656 | gpiochip_set_chained_irqchip(&at91_gpio->chip, | ||
1657 | &gpio_irqchip, | ||
1658 | at91_gpio->pioc_virq, | ||
1659 | gpio_irq_handler); | ||
1660 | 1672 | ||
1661 | return 0; | 1673 | /* we can only have 2 banks before */ |
1674 | for (i = 0; i < 2; i++) { | ||
1675 | if (prev->next) { | ||
1676 | prev = prev->next; | ||
1677 | } else { | ||
1678 | prev->next = at91_gpio; | ||
1679 | return 0; | ||
1680 | } | ||
1681 | } | ||
1682 | |||
1683 | return -EINVAL; | ||
1662 | } | 1684 | } |
1663 | 1685 | ||
1664 | /* This structure is replicated for each GPIO block allocated at probe time */ | 1686 | /* This structure is replicated for each GPIO block allocated at probe time */ |
@@ -1675,24 +1697,6 @@ static struct gpio_chip at91_gpio_template = { | |||
1675 | .ngpio = MAX_NB_GPIO_PER_BANK, | 1697 | .ngpio = MAX_NB_GPIO_PER_BANK, |
1676 | }; | 1698 | }; |
1677 | 1699 | ||
1678 | static void at91_gpio_probe_fixup(void) | ||
1679 | { | ||
1680 | unsigned i; | ||
1681 | struct at91_gpio_chip *at91_gpio, *last = NULL; | ||
1682 | |||
1683 | for (i = 0; i < gpio_banks; i++) { | ||
1684 | at91_gpio = gpio_chips[i]; | ||
1685 | |||
1686 | /* | ||
1687 | * GPIO controller are grouped on some SoC: | ||
1688 | * PIOC, PIOD and PIOE can share the same IRQ line | ||
1689 | */ | ||
1690 | if (last && last->pioc_virq == at91_gpio->pioc_virq) | ||
1691 | last->next = at91_gpio; | ||
1692 | last = at91_gpio; | ||
1693 | } | ||
1694 | } | ||
1695 | |||
1696 | static struct of_device_id at91_gpio_of_match[] = { | 1700 | static struct of_device_id at91_gpio_of_match[] = { |
1697 | { .compatible = "atmel,at91sam9x5-gpio", .data = &at91sam9x5_ops, }, | 1701 | { .compatible = "atmel,at91sam9x5-gpio", .data = &at91sam9x5_ops, }, |
1698 | { .compatible = "atmel,at91rm9200-gpio", .data = &at91rm9200_ops }, | 1702 | { .compatible = "atmel,at91rm9200-gpio", .data = &at91rm9200_ops }, |
@@ -1805,8 +1809,6 @@ static int at91_gpio_probe(struct platform_device *pdev) | |||
1805 | gpio_chips[alias_idx] = at91_chip; | 1809 | gpio_chips[alias_idx] = at91_chip; |
1806 | gpio_banks = max(gpio_banks, alias_idx + 1); | 1810 | gpio_banks = max(gpio_banks, alias_idx + 1); |
1807 | 1811 | ||
1808 | at91_gpio_probe_fixup(); | ||
1809 | |||
1810 | ret = at91_gpio_of_irq_setup(pdev, at91_chip); | 1812 | ret = at91_gpio_of_irq_setup(pdev, at91_chip); |
1811 | if (ret) | 1813 | if (ret) |
1812 | goto irq_setup_err; | 1814 | goto irq_setup_err; |
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c index 66977ebf13b3..ff0356fb378f 100644 --- a/drivers/pnp/pnpacpi/rsparser.c +++ b/drivers/pnp/pnpacpi/rsparser.c | |||
@@ -180,20 +180,21 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, | |||
180 | struct pnp_dev *dev = data; | 180 | struct pnp_dev *dev = data; |
181 | struct acpi_resource_dma *dma; | 181 | struct acpi_resource_dma *dma; |
182 | struct acpi_resource_vendor_typed *vendor_typed; | 182 | struct acpi_resource_vendor_typed *vendor_typed; |
183 | struct resource r = {0}; | 183 | struct resource_win win = {{0}, 0}; |
184 | struct resource *r = &win.res; | ||
184 | int i, flags; | 185 | int i, flags; |
185 | 186 | ||
186 | if (acpi_dev_resource_address_space(res, &r) | 187 | if (acpi_dev_resource_address_space(res, &win) |
187 | || acpi_dev_resource_ext_address_space(res, &r)) { | 188 | || acpi_dev_resource_ext_address_space(res, &win)) { |
188 | pnp_add_resource(dev, &r); | 189 | pnp_add_resource(dev, &win.res); |
189 | return AE_OK; | 190 | return AE_OK; |
190 | } | 191 | } |
191 | 192 | ||
192 | r.flags = 0; | 193 | r->flags = 0; |
193 | if (acpi_dev_resource_interrupt(res, 0, &r)) { | 194 | if (acpi_dev_resource_interrupt(res, 0, r)) { |
194 | pnpacpi_add_irqresource(dev, &r); | 195 | pnpacpi_add_irqresource(dev, r); |
195 | for (i = 1; acpi_dev_resource_interrupt(res, i, &r); i++) | 196 | for (i = 1; acpi_dev_resource_interrupt(res, i, r); i++) |
196 | pnpacpi_add_irqresource(dev, &r); | 197 | pnpacpi_add_irqresource(dev, r); |
197 | 198 | ||
198 | if (i > 1) { | 199 | if (i > 1) { |
199 | /* | 200 | /* |
@@ -209,7 +210,7 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, | |||
209 | } | 210 | } |
210 | } | 211 | } |
211 | return AE_OK; | 212 | return AE_OK; |
212 | } else if (r.flags & IORESOURCE_DISABLED) { | 213 | } else if (r->flags & IORESOURCE_DISABLED) { |
213 | pnp_add_irq_resource(dev, 0, IORESOURCE_DISABLED); | 214 | pnp_add_irq_resource(dev, 0, IORESOURCE_DISABLED); |
214 | return AE_OK; | 215 | return AE_OK; |
215 | } | 216 | } |
@@ -218,13 +219,13 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, | |||
218 | case ACPI_RESOURCE_TYPE_MEMORY24: | 219 | case ACPI_RESOURCE_TYPE_MEMORY24: |
219 | case ACPI_RESOURCE_TYPE_MEMORY32: | 220 | case ACPI_RESOURCE_TYPE_MEMORY32: |
220 | case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: | 221 | case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: |
221 | if (acpi_dev_resource_memory(res, &r)) | 222 | if (acpi_dev_resource_memory(res, r)) |
222 | pnp_add_resource(dev, &r); | 223 | pnp_add_resource(dev, r); |
223 | break; | 224 | break; |
224 | case ACPI_RESOURCE_TYPE_IO: | 225 | case ACPI_RESOURCE_TYPE_IO: |
225 | case ACPI_RESOURCE_TYPE_FIXED_IO: | 226 | case ACPI_RESOURCE_TYPE_FIXED_IO: |
226 | if (acpi_dev_resource_io(res, &r)) | 227 | if (acpi_dev_resource_io(res, r)) |
227 | pnp_add_resource(dev, &r); | 228 | pnp_add_resource(dev, r); |
228 | break; | 229 | break; |
229 | case ACPI_RESOURCE_TYPE_DMA: | 230 | case ACPI_RESOURCE_TYPE_DMA: |
230 | dma = &res->data.dma; | 231 | dma = &res->data.dma; |
@@ -410,12 +411,12 @@ static __init void pnpacpi_parse_address_option(struct pnp_dev *dev, | |||
410 | if (p->resource_type == ACPI_MEMORY_RANGE) { | 411 | if (p->resource_type == ACPI_MEMORY_RANGE) { |
411 | if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY) | 412 | if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY) |
412 | flags = IORESOURCE_MEM_WRITEABLE; | 413 | flags = IORESOURCE_MEM_WRITEABLE; |
413 | pnp_register_mem_resource(dev, option_flags, p->minimum, | 414 | pnp_register_mem_resource(dev, option_flags, p->address.minimum, |
414 | p->minimum, 0, p->address_length, | 415 | p->address.minimum, 0, p->address.address_length, |
415 | flags); | 416 | flags); |
416 | } else if (p->resource_type == ACPI_IO_RANGE) | 417 | } else if (p->resource_type == ACPI_IO_RANGE) |
417 | pnp_register_port_resource(dev, option_flags, p->minimum, | 418 | pnp_register_port_resource(dev, option_flags, p->address.minimum, |
418 | p->minimum, 0, p->address_length, | 419 | p->address.minimum, 0, p->address.address_length, |
419 | IORESOURCE_IO_FIXED); | 420 | IORESOURCE_IO_FIXED); |
420 | } | 421 | } |
421 | 422 | ||
@@ -429,12 +430,12 @@ static __init void pnpacpi_parse_ext_address_option(struct pnp_dev *dev, | |||
429 | if (p->resource_type == ACPI_MEMORY_RANGE) { | 430 | if (p->resource_type == ACPI_MEMORY_RANGE) { |
430 | if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY) | 431 | if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY) |
431 | flags = IORESOURCE_MEM_WRITEABLE; | 432 | flags = IORESOURCE_MEM_WRITEABLE; |
432 | pnp_register_mem_resource(dev, option_flags, p->minimum, | 433 | pnp_register_mem_resource(dev, option_flags, p->address.minimum, |
433 | p->minimum, 0, p->address_length, | 434 | p->address.minimum, 0, p->address.address_length, |
434 | flags); | 435 | flags); |
435 | } else if (p->resource_type == ACPI_IO_RANGE) | 436 | } else if (p->resource_type == ACPI_IO_RANGE) |
436 | pnp_register_port_resource(dev, option_flags, p->minimum, | 437 | pnp_register_port_resource(dev, option_flags, p->address.minimum, |
437 | p->minimum, 0, p->address_length, | 438 | p->address.minimum, 0, p->address.address_length, |
438 | IORESOURCE_IO_FIXED); | 439 | IORESOURCE_IO_FIXED); |
439 | } | 440 | } |
440 | 441 | ||
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c index 8bcfecd66281..eeca70ddbf61 100644 --- a/drivers/rapidio/devices/tsi721.c +++ b/drivers/rapidio/devices/tsi721.c | |||
@@ -2430,7 +2430,7 @@ static int tsi721_probe(struct pci_dev *pdev, | |||
2430 | pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, | 2430 | pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, |
2431 | PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN | | 2431 | PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN | |
2432 | PCI_EXP_DEVCTL_NOSNOOP_EN, | 2432 | PCI_EXP_DEVCTL_NOSNOOP_EN, |
2433 | 0x2 << MAX_READ_REQUEST_SZ_SHIFT); | 2433 | PCI_EXP_DEVCTL_READRQ_512B); |
2434 | 2434 | ||
2435 | /* Adjust PCIe completion timeout. */ | 2435 | /* Adjust PCIe completion timeout. */ |
2436 | pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2, 0xf, 0x2); | 2436 | pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2, 0xf, 0x2); |
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h index a7b42680a06a..9d2502543ef6 100644 --- a/drivers/rapidio/devices/tsi721.h +++ b/drivers/rapidio/devices/tsi721.h | |||
@@ -72,8 +72,6 @@ | |||
72 | #define TSI721_MSIXPBA_OFFSET 0x2a000 | 72 | #define TSI721_MSIXPBA_OFFSET 0x2a000 |
73 | #define TSI721_PCIECFG_EPCTL 0x400 | 73 | #define TSI721_PCIECFG_EPCTL 0x400 |
74 | 74 | ||
75 | #define MAX_READ_REQUEST_SZ_SHIFT 12 | ||
76 | |||
77 | /* | 75 | /* |
78 | * Event Management Registers | 76 | * Event Management Registers |
79 | */ | 77 | */ |
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index c3a60b57a865..a6f116aa5235 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig | |||
@@ -414,6 +414,14 @@ config REGULATOR_MAX77802 | |||
414 | Exynos5420/Exynos5800 SoCs to control various voltages. | 414 | Exynos5420/Exynos5800 SoCs to control various voltages. |
415 | It includes support for control of voltage and ramp speed. | 415 | It includes support for control of voltage and ramp speed. |
416 | 416 | ||
417 | config REGULATOR_MAX77843 | ||
418 | tristate "Maxim 77843 regulator" | ||
419 | depends on MFD_MAX77843 | ||
420 | help | ||
421 | This driver controls a Maxim 77843 regulator. | ||
422 | The regulator include two 'SAFEOUT' for USB(Universal Serial Bus) | ||
423 | This is suitable for Exynos5433 SoC chips. | ||
424 | |||
417 | config REGULATOR_MC13XXX_CORE | 425 | config REGULATOR_MC13XXX_CORE |
418 | tristate | 426 | tristate |
419 | 427 | ||
@@ -433,6 +441,15 @@ config REGULATOR_MC13892 | |||
433 | Say y here to support the regulators found on the Freescale MC13892 | 441 | Say y here to support the regulators found on the Freescale MC13892 |
434 | PMIC. | 442 | PMIC. |
435 | 443 | ||
444 | config REGULATOR_MT6397 | ||
445 | tristate "MediaTek MT6397 PMIC" | ||
446 | depends on MFD_MT6397 | ||
447 | help | ||
448 | Say y here to select this option to enable the power regulator of | ||
449 | MediaTek MT6397 PMIC. | ||
450 | This driver supports the control of different power rails of device | ||
451 | through regulator interface. | ||
452 | |||
436 | config REGULATOR_PALMAS | 453 | config REGULATOR_PALMAS |
437 | tristate "TI Palmas PMIC Regulators" | 454 | tristate "TI Palmas PMIC Regulators" |
438 | depends on MFD_PALMAS | 455 | depends on MFD_PALMAS |
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index 1f28ebfc6f3a..2c4da15e1545 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile | |||
@@ -55,9 +55,11 @@ obj-$(CONFIG_REGULATOR_MAX8998) += max8998.o | |||
55 | obj-$(CONFIG_REGULATOR_MAX77686) += max77686.o | 55 | obj-$(CONFIG_REGULATOR_MAX77686) += max77686.o |
56 | obj-$(CONFIG_REGULATOR_MAX77693) += max77693.o | 56 | obj-$(CONFIG_REGULATOR_MAX77693) += max77693.o |
57 | obj-$(CONFIG_REGULATOR_MAX77802) += max77802.o | 57 | obj-$(CONFIG_REGULATOR_MAX77802) += max77802.o |
58 | obj-$(CONFIG_REGULATOR_MAX77843) += max77843.o | ||
58 | obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o | 59 | obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o |
59 | obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o | 60 | obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o |
60 | obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o | 61 | obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o |
62 | obj-$(CONFIG_REGULATOR_MT6397) += mt6397-regulator.o | ||
61 | obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o | 63 | obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o |
62 | obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o | 64 | obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o |
63 | obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o | 65 | obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o |
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index f23d7e1f2ee7..e4331f5e5d7d 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c | |||
@@ -32,11 +32,13 @@ | |||
32 | 32 | ||
33 | #define AXP20X_FREQ_DCDC_MASK 0x0f | 33 | #define AXP20X_FREQ_DCDC_MASK 0x0f |
34 | 34 | ||
35 | #define AXP20X_DESC_IO(_id, _supply, _min, _max, _step, _vreg, _vmask, _ereg, \ | 35 | #define AXP20X_DESC_IO(_id, _match, _supply, _min, _max, _step, _vreg, _vmask, \ |
36 | _emask, _enable_val, _disable_val) \ | 36 | _ereg, _emask, _enable_val, _disable_val) \ |
37 | [AXP20X_##_id] = { \ | 37 | [AXP20X_##_id] = { \ |
38 | .name = #_id, \ | 38 | .name = #_id, \ |
39 | .supply_name = (_supply), \ | 39 | .supply_name = (_supply), \ |
40 | .of_match = of_match_ptr(_match), \ | ||
41 | .regulators_node = of_match_ptr("regulators"), \ | ||
40 | .type = REGULATOR_VOLTAGE, \ | 42 | .type = REGULATOR_VOLTAGE, \ |
41 | .id = AXP20X_##_id, \ | 43 | .id = AXP20X_##_id, \ |
42 | .n_voltages = (((_max) - (_min)) / (_step) + 1), \ | 44 | .n_voltages = (((_max) - (_min)) / (_step) + 1), \ |
@@ -52,11 +54,13 @@ | |||
52 | .ops = &axp20x_ops, \ | 54 | .ops = &axp20x_ops, \ |
53 | } | 55 | } |
54 | 56 | ||
55 | #define AXP20X_DESC(_id, _supply, _min, _max, _step, _vreg, _vmask, _ereg, \ | 57 | #define AXP20X_DESC(_id, _match, _supply, _min, _max, _step, _vreg, _vmask, \ |
56 | _emask) \ | 58 | _ereg, _emask) \ |
57 | [AXP20X_##_id] = { \ | 59 | [AXP20X_##_id] = { \ |
58 | .name = #_id, \ | 60 | .name = #_id, \ |
59 | .supply_name = (_supply), \ | 61 | .supply_name = (_supply), \ |
62 | .of_match = of_match_ptr(_match), \ | ||
63 | .regulators_node = of_match_ptr("regulators"), \ | ||
60 | .type = REGULATOR_VOLTAGE, \ | 64 | .type = REGULATOR_VOLTAGE, \ |
61 | .id = AXP20X_##_id, \ | 65 | .id = AXP20X_##_id, \ |
62 | .n_voltages = (((_max) - (_min)) / (_step) + 1), \ | 66 | .n_voltages = (((_max) - (_min)) / (_step) + 1), \ |
@@ -70,10 +74,12 @@ | |||
70 | .ops = &axp20x_ops, \ | 74 | .ops = &axp20x_ops, \ |
71 | } | 75 | } |
72 | 76 | ||
73 | #define AXP20X_DESC_FIXED(_id, _supply, _volt) \ | 77 | #define AXP20X_DESC_FIXED(_id, _match, _supply, _volt) \ |
74 | [AXP20X_##_id] = { \ | 78 | [AXP20X_##_id] = { \ |
75 | .name = #_id, \ | 79 | .name = #_id, \ |
76 | .supply_name = (_supply), \ | 80 | .supply_name = (_supply), \ |
81 | .of_match = of_match_ptr(_match), \ | ||
82 | .regulators_node = of_match_ptr("regulators"), \ | ||
77 | .type = REGULATOR_VOLTAGE, \ | 83 | .type = REGULATOR_VOLTAGE, \ |
78 | .id = AXP20X_##_id, \ | 84 | .id = AXP20X_##_id, \ |
79 | .n_voltages = 1, \ | 85 | .n_voltages = 1, \ |
@@ -82,10 +88,13 @@ | |||
82 | .ops = &axp20x_ops_fixed \ | 88 | .ops = &axp20x_ops_fixed \ |
83 | } | 89 | } |
84 | 90 | ||
85 | #define AXP20X_DESC_TABLE(_id, _supply, _table, _vreg, _vmask, _ereg, _emask) \ | 91 | #define AXP20X_DESC_TABLE(_id, _match, _supply, _table, _vreg, _vmask, _ereg, \ |
92 | _emask) \ | ||
86 | [AXP20X_##_id] = { \ | 93 | [AXP20X_##_id] = { \ |
87 | .name = #_id, \ | 94 | .name = #_id, \ |
88 | .supply_name = (_supply), \ | 95 | .supply_name = (_supply), \ |
96 | .of_match = of_match_ptr(_match), \ | ||
97 | .regulators_node = of_match_ptr("regulators"), \ | ||
89 | .type = REGULATOR_VOLTAGE, \ | 98 | .type = REGULATOR_VOLTAGE, \ |
90 | .id = AXP20X_##_id, \ | 99 | .id = AXP20X_##_id, \ |
91 | .n_voltages = ARRAY_SIZE(_table), \ | 100 | .n_voltages = ARRAY_SIZE(_table), \ |
@@ -127,36 +136,20 @@ static struct regulator_ops axp20x_ops = { | |||
127 | }; | 136 | }; |
128 | 137 | ||
129 | static const struct regulator_desc axp20x_regulators[] = { | 138 | static const struct regulator_desc axp20x_regulators[] = { |
130 | AXP20X_DESC(DCDC2, "vin2", 700, 2275, 25, AXP20X_DCDC2_V_OUT, 0x3f, | 139 | AXP20X_DESC(DCDC2, "dcdc2", "vin2", 700, 2275, 25, AXP20X_DCDC2_V_OUT, |
131 | AXP20X_PWR_OUT_CTRL, 0x10), | 140 | 0x3f, AXP20X_PWR_OUT_CTRL, 0x10), |
132 | AXP20X_DESC(DCDC3, "vin3", 700, 3500, 25, AXP20X_DCDC3_V_OUT, 0x7f, | 141 | AXP20X_DESC(DCDC3, "dcdc3", "vin3", 700, 3500, 25, AXP20X_DCDC3_V_OUT, |
133 | AXP20X_PWR_OUT_CTRL, 0x02), | 142 | 0x7f, AXP20X_PWR_OUT_CTRL, 0x02), |
134 | AXP20X_DESC_FIXED(LDO1, "acin", 1300), | 143 | AXP20X_DESC_FIXED(LDO1, "ldo1", "acin", 1300), |
135 | AXP20X_DESC(LDO2, "ldo24in", 1800, 3300, 100, AXP20X_LDO24_V_OUT, 0xf0, | 144 | AXP20X_DESC(LDO2, "ldo2", "ldo24in", 1800, 3300, 100, |
136 | AXP20X_PWR_OUT_CTRL, 0x04), | 145 | AXP20X_LDO24_V_OUT, 0xf0, AXP20X_PWR_OUT_CTRL, 0x04), |
137 | AXP20X_DESC(LDO3, "ldo3in", 700, 3500, 25, AXP20X_LDO3_V_OUT, 0x7f, | 146 | AXP20X_DESC(LDO3, "ldo3", "ldo3in", 700, 3500, 25, AXP20X_LDO3_V_OUT, |
138 | AXP20X_PWR_OUT_CTRL, 0x40), | 147 | 0x7f, AXP20X_PWR_OUT_CTRL, 0x40), |
139 | AXP20X_DESC_TABLE(LDO4, "ldo24in", axp20x_ldo4_data, AXP20X_LDO24_V_OUT, 0x0f, | 148 | AXP20X_DESC_TABLE(LDO4, "ldo4", "ldo24in", axp20x_ldo4_data, |
140 | AXP20X_PWR_OUT_CTRL, 0x08), | 149 | AXP20X_LDO24_V_OUT, 0x0f, AXP20X_PWR_OUT_CTRL, 0x08), |
141 | AXP20X_DESC_IO(LDO5, "ldo5in", 1800, 3300, 100, AXP20X_LDO5_V_OUT, 0xf0, | 150 | AXP20X_DESC_IO(LDO5, "ldo5", "ldo5in", 1800, 3300, 100, |
142 | AXP20X_GPIO0_CTRL, 0x07, AXP20X_IO_ENABLED, | 151 | AXP20X_LDO5_V_OUT, 0xf0, AXP20X_GPIO0_CTRL, 0x07, |
143 | AXP20X_IO_DISABLED), | 152 | AXP20X_IO_ENABLED, AXP20X_IO_DISABLED), |
144 | }; | ||
145 | |||
146 | #define AXP_MATCH(_name, _id) \ | ||
147 | [AXP20X_##_id] = { \ | ||
148 | .name = #_name, \ | ||
149 | .driver_data = (void *) &axp20x_regulators[AXP20X_##_id], \ | ||
150 | } | ||
151 | |||
152 | static struct of_regulator_match axp20x_matches[] = { | ||
153 | AXP_MATCH(dcdc2, DCDC2), | ||
154 | AXP_MATCH(dcdc3, DCDC3), | ||
155 | AXP_MATCH(ldo1, LDO1), | ||
156 | AXP_MATCH(ldo2, LDO2), | ||
157 | AXP_MATCH(ldo3, LDO3), | ||
158 | AXP_MATCH(ldo4, LDO4), | ||
159 | AXP_MATCH(ldo5, LDO5), | ||
160 | }; | 153 | }; |
161 | 154 | ||
162 | static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq) | 155 | static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq) |
@@ -193,13 +186,6 @@ static int axp20x_regulator_parse_dt(struct platform_device *pdev) | |||
193 | if (!regulators) { | 186 | if (!regulators) { |
194 | dev_warn(&pdev->dev, "regulators node not found\n"); | 187 | dev_warn(&pdev->dev, "regulators node not found\n"); |
195 | } else { | 188 | } else { |
196 | ret = of_regulator_match(&pdev->dev, regulators, axp20x_matches, | ||
197 | ARRAY_SIZE(axp20x_matches)); | ||
198 | if (ret < 0) { | ||
199 | dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret); | ||
200 | return ret; | ||
201 | } | ||
202 | |||
203 | dcdcfreq = 1500; | 189 | dcdcfreq = 1500; |
204 | of_property_read_u32(regulators, "x-powers,dcdc-freq", &dcdcfreq); | 190 | of_property_read_u32(regulators, "x-powers,dcdc-freq", &dcdcfreq); |
205 | ret = axp20x_set_dcdc_freq(pdev, dcdcfreq); | 191 | ret = axp20x_set_dcdc_freq(pdev, dcdcfreq); |
@@ -233,23 +219,17 @@ static int axp20x_regulator_probe(struct platform_device *pdev) | |||
233 | { | 219 | { |
234 | struct regulator_dev *rdev; | 220 | struct regulator_dev *rdev; |
235 | struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent); | 221 | struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent); |
236 | struct regulator_config config = { }; | 222 | struct regulator_config config = { |
237 | struct regulator_init_data *init_data; | 223 | .dev = pdev->dev.parent, |
224 | .regmap = axp20x->regmap, | ||
225 | }; | ||
238 | int ret, i; | 226 | int ret, i; |
239 | u32 workmode; | 227 | u32 workmode; |
240 | 228 | ||
241 | ret = axp20x_regulator_parse_dt(pdev); | 229 | /* This only sets the dcdc freq. Ignore any errors */ |
242 | if (ret) | 230 | axp20x_regulator_parse_dt(pdev); |
243 | return ret; | ||
244 | 231 | ||
245 | for (i = 0; i < AXP20X_REG_ID_MAX; i++) { | 232 | for (i = 0; i < AXP20X_REG_ID_MAX; i++) { |
246 | init_data = axp20x_matches[i].init_data; | ||
247 | |||
248 | config.dev = pdev->dev.parent; | ||
249 | config.init_data = init_data; | ||
250 | config.regmap = axp20x->regmap; | ||
251 | config.of_node = axp20x_matches[i].of_node; | ||
252 | |||
253 | rdev = devm_regulator_register(&pdev->dev, &axp20x_regulators[i], | 233 | rdev = devm_regulator_register(&pdev->dev, &axp20x_regulators[i], |
254 | &config); | 234 | &config); |
255 | if (IS_ERR(rdev)) { | 235 | if (IS_ERR(rdev)) { |
@@ -259,7 +239,8 @@ static int axp20x_regulator_probe(struct platform_device *pdev) | |||
259 | return PTR_ERR(rdev); | 239 | return PTR_ERR(rdev); |
260 | } | 240 | } |
261 | 241 | ||
262 | ret = of_property_read_u32(axp20x_matches[i].of_node, "x-powers,dcdc-workmode", | 242 | ret = of_property_read_u32(rdev->dev.of_node, |
243 | "x-powers,dcdc-workmode", | ||
263 | &workmode); | 244 | &workmode); |
264 | if (!ret) { | 245 | if (!ret) { |
265 | if (axp20x_set_dcdc_workmode(rdev, i, workmode)) | 246 | if (axp20x_set_dcdc_workmode(rdev, i, workmode)) |
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index e225711bb8bc..b899947d839d 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c | |||
@@ -632,49 +632,34 @@ static ssize_t regulator_bypass_show(struct device *dev, | |||
632 | static DEVICE_ATTR(bypass, 0444, | 632 | static DEVICE_ATTR(bypass, 0444, |
633 | regulator_bypass_show, NULL); | 633 | regulator_bypass_show, NULL); |
634 | 634 | ||
635 | /* | ||
636 | * These are the only attributes are present for all regulators. | ||
637 | * Other attributes are a function of regulator functionality. | ||
638 | */ | ||
639 | static struct attribute *regulator_dev_attrs[] = { | ||
640 | &dev_attr_name.attr, | ||
641 | &dev_attr_num_users.attr, | ||
642 | &dev_attr_type.attr, | ||
643 | NULL, | ||
644 | }; | ||
645 | ATTRIBUTE_GROUPS(regulator_dev); | ||
646 | |||
647 | static void regulator_dev_release(struct device *dev) | ||
648 | { | ||
649 | struct regulator_dev *rdev = dev_get_drvdata(dev); | ||
650 | kfree(rdev); | ||
651 | } | ||
652 | |||
653 | static struct class regulator_class = { | ||
654 | .name = "regulator", | ||
655 | .dev_release = regulator_dev_release, | ||
656 | .dev_groups = regulator_dev_groups, | ||
657 | }; | ||
658 | |||
659 | /* Calculate the new optimum regulator operating mode based on the new total | 635 | /* Calculate the new optimum regulator operating mode based on the new total |
660 | * consumer load. All locks held by caller */ | 636 | * consumer load. All locks held by caller */ |
661 | static void drms_uA_update(struct regulator_dev *rdev) | 637 | static int drms_uA_update(struct regulator_dev *rdev) |
662 | { | 638 | { |
663 | struct regulator *sibling; | 639 | struct regulator *sibling; |
664 | int current_uA = 0, output_uV, input_uV, err; | 640 | int current_uA = 0, output_uV, input_uV, err; |
665 | unsigned int mode; | 641 | unsigned int mode; |
666 | 642 | ||
643 | /* | ||
644 | * first check to see if we can set modes at all, otherwise just | ||
645 | * tell the consumer everything is OK. | ||
646 | */ | ||
667 | err = regulator_check_drms(rdev); | 647 | err = regulator_check_drms(rdev); |
668 | if (err < 0 || !rdev->desc->ops->get_optimum_mode || | 648 | if (err < 0) |
669 | (!rdev->desc->ops->get_voltage && | 649 | return 0; |
670 | !rdev->desc->ops->get_voltage_sel) || | 650 | |
671 | !rdev->desc->ops->set_mode) | 651 | if (!rdev->desc->ops->get_optimum_mode) |
672 | return; | 652 | return 0; |
653 | |||
654 | if (!rdev->desc->ops->set_mode) | ||
655 | return -EINVAL; | ||
673 | 656 | ||
674 | /* get output voltage */ | 657 | /* get output voltage */ |
675 | output_uV = _regulator_get_voltage(rdev); | 658 | output_uV = _regulator_get_voltage(rdev); |
676 | if (output_uV <= 0) | 659 | if (output_uV <= 0) { |
677 | return; | 660 | rdev_err(rdev, "invalid output voltage found\n"); |
661 | return -EINVAL; | ||
662 | } | ||
678 | 663 | ||
679 | /* get input voltage */ | 664 | /* get input voltage */ |
680 | input_uV = 0; | 665 | input_uV = 0; |
@@ -682,8 +667,10 @@ static void drms_uA_update(struct regulator_dev *rdev) | |||
682 | input_uV = regulator_get_voltage(rdev->supply); | 667 | input_uV = regulator_get_voltage(rdev->supply); |
683 | if (input_uV <= 0) | 668 | if (input_uV <= 0) |
684 | input_uV = rdev->constraints->input_uV; | 669 | input_uV = rdev->constraints->input_uV; |
685 | if (input_uV <= 0) | 670 | if (input_uV <= 0) { |
686 | return; | 671 | rdev_err(rdev, "invalid input voltage found\n"); |
672 | return -EINVAL; | ||
673 | } | ||
687 | 674 | ||
688 | /* calc total requested load */ | 675 | /* calc total requested load */ |
689 | list_for_each_entry(sibling, &rdev->consumer_list, list) | 676 | list_for_each_entry(sibling, &rdev->consumer_list, list) |
@@ -695,8 +682,17 @@ static void drms_uA_update(struct regulator_dev *rdev) | |||
695 | 682 | ||
696 | /* check the new mode is allowed */ | 683 | /* check the new mode is allowed */ |
697 | err = regulator_mode_constrain(rdev, &mode); | 684 | err = regulator_mode_constrain(rdev, &mode); |
698 | if (err == 0) | 685 | if (err < 0) { |
699 | rdev->desc->ops->set_mode(rdev, mode); | 686 | rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n", |
687 | current_uA, input_uV, output_uV); | ||
688 | return err; | ||
689 | } | ||
690 | |||
691 | err = rdev->desc->ops->set_mode(rdev, mode); | ||
692 | if (err < 0) | ||
693 | rdev_err(rdev, "failed to set optimum mode %x\n", mode); | ||
694 | |||
695 | return err; | ||
700 | } | 696 | } |
701 | 697 | ||
702 | static int suspend_set_state(struct regulator_dev *rdev, | 698 | static int suspend_set_state(struct regulator_dev *rdev, |
@@ -1488,7 +1484,7 @@ struct regulator *regulator_get_optional(struct device *dev, const char *id) | |||
1488 | } | 1484 | } |
1489 | EXPORT_SYMBOL_GPL(regulator_get_optional); | 1485 | EXPORT_SYMBOL_GPL(regulator_get_optional); |
1490 | 1486 | ||
1491 | /* Locks held by regulator_put() */ | 1487 | /* regulator_list_mutex lock held by regulator_put() */ |
1492 | static void _regulator_put(struct regulator *regulator) | 1488 | static void _regulator_put(struct regulator *regulator) |
1493 | { | 1489 | { |
1494 | struct regulator_dev *rdev; | 1490 | struct regulator_dev *rdev; |
@@ -1503,12 +1499,14 @@ static void _regulator_put(struct regulator *regulator) | |||
1503 | /* remove any sysfs entries */ | 1499 | /* remove any sysfs entries */ |
1504 | if (regulator->dev) | 1500 | if (regulator->dev) |
1505 | sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name); | 1501 | sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name); |
1502 | mutex_lock(&rdev->mutex); | ||
1506 | kfree(regulator->supply_name); | 1503 | kfree(regulator->supply_name); |
1507 | list_del(®ulator->list); | 1504 | list_del(®ulator->list); |
1508 | kfree(regulator); | 1505 | kfree(regulator); |
1509 | 1506 | ||
1510 | rdev->open_count--; | 1507 | rdev->open_count--; |
1511 | rdev->exclusive = 0; | 1508 | rdev->exclusive = 0; |
1509 | mutex_unlock(&rdev->mutex); | ||
1512 | 1510 | ||
1513 | module_put(rdev->owner); | 1511 | module_put(rdev->owner); |
1514 | } | 1512 | } |
@@ -3024,75 +3022,13 @@ EXPORT_SYMBOL_GPL(regulator_get_mode); | |||
3024 | int regulator_set_optimum_mode(struct regulator *regulator, int uA_load) | 3022 | int regulator_set_optimum_mode(struct regulator *regulator, int uA_load) |
3025 | { | 3023 | { |
3026 | struct regulator_dev *rdev = regulator->rdev; | 3024 | struct regulator_dev *rdev = regulator->rdev; |
3027 | struct regulator *consumer; | 3025 | int ret; |
3028 | int ret, output_uV, input_uV = 0, total_uA_load = 0; | ||
3029 | unsigned int mode; | ||
3030 | |||
3031 | if (rdev->supply) | ||
3032 | input_uV = regulator_get_voltage(rdev->supply); | ||
3033 | 3026 | ||
3034 | mutex_lock(&rdev->mutex); | 3027 | mutex_lock(&rdev->mutex); |
3035 | |||
3036 | /* | ||
3037 | * first check to see if we can set modes at all, otherwise just | ||
3038 | * tell the consumer everything is OK. | ||
3039 | */ | ||
3040 | regulator->uA_load = uA_load; | 3028 | regulator->uA_load = uA_load; |
3041 | ret = regulator_check_drms(rdev); | 3029 | ret = drms_uA_update(rdev); |
3042 | if (ret < 0) { | ||
3043 | ret = 0; | ||
3044 | goto out; | ||
3045 | } | ||
3046 | |||
3047 | if (!rdev->desc->ops->get_optimum_mode) | ||
3048 | goto out; | ||
3049 | |||
3050 | /* | ||
3051 | * we can actually do this so any errors are indicators of | ||
3052 | * potential real failure. | ||
3053 | */ | ||
3054 | ret = -EINVAL; | ||
3055 | |||
3056 | if (!rdev->desc->ops->set_mode) | ||
3057 | goto out; | ||
3058 | |||
3059 | /* get output voltage */ | ||
3060 | output_uV = _regulator_get_voltage(rdev); | ||
3061 | if (output_uV <= 0) { | ||
3062 | rdev_err(rdev, "invalid output voltage found\n"); | ||
3063 | goto out; | ||
3064 | } | ||
3065 | |||
3066 | /* No supply? Use constraint voltage */ | ||
3067 | if (input_uV <= 0) | ||
3068 | input_uV = rdev->constraints->input_uV; | ||
3069 | if (input_uV <= 0) { | ||
3070 | rdev_err(rdev, "invalid input voltage found\n"); | ||
3071 | goto out; | ||
3072 | } | ||
3073 | |||
3074 | /* calc total requested load for this regulator */ | ||
3075 | list_for_each_entry(consumer, &rdev->consumer_list, list) | ||
3076 | total_uA_load += consumer->uA_load; | ||
3077 | |||
3078 | mode = rdev->desc->ops->get_optimum_mode(rdev, | ||
3079 | input_uV, output_uV, | ||
3080 | total_uA_load); | ||
3081 | ret = regulator_mode_constrain(rdev, &mode); | ||
3082 | if (ret < 0) { | ||
3083 | rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n", | ||
3084 | total_uA_load, input_uV, output_uV); | ||
3085 | goto out; | ||
3086 | } | ||
3087 | |||
3088 | ret = rdev->desc->ops->set_mode(rdev, mode); | ||
3089 | if (ret < 0) { | ||
3090 | rdev_err(rdev, "failed to set optimum mode %x\n", mode); | ||
3091 | goto out; | ||
3092 | } | ||
3093 | ret = mode; | ||
3094 | out: | ||
3095 | mutex_unlock(&rdev->mutex); | 3030 | mutex_unlock(&rdev->mutex); |
3031 | |||
3096 | return ret; | 3032 | return ret; |
3097 | } | 3033 | } |
3098 | EXPORT_SYMBOL_GPL(regulator_set_optimum_mode); | 3034 | EXPORT_SYMBOL_GPL(regulator_set_optimum_mode); |
@@ -3434,126 +3370,136 @@ int regulator_mode_to_status(unsigned int mode) | |||
3434 | } | 3370 | } |
3435 | EXPORT_SYMBOL_GPL(regulator_mode_to_status); | 3371 | EXPORT_SYMBOL_GPL(regulator_mode_to_status); |
3436 | 3372 | ||
3373 | static struct attribute *regulator_dev_attrs[] = { | ||
3374 | &dev_attr_name.attr, | ||
3375 | &dev_attr_num_users.attr, | ||
3376 | &dev_attr_type.attr, | ||
3377 | &dev_attr_microvolts.attr, | ||
3378 | &dev_attr_microamps.attr, | ||
3379 | &dev_attr_opmode.attr, | ||
3380 | &dev_attr_state.attr, | ||
3381 | &dev_attr_status.attr, | ||
3382 | &dev_attr_bypass.attr, | ||
3383 | &dev_attr_requested_microamps.attr, | ||
3384 | &dev_attr_min_microvolts.attr, | ||
3385 | &dev_attr_max_microvolts.attr, | ||
3386 | &dev_attr_min_microamps.attr, | ||
3387 | &dev_attr_max_microamps.attr, | ||
3388 | &dev_attr_suspend_standby_state.attr, | ||
3389 | &dev_attr_suspend_mem_state.attr, | ||
3390 | &dev_attr_suspend_disk_state.attr, | ||
3391 | &dev_attr_suspend_standby_microvolts.attr, | ||
3392 | &dev_attr_suspend_mem_microvolts.attr, | ||
3393 | &dev_attr_suspend_disk_microvolts.attr, | ||
3394 | &dev_attr_suspend_standby_mode.attr, | ||
3395 | &dev_attr_suspend_mem_mode.attr, | ||
3396 | &dev_attr_suspend_disk_mode.attr, | ||
3397 | NULL | ||
3398 | }; | ||
3399 | |||
3437 | /* | 3400 | /* |
3438 | * To avoid cluttering sysfs (and memory) with useless state, only | 3401 | * To avoid cluttering sysfs (and memory) with useless state, only |
3439 | * create attributes that can be meaningfully displayed. | 3402 | * create attributes that can be meaningfully displayed. |
3440 | */ | 3403 | */ |
3441 | static int add_regulator_attributes(struct regulator_dev *rdev) | 3404 | static umode_t regulator_attr_is_visible(struct kobject *kobj, |
3405 | struct attribute *attr, int idx) | ||
3442 | { | 3406 | { |
3443 | struct device *dev = &rdev->dev; | 3407 | struct device *dev = kobj_to_dev(kobj); |
3408 | struct regulator_dev *rdev = container_of(dev, struct regulator_dev, dev); | ||
3444 | const struct regulator_ops *ops = rdev->desc->ops; | 3409 | const struct regulator_ops *ops = rdev->desc->ops; |
3445 | int status = 0; | 3410 | umode_t mode = attr->mode; |
3411 | |||
3412 | /* these three are always present */ | ||
3413 | if (attr == &dev_attr_name.attr || | ||
3414 | attr == &dev_attr_num_users.attr || | ||
3415 | attr == &dev_attr_type.attr) | ||
3416 | return mode; | ||
3446 | 3417 | ||
3447 | /* some attributes need specific methods to be displayed */ | 3418 | /* some attributes need specific methods to be displayed */ |
3448 | if ((ops->get_voltage && ops->get_voltage(rdev) >= 0) || | 3419 | if (attr == &dev_attr_microvolts.attr) { |
3449 | (ops->get_voltage_sel && ops->get_voltage_sel(rdev) >= 0) || | 3420 | if ((ops->get_voltage && ops->get_voltage(rdev) >= 0) || |
3450 | (ops->list_voltage && ops->list_voltage(rdev, 0) >= 0) || | 3421 | (ops->get_voltage_sel && ops->get_voltage_sel(rdev) >= 0) || |
3451 | (rdev->desc->fixed_uV && (rdev->desc->n_voltages == 1))) { | 3422 | (ops->list_voltage && ops->list_voltage(rdev, 0) >= 0) || |
3452 | status = device_create_file(dev, &dev_attr_microvolts); | 3423 | (rdev->desc->fixed_uV && rdev->desc->n_voltages == 1)) |
3453 | if (status < 0) | 3424 | return mode; |
3454 | return status; | 3425 | return 0; |
3455 | } | ||
3456 | if (ops->get_current_limit) { | ||
3457 | status = device_create_file(dev, &dev_attr_microamps); | ||
3458 | if (status < 0) | ||
3459 | return status; | ||
3460 | } | ||
3461 | if (ops->get_mode) { | ||
3462 | status = device_create_file(dev, &dev_attr_opmode); | ||
3463 | if (status < 0) | ||
3464 | return status; | ||
3465 | } | ||
3466 | if (rdev->ena_pin || ops->is_enabled) { | ||
3467 | status = device_create_file(dev, &dev_attr_state); | ||
3468 | if (status < 0) | ||
3469 | return status; | ||
3470 | } | ||
3471 | if (ops->get_status) { | ||
3472 | status = device_create_file(dev, &dev_attr_status); | ||
3473 | if (status < 0) | ||
3474 | return status; | ||
3475 | } | ||
3476 | if (ops->get_bypass) { | ||
3477 | status = device_create_file(dev, &dev_attr_bypass); | ||
3478 | if (status < 0) | ||
3479 | return status; | ||
3480 | } | 3426 | } |
3481 | 3427 | ||
3428 | if (attr == &dev_attr_microamps.attr) | ||
3429 | return ops->get_current_limit ? mode : 0; | ||
3430 | |||
3431 | if (attr == &dev_attr_opmode.attr) | ||
3432 | return ops->get_mode ? mode : 0; | ||
3433 | |||
3434 | if (attr == &dev_attr_state.attr) | ||
3435 | return (rdev->ena_pin || ops->is_enabled) ? mode : 0; | ||
3436 | |||
3437 | if (attr == &dev_attr_status.attr) | ||
3438 | return ops->get_status ? mode : 0; | ||
3439 | |||
3440 | if (attr == &dev_attr_bypass.attr) | ||
3441 | return ops->get_bypass ? mode : 0; | ||
3442 | |||
3482 | /* some attributes are type-specific */ | 3443 | /* some attributes are type-specific */ |
3483 | if (rdev->desc->type == REGULATOR_CURRENT) { | 3444 | if (attr == &dev_attr_requested_microamps.attr) |
3484 | status = device_create_file(dev, &dev_attr_requested_microamps); | 3445 | return rdev->desc->type == REGULATOR_CURRENT ? mode : 0; |
3485 | if (status < 0) | ||
3486 | return status; | ||
3487 | } | ||
3488 | 3446 | ||
3489 | /* all the other attributes exist to support constraints; | 3447 | /* all the other attributes exist to support constraints; |
3490 | * don't show them if there are no constraints, or if the | 3448 | * don't show them if there are no constraints, or if the |
3491 | * relevant supporting methods are missing. | 3449 | * relevant supporting methods are missing. |
3492 | */ | 3450 | */ |
3493 | if (!rdev->constraints) | 3451 | if (!rdev->constraints) |
3494 | return status; | 3452 | return 0; |
3495 | 3453 | ||
3496 | /* constraints need specific supporting methods */ | 3454 | /* constraints need specific supporting methods */ |
3497 | if (ops->set_voltage || ops->set_voltage_sel) { | 3455 | if (attr == &dev_attr_min_microvolts.attr || |
3498 | status = device_create_file(dev, &dev_attr_min_microvolts); | 3456 | attr == &dev_attr_max_microvolts.attr) |
3499 | if (status < 0) | 3457 | return (ops->set_voltage || ops->set_voltage_sel) ? mode : 0; |
3500 | return status; | 3458 | |
3501 | status = device_create_file(dev, &dev_attr_max_microvolts); | 3459 | if (attr == &dev_attr_min_microamps.attr || |
3502 | if (status < 0) | 3460 | attr == &dev_attr_max_microamps.attr) |
3503 | return status; | 3461 | return ops->set_current_limit ? mode : 0; |
3504 | } | 3462 | |
3505 | if (ops->set_current_limit) { | 3463 | if (attr == &dev_attr_suspend_standby_state.attr || |
3506 | status = device_create_file(dev, &dev_attr_min_microamps); | 3464 | attr == &dev_attr_suspend_mem_state.attr || |
3507 | if (status < 0) | 3465 | attr == &dev_attr_suspend_disk_state.attr) |
3508 | return status; | 3466 | return mode; |
3509 | status = device_create_file(dev, &dev_attr_max_microamps); | 3467 | |
3510 | if (status < 0) | 3468 | if (attr == &dev_attr_suspend_standby_microvolts.attr || |
3511 | return status; | 3469 | attr == &dev_attr_suspend_mem_microvolts.attr || |
3512 | } | 3470 | attr == &dev_attr_suspend_disk_microvolts.attr) |
3513 | 3471 | return ops->set_suspend_voltage ? mode : 0; | |
3514 | status = device_create_file(dev, &dev_attr_suspend_standby_state); | 3472 | |
3515 | if (status < 0) | 3473 | if (attr == &dev_attr_suspend_standby_mode.attr || |
3516 | return status; | 3474 | attr == &dev_attr_suspend_mem_mode.attr || |
3517 | status = device_create_file(dev, &dev_attr_suspend_mem_state); | 3475 | attr == &dev_attr_suspend_disk_mode.attr) |
3518 | if (status < 0) | 3476 | return ops->set_suspend_mode ? mode : 0; |
3519 | return status; | 3477 | |
3520 | status = device_create_file(dev, &dev_attr_suspend_disk_state); | 3478 | return mode; |
3521 | if (status < 0) | 3479 | } |
3522 | return status; | 3480 | |
3481 | static const struct attribute_group regulator_dev_group = { | ||
3482 | .attrs = regulator_dev_attrs, | ||
3483 | .is_visible = regulator_attr_is_visible, | ||
3484 | }; | ||
3485 | |||
3486 | static const struct attribute_group *regulator_dev_groups[] = { | ||
3487 | ®ulator_dev_group, | ||
3488 | NULL | ||
3489 | }; | ||
3523 | 3490 | ||
3524 | if (ops->set_suspend_voltage) { | 3491 | static void regulator_dev_release(struct device *dev) |
3525 | status = device_create_file(dev, | 3492 | { |
3526 | &dev_attr_suspend_standby_microvolts); | 3493 | struct regulator_dev *rdev = dev_get_drvdata(dev); |
3527 | if (status < 0) | 3494 | kfree(rdev); |
3528 | return status; | ||
3529 | status = device_create_file(dev, | ||
3530 | &dev_attr_suspend_mem_microvolts); | ||
3531 | if (status < 0) | ||
3532 | return status; | ||
3533 | status = device_create_file(dev, | ||
3534 | &dev_attr_suspend_disk_microvolts); | ||
3535 | if (status < 0) | ||
3536 | return status; | ||
3537 | } | ||
3538 | |||
3539 | if (ops->set_suspend_mode) { | ||
3540 | status = device_create_file(dev, | ||
3541 | &dev_attr_suspend_standby_mode); | ||
3542 | if (status < 0) | ||
3543 | return status; | ||
3544 | status = device_create_file(dev, | ||
3545 | &dev_attr_suspend_mem_mode); | ||
3546 | if (status < 0) | ||
3547 | return status; | ||
3548 | status = device_create_file(dev, | ||
3549 | &dev_attr_suspend_disk_mode); | ||
3550 | if (status < 0) | ||
3551 | return status; | ||
3552 | } | ||
3553 | |||
3554 | return status; | ||
3555 | } | 3495 | } |
3556 | 3496 | ||
3497 | static struct class regulator_class = { | ||
3498 | .name = "regulator", | ||
3499 | .dev_release = regulator_dev_release, | ||
3500 | .dev_groups = regulator_dev_groups, | ||
3501 | }; | ||
3502 | |||
3557 | static void rdev_init_debugfs(struct regulator_dev *rdev) | 3503 | static void rdev_init_debugfs(struct regulator_dev *rdev) |
3558 | { | 3504 | { |
3559 | rdev->debugfs = debugfs_create_dir(rdev_get_name(rdev), debugfs_root); | 3505 | rdev->debugfs = debugfs_create_dir(rdev_get_name(rdev), debugfs_root); |
@@ -3573,7 +3519,7 @@ static void rdev_init_debugfs(struct regulator_dev *rdev) | |||
3573 | /** | 3519 | /** |
3574 | * regulator_register - register regulator | 3520 | * regulator_register - register regulator |
3575 | * @regulator_desc: regulator to register | 3521 | * @regulator_desc: regulator to register |
3576 | * @config: runtime configuration for regulator | 3522 | * @cfg: runtime configuration for regulator |
3577 | * | 3523 | * |
3578 | * Called by regulator drivers to register a regulator. | 3524 | * Called by regulator drivers to register a regulator. |
3579 | * Returns a valid pointer to struct regulator_dev on success | 3525 | * Returns a valid pointer to struct regulator_dev on success |
@@ -3581,20 +3527,21 @@ static void rdev_init_debugfs(struct regulator_dev *rdev) | |||
3581 | */ | 3527 | */ |
3582 | struct regulator_dev * | 3528 | struct regulator_dev * |
3583 | regulator_register(const struct regulator_desc *regulator_desc, | 3529 | regulator_register(const struct regulator_desc *regulator_desc, |
3584 | const struct regulator_config *config) | 3530 | const struct regulator_config *cfg) |
3585 | { | 3531 | { |
3586 | const struct regulation_constraints *constraints = NULL; | 3532 | const struct regulation_constraints *constraints = NULL; |
3587 | const struct regulator_init_data *init_data; | 3533 | const struct regulator_init_data *init_data; |
3588 | static atomic_t regulator_no = ATOMIC_INIT(0); | 3534 | struct regulator_config *config = NULL; |
3535 | static atomic_t regulator_no = ATOMIC_INIT(-1); | ||
3589 | struct regulator_dev *rdev; | 3536 | struct regulator_dev *rdev; |
3590 | struct device *dev; | 3537 | struct device *dev; |
3591 | int ret, i; | 3538 | int ret, i; |
3592 | const char *supply = NULL; | 3539 | const char *supply = NULL; |
3593 | 3540 | ||
3594 | if (regulator_desc == NULL || config == NULL) | 3541 | if (regulator_desc == NULL || cfg == NULL) |
3595 | return ERR_PTR(-EINVAL); | 3542 | return ERR_PTR(-EINVAL); |
3596 | 3543 | ||
3597 | dev = config->dev; | 3544 | dev = cfg->dev; |
3598 | WARN_ON(!dev); | 3545 | WARN_ON(!dev); |
3599 | 3546 | ||
3600 | if (regulator_desc->name == NULL || regulator_desc->ops == NULL) | 3547 | if (regulator_desc->name == NULL || regulator_desc->ops == NULL) |
@@ -3624,7 +3571,17 @@ regulator_register(const struct regulator_desc *regulator_desc, | |||
3624 | if (rdev == NULL) | 3571 | if (rdev == NULL) |
3625 | return ERR_PTR(-ENOMEM); | 3572 | return ERR_PTR(-ENOMEM); |
3626 | 3573 | ||
3627 | init_data = regulator_of_get_init_data(dev, regulator_desc, | 3574 | /* |
3575 | * Duplicate the config so the driver could override it after | ||
3576 | * parsing init data. | ||
3577 | */ | ||
3578 | config = kmemdup(cfg, sizeof(*cfg), GFP_KERNEL); | ||
3579 | if (config == NULL) { | ||
3580 | kfree(rdev); | ||
3581 | return ERR_PTR(-ENOMEM); | ||
3582 | } | ||
3583 | |||
3584 | init_data = regulator_of_get_init_data(dev, regulator_desc, config, | ||
3628 | &rdev->dev.of_node); | 3585 | &rdev->dev.of_node); |
3629 | if (!init_data) { | 3586 | if (!init_data) { |
3630 | init_data = config->init_data; | 3587 | init_data = config->init_data; |
@@ -3658,8 +3615,8 @@ regulator_register(const struct regulator_desc *regulator_desc, | |||
3658 | /* register with sysfs */ | 3615 | /* register with sysfs */ |
3659 | rdev->dev.class = ®ulator_class; | 3616 | rdev->dev.class = ®ulator_class; |
3660 | rdev->dev.parent = dev; | 3617 | rdev->dev.parent = dev; |
3661 | dev_set_name(&rdev->dev, "regulator.%d", | 3618 | dev_set_name(&rdev->dev, "regulator.%lu", |
3662 | atomic_inc_return(®ulator_no) - 1); | 3619 | (unsigned long) atomic_inc_return(®ulator_no)); |
3663 | ret = device_register(&rdev->dev); | 3620 | ret = device_register(&rdev->dev); |
3664 | if (ret != 0) { | 3621 | if (ret != 0) { |
3665 | put_device(&rdev->dev); | 3622 | put_device(&rdev->dev); |
@@ -3692,11 +3649,6 @@ regulator_register(const struct regulator_desc *regulator_desc, | |||
3692 | if (ret < 0) | 3649 | if (ret < 0) |
3693 | goto scrub; | 3650 | goto scrub; |
3694 | 3651 | ||
3695 | /* add attributes supported by this regulator */ | ||
3696 | ret = add_regulator_attributes(rdev); | ||
3697 | if (ret < 0) | ||
3698 | goto scrub; | ||
3699 | |||
3700 | if (init_data && init_data->supply_regulator) | 3652 | if (init_data && init_data->supply_regulator) |
3701 | supply = init_data->supply_regulator; | 3653 | supply = init_data->supply_regulator; |
3702 | else if (regulator_desc->supply_name) | 3654 | else if (regulator_desc->supply_name) |
@@ -3752,6 +3704,7 @@ add_dev: | |||
3752 | rdev_init_debugfs(rdev); | 3704 | rdev_init_debugfs(rdev); |
3753 | out: | 3705 | out: |
3754 | mutex_unlock(®ulator_list_mutex); | 3706 | mutex_unlock(®ulator_list_mutex); |
3707 | kfree(config); | ||
3755 | return rdev; | 3708 | return rdev; |
3756 | 3709 | ||
3757 | unset_supplies: | 3710 | unset_supplies: |
diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c index c78d2106d6cb..01343419555e 100644 --- a/drivers/regulator/da9211-regulator.c +++ b/drivers/regulator/da9211-regulator.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/regmap.h> | 24 | #include <linux/regmap.h> |
25 | #include <linux/irq.h> | 25 | #include <linux/irq.h> |
26 | #include <linux/interrupt.h> | 26 | #include <linux/interrupt.h> |
27 | #include <linux/of_gpio.h> | ||
27 | #include <linux/regulator/of_regulator.h> | 28 | #include <linux/regulator/of_regulator.h> |
28 | #include <linux/regulator/da9211.h> | 29 | #include <linux/regulator/da9211.h> |
29 | #include "da9211-regulator.h" | 30 | #include "da9211-regulator.h" |
@@ -276,7 +277,10 @@ static struct da9211_pdata *da9211_parse_regulators_dt( | |||
276 | continue; | 277 | continue; |
277 | 278 | ||
278 | pdata->init_data[n] = da9211_matches[i].init_data; | 279 | pdata->init_data[n] = da9211_matches[i].init_data; |
279 | 280 | pdata->reg_node[n] = da9211_matches[i].of_node; | |
281 | pdata->gpio_ren[n] = | ||
282 | of_get_named_gpio(da9211_matches[i].of_node, | ||
283 | "enable-gpios", 0); | ||
280 | n++; | 284 | n++; |
281 | } | 285 | } |
282 | 286 | ||
@@ -364,7 +368,15 @@ static int da9211_regulator_init(struct da9211 *chip) | |||
364 | config.dev = chip->dev; | 368 | config.dev = chip->dev; |
365 | config.driver_data = chip; | 369 | config.driver_data = chip; |
366 | config.regmap = chip->regmap; | 370 | config.regmap = chip->regmap; |
367 | config.of_node = chip->dev->of_node; | 371 | config.of_node = chip->pdata->reg_node[i]; |
372 | |||
373 | if (gpio_is_valid(chip->pdata->gpio_ren[i])) { | ||
374 | config.ena_gpio = chip->pdata->gpio_ren[i]; | ||
375 | config.ena_gpio_initialized = true; | ||
376 | } else { | ||
377 | config.ena_gpio = -EINVAL; | ||
378 | config.ena_gpio_initialized = false; | ||
379 | } | ||
368 | 380 | ||
369 | chip->rdev[i] = devm_regulator_register(chip->dev, | 381 | chip->rdev[i] = devm_regulator_register(chip->dev, |
370 | &da9211_regulators[i], &config); | 382 | &da9211_regulators[i], &config); |
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c index 6c43ab2d5121..3c25db89a021 100644 --- a/drivers/regulator/fan53555.c +++ b/drivers/regulator/fan53555.c | |||
@@ -147,7 +147,7 @@ static unsigned int fan53555_get_mode(struct regulator_dev *rdev) | |||
147 | return REGULATOR_MODE_NORMAL; | 147 | return REGULATOR_MODE_NORMAL; |
148 | } | 148 | } |
149 | 149 | ||
150 | static int slew_rates[] = { | 150 | static const int slew_rates[] = { |
151 | 64000, | 151 | 64000, |
152 | 32000, | 152 | 32000, |
153 | 16000, | 153 | 16000, |
@@ -296,7 +296,7 @@ static int fan53555_regulator_register(struct fan53555_device_info *di, | |||
296 | return PTR_ERR_OR_ZERO(di->rdev); | 296 | return PTR_ERR_OR_ZERO(di->rdev); |
297 | } | 297 | } |
298 | 298 | ||
299 | static struct regmap_config fan53555_regmap_config = { | 299 | static const struct regmap_config fan53555_regmap_config = { |
300 | .reg_bits = 8, | 300 | .reg_bits = 8, |
301 | .val_bits = 8, | 301 | .val_bits = 8, |
302 | }; | 302 | }; |
diff --git a/drivers/regulator/internal.h b/drivers/regulator/internal.h index 80ba2a35a04b..c74ac8734023 100644 --- a/drivers/regulator/internal.h +++ b/drivers/regulator/internal.h | |||
@@ -38,11 +38,13 @@ struct regulator { | |||
38 | #ifdef CONFIG_OF | 38 | #ifdef CONFIG_OF |
39 | struct regulator_init_data *regulator_of_get_init_data(struct device *dev, | 39 | struct regulator_init_data *regulator_of_get_init_data(struct device *dev, |
40 | const struct regulator_desc *desc, | 40 | const struct regulator_desc *desc, |
41 | struct regulator_config *config, | ||
41 | struct device_node **node); | 42 | struct device_node **node); |
42 | #else | 43 | #else |
43 | static inline struct regulator_init_data * | 44 | static inline struct regulator_init_data * |
44 | regulator_of_get_init_data(struct device *dev, | 45 | regulator_of_get_init_data(struct device *dev, |
45 | const struct regulator_desc *desc, | 46 | const struct regulator_desc *desc, |
47 | struct regulator_config *config, | ||
46 | struct device_node **node) | 48 | struct device_node **node) |
47 | { | 49 | { |
48 | return NULL; | 50 | return NULL; |
diff --git a/drivers/regulator/isl9305.c b/drivers/regulator/isl9305.c index 92fefd98da58..6e3a15fe00f1 100644 --- a/drivers/regulator/isl9305.c +++ b/drivers/regulator/isl9305.c | |||
@@ -177,8 +177,10 @@ static int isl9305_i2c_probe(struct i2c_client *i2c, | |||
177 | 177 | ||
178 | #ifdef CONFIG_OF | 178 | #ifdef CONFIG_OF |
179 | static const struct of_device_id isl9305_dt_ids[] = { | 179 | static const struct of_device_id isl9305_dt_ids[] = { |
180 | { .compatible = "isl,isl9305" }, | 180 | { .compatible = "isl,isl9305" }, /* for backward compat., don't use */ |
181 | { .compatible = "isl,isl9305h" }, | 181 | { .compatible = "isil,isl9305" }, |
182 | { .compatible = "isl,isl9305h" }, /* for backward compat., don't use */ | ||
183 | { .compatible = "isil,isl9305h" }, | ||
182 | {}, | 184 | {}, |
183 | }; | 185 | }; |
184 | #endif | 186 | #endif |
diff --git a/drivers/regulator/lp872x.c b/drivers/regulator/lp872x.c index 021d64d856bb..3de328ab41f3 100644 --- a/drivers/regulator/lp872x.c +++ b/drivers/regulator/lp872x.c | |||
@@ -106,7 +106,6 @@ struct lp872x { | |||
106 | struct device *dev; | 106 | struct device *dev; |
107 | enum lp872x_id chipid; | 107 | enum lp872x_id chipid; |
108 | struct lp872x_platform_data *pdata; | 108 | struct lp872x_platform_data *pdata; |
109 | struct regulator_dev **regulators; | ||
110 | int num_regulators; | 109 | int num_regulators; |
111 | enum lp872x_dvs_state dvs_pin; | 110 | enum lp872x_dvs_state dvs_pin; |
112 | int dvs_gpio; | 111 | int dvs_gpio; |
@@ -801,8 +800,6 @@ static int lp872x_regulator_register(struct lp872x *lp) | |||
801 | dev_err(lp->dev, "regulator register err"); | 800 | dev_err(lp->dev, "regulator register err"); |
802 | return PTR_ERR(rdev); | 801 | return PTR_ERR(rdev); |
803 | } | 802 | } |
804 | |||
805 | *(lp->regulators + i) = rdev; | ||
806 | } | 803 | } |
807 | 804 | ||
808 | return 0; | 805 | return 0; |
@@ -906,7 +903,7 @@ static struct lp872x_platform_data | |||
906 | static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id) | 903 | static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id) |
907 | { | 904 | { |
908 | struct lp872x *lp; | 905 | struct lp872x *lp; |
909 | int ret, size, num_regulators; | 906 | int ret; |
910 | const int lp872x_num_regulators[] = { | 907 | const int lp872x_num_regulators[] = { |
911 | [LP8720] = LP8720_NUM_REGULATORS, | 908 | [LP8720] = LP8720_NUM_REGULATORS, |
912 | [LP8725] = LP8725_NUM_REGULATORS, | 909 | [LP8725] = LP8725_NUM_REGULATORS, |
@@ -918,38 +915,27 @@ static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id) | |||
918 | 915 | ||
919 | lp = devm_kzalloc(&cl->dev, sizeof(struct lp872x), GFP_KERNEL); | 916 | lp = devm_kzalloc(&cl->dev, sizeof(struct lp872x), GFP_KERNEL); |
920 | if (!lp) | 917 | if (!lp) |
921 | goto err_mem; | 918 | return -ENOMEM; |
922 | |||
923 | num_regulators = lp872x_num_regulators[id->driver_data]; | ||
924 | size = sizeof(struct regulator_dev *) * num_regulators; | ||
925 | 919 | ||
926 | lp->regulators = devm_kzalloc(&cl->dev, size, GFP_KERNEL); | 920 | lp->num_regulators = lp872x_num_regulators[id->driver_data]; |
927 | if (!lp->regulators) | ||
928 | goto err_mem; | ||
929 | 921 | ||
930 | lp->regmap = devm_regmap_init_i2c(cl, &lp872x_regmap_config); | 922 | lp->regmap = devm_regmap_init_i2c(cl, &lp872x_regmap_config); |
931 | if (IS_ERR(lp->regmap)) { | 923 | if (IS_ERR(lp->regmap)) { |
932 | ret = PTR_ERR(lp->regmap); | 924 | ret = PTR_ERR(lp->regmap); |
933 | dev_err(&cl->dev, "regmap init i2c err: %d\n", ret); | 925 | dev_err(&cl->dev, "regmap init i2c err: %d\n", ret); |
934 | goto err_dev; | 926 | return ret; |
935 | } | 927 | } |
936 | 928 | ||
937 | lp->dev = &cl->dev; | 929 | lp->dev = &cl->dev; |
938 | lp->pdata = dev_get_platdata(&cl->dev); | 930 | lp->pdata = dev_get_platdata(&cl->dev); |
939 | lp->chipid = id->driver_data; | 931 | lp->chipid = id->driver_data; |
940 | lp->num_regulators = num_regulators; | ||
941 | i2c_set_clientdata(cl, lp); | 932 | i2c_set_clientdata(cl, lp); |
942 | 933 | ||
943 | ret = lp872x_config(lp); | 934 | ret = lp872x_config(lp); |
944 | if (ret) | 935 | if (ret) |
945 | goto err_dev; | 936 | return ret; |
946 | 937 | ||
947 | return lp872x_regulator_register(lp); | 938 | return lp872x_regulator_register(lp); |
948 | |||
949 | err_mem: | ||
950 | return -ENOMEM; | ||
951 | err_dev: | ||
952 | return ret; | ||
953 | } | 939 | } |
954 | 940 | ||
955 | static const struct of_device_id lp872x_dt_ids[] = { | 941 | static const struct of_device_id lp872x_dt_ids[] = { |
diff --git a/drivers/regulator/max14577.c b/drivers/regulator/max14577.c index bf9a44c5fdd2..b3678d289619 100644 --- a/drivers/regulator/max14577.c +++ b/drivers/regulator/max14577.c | |||
@@ -103,6 +103,8 @@ static struct regulator_ops max14577_charger_ops = { | |||
103 | static const struct regulator_desc max14577_supported_regulators[] = { | 103 | static const struct regulator_desc max14577_supported_regulators[] = { |
104 | [MAX14577_SAFEOUT] = { | 104 | [MAX14577_SAFEOUT] = { |
105 | .name = "SAFEOUT", | 105 | .name = "SAFEOUT", |
106 | .of_match = of_match_ptr("SAFEOUT"), | ||
107 | .regulators_node = of_match_ptr("regulators"), | ||
106 | .id = MAX14577_SAFEOUT, | 108 | .id = MAX14577_SAFEOUT, |
107 | .ops = &max14577_safeout_ops, | 109 | .ops = &max14577_safeout_ops, |
108 | .type = REGULATOR_VOLTAGE, | 110 | .type = REGULATOR_VOLTAGE, |
@@ -114,6 +116,8 @@ static const struct regulator_desc max14577_supported_regulators[] = { | |||
114 | }, | 116 | }, |
115 | [MAX14577_CHARGER] = { | 117 | [MAX14577_CHARGER] = { |
116 | .name = "CHARGER", | 118 | .name = "CHARGER", |
119 | .of_match = of_match_ptr("CHARGER"), | ||
120 | .regulators_node = of_match_ptr("regulators"), | ||
117 | .id = MAX14577_CHARGER, | 121 | .id = MAX14577_CHARGER, |
118 | .ops = &max14577_charger_ops, | 122 | .ops = &max14577_charger_ops, |
119 | .type = REGULATOR_CURRENT, | 123 | .type = REGULATOR_CURRENT, |
@@ -137,6 +141,8 @@ static struct regulator_ops max77836_ldo_ops = { | |||
137 | static const struct regulator_desc max77836_supported_regulators[] = { | 141 | static const struct regulator_desc max77836_supported_regulators[] = { |
138 | [MAX14577_SAFEOUT] = { | 142 | [MAX14577_SAFEOUT] = { |
139 | .name = "SAFEOUT", | 143 | .name = "SAFEOUT", |
144 | .of_match = of_match_ptr("SAFEOUT"), | ||
145 | .regulators_node = of_match_ptr("regulators"), | ||
140 | .id = MAX14577_SAFEOUT, | 146 | .id = MAX14577_SAFEOUT, |
141 | .ops = &max14577_safeout_ops, | 147 | .ops = &max14577_safeout_ops, |
142 | .type = REGULATOR_VOLTAGE, | 148 | .type = REGULATOR_VOLTAGE, |
@@ -148,6 +154,8 @@ static const struct regulator_desc max77836_supported_regulators[] = { | |||
148 | }, | 154 | }, |
149 | [MAX14577_CHARGER] = { | 155 | [MAX14577_CHARGER] = { |
150 | .name = "CHARGER", | 156 | .name = "CHARGER", |
157 | .of_match = of_match_ptr("CHARGER"), | ||
158 | .regulators_node = of_match_ptr("regulators"), | ||
151 | .id = MAX14577_CHARGER, | 159 | .id = MAX14577_CHARGER, |
152 | .ops = &max14577_charger_ops, | 160 | .ops = &max14577_charger_ops, |
153 | .type = REGULATOR_CURRENT, | 161 | .type = REGULATOR_CURRENT, |
@@ -157,6 +165,8 @@ static const struct regulator_desc max77836_supported_regulators[] = { | |||
157 | }, | 165 | }, |
158 | [MAX77836_LDO1] = { | 166 | [MAX77836_LDO1] = { |
159 | .name = "LDO1", | 167 | .name = "LDO1", |
168 | .of_match = of_match_ptr("LDO1"), | ||
169 | .regulators_node = of_match_ptr("regulators"), | ||
160 | .id = MAX77836_LDO1, | 170 | .id = MAX77836_LDO1, |
161 | .ops = &max77836_ldo_ops, | 171 | .ops = &max77836_ldo_ops, |
162 | .type = REGULATOR_VOLTAGE, | 172 | .type = REGULATOR_VOLTAGE, |
@@ -171,6 +181,8 @@ static const struct regulator_desc max77836_supported_regulators[] = { | |||
171 | }, | 181 | }, |
172 | [MAX77836_LDO2] = { | 182 | [MAX77836_LDO2] = { |
173 | .name = "LDO2", | 183 | .name = "LDO2", |
184 | .of_match = of_match_ptr("LDO2"), | ||
185 | .regulators_node = of_match_ptr("regulators"), | ||
174 | .id = MAX77836_LDO2, | 186 | .id = MAX77836_LDO2, |
175 | .ops = &max77836_ldo_ops, | 187 | .ops = &max77836_ldo_ops, |
176 | .type = REGULATOR_VOLTAGE, | 188 | .type = REGULATOR_VOLTAGE, |
@@ -198,43 +210,6 @@ static struct of_regulator_match max77836_regulator_matches[] = { | |||
198 | { .name = "LDO2", }, | 210 | { .name = "LDO2", }, |
199 | }; | 211 | }; |
200 | 212 | ||
201 | static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev, | ||
202 | enum maxim_device_type dev_type) | ||
203 | { | ||
204 | int ret; | ||
205 | struct device_node *np; | ||
206 | struct of_regulator_match *regulator_matches; | ||
207 | unsigned int regulator_matches_size; | ||
208 | |||
209 | np = of_get_child_by_name(pdev->dev.parent->of_node, "regulators"); | ||
210 | if (!np) { | ||
211 | dev_err(&pdev->dev, "Failed to get child OF node for regulators\n"); | ||
212 | return -EINVAL; | ||
213 | } | ||
214 | |||
215 | switch (dev_type) { | ||
216 | case MAXIM_DEVICE_TYPE_MAX77836: | ||
217 | regulator_matches = max77836_regulator_matches; | ||
218 | regulator_matches_size = ARRAY_SIZE(max77836_regulator_matches); | ||
219 | break; | ||
220 | case MAXIM_DEVICE_TYPE_MAX14577: | ||
221 | default: | ||
222 | regulator_matches = max14577_regulator_matches; | ||
223 | regulator_matches_size = ARRAY_SIZE(max14577_regulator_matches); | ||
224 | } | ||
225 | |||
226 | ret = of_regulator_match(&pdev->dev, np, regulator_matches, | ||
227 | regulator_matches_size); | ||
228 | if (ret < 0) | ||
229 | dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret); | ||
230 | else | ||
231 | ret = 0; | ||
232 | |||
233 | of_node_put(np); | ||
234 | |||
235 | return ret; | ||
236 | } | ||
237 | |||
238 | static inline struct regulator_init_data *match_init_data(int index, | 213 | static inline struct regulator_init_data *match_init_data(int index, |
239 | enum maxim_device_type dev_type) | 214 | enum maxim_device_type dev_type) |
240 | { | 215 | { |
@@ -261,11 +236,6 @@ static inline struct device_node *match_of_node(int index, | |||
261 | } | 236 | } |
262 | } | 237 | } |
263 | #else /* CONFIG_OF */ | 238 | #else /* CONFIG_OF */ |
264 | static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev, | ||
265 | enum maxim_device_type dev_type) | ||
266 | { | ||
267 | return 0; | ||
268 | } | ||
269 | static inline struct regulator_init_data *match_init_data(int index, | 239 | static inline struct regulator_init_data *match_init_data(int index, |
270 | enum maxim_device_type dev_type) | 240 | enum maxim_device_type dev_type) |
271 | { | 241 | { |
@@ -308,16 +278,12 @@ static int max14577_regulator_probe(struct platform_device *pdev) | |||
308 | { | 278 | { |
309 | struct max14577 *max14577 = dev_get_drvdata(pdev->dev.parent); | 279 | struct max14577 *max14577 = dev_get_drvdata(pdev->dev.parent); |
310 | struct max14577_platform_data *pdata = dev_get_platdata(max14577->dev); | 280 | struct max14577_platform_data *pdata = dev_get_platdata(max14577->dev); |
311 | int i, ret; | 281 | int i, ret = 0; |
312 | struct regulator_config config = {}; | 282 | struct regulator_config config = {}; |
313 | const struct regulator_desc *supported_regulators; | 283 | const struct regulator_desc *supported_regulators; |
314 | unsigned int supported_regulators_size; | 284 | unsigned int supported_regulators_size; |
315 | enum maxim_device_type dev_type = max14577->dev_type; | 285 | enum maxim_device_type dev_type = max14577->dev_type; |
316 | 286 | ||
317 | ret = max14577_regulator_dt_parse_pdata(pdev, dev_type); | ||
318 | if (ret) | ||
319 | return ret; | ||
320 | |||
321 | switch (dev_type) { | 287 | switch (dev_type) { |
322 | case MAXIM_DEVICE_TYPE_MAX77836: | 288 | case MAXIM_DEVICE_TYPE_MAX77836: |
323 | supported_regulators = max77836_supported_regulators; | 289 | supported_regulators = max77836_supported_regulators; |
@@ -329,7 +295,7 @@ static int max14577_regulator_probe(struct platform_device *pdev) | |||
329 | supported_regulators_size = ARRAY_SIZE(max14577_supported_regulators); | 295 | supported_regulators_size = ARRAY_SIZE(max14577_supported_regulators); |
330 | } | 296 | } |
331 | 297 | ||
332 | config.dev = &pdev->dev; | 298 | config.dev = max14577->dev; |
333 | config.driver_data = max14577; | 299 | config.driver_data = max14577; |
334 | 300 | ||
335 | for (i = 0; i < supported_regulators_size; i++) { | 301 | for (i = 0; i < supported_regulators_size; i++) { |
diff --git a/drivers/regulator/max77686.c b/drivers/regulator/max77686.c index 10d206266ac2..15fb1416bfbd 100644 --- a/drivers/regulator/max77686.c +++ b/drivers/regulator/max77686.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/bug.h> | 26 | #include <linux/bug.h> |
27 | #include <linux/err.h> | 27 | #include <linux/err.h> |
28 | #include <linux/gpio.h> | 28 | #include <linux/gpio.h> |
29 | #include <linux/of_gpio.h> | ||
29 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
30 | #include <linux/platform_device.h> | 31 | #include <linux/platform_device.h> |
31 | #include <linux/regulator/driver.h> | 32 | #include <linux/regulator/driver.h> |
@@ -46,6 +47,11 @@ | |||
46 | #define MAX77686_DVS_UVSTEP 12500 | 47 | #define MAX77686_DVS_UVSTEP 12500 |
47 | 48 | ||
48 | /* | 49 | /* |
50 | * Value for configuring buck[89] and LDO{20,21,22} as GPIO control. | ||
51 | * It is the same as 'off' for other regulators. | ||
52 | */ | ||
53 | #define MAX77686_GPIO_CONTROL 0x0 | ||
54 | /* | ||
49 | * Values used for configuring LDOs and bucks. | 55 | * Values used for configuring LDOs and bucks. |
50 | * Forcing low power mode: LDO1, 3-5, 9, 13, 17-26 | 56 | * Forcing low power mode: LDO1, 3-5, 9, 13, 17-26 |
51 | */ | 57 | */ |
@@ -82,6 +88,8 @@ enum max77686_ramp_rate { | |||
82 | }; | 88 | }; |
83 | 89 | ||
84 | struct max77686_data { | 90 | struct max77686_data { |
91 | u64 gpio_enabled:MAX77686_REGULATORS; | ||
92 | |||
85 | /* Array indexed by regulator id */ | 93 | /* Array indexed by regulator id */ |
86 | unsigned int opmode[MAX77686_REGULATORS]; | 94 | unsigned int opmode[MAX77686_REGULATORS]; |
87 | }; | 95 | }; |
@@ -100,6 +108,26 @@ static unsigned int max77686_get_opmode_shift(int id) | |||
100 | } | 108 | } |
101 | } | 109 | } |
102 | 110 | ||
111 | /* | ||
112 | * When regulator is configured for GPIO control then it | ||
113 | * replaces "normal" mode. Any change from low power mode to normal | ||
114 | * should actually change to GPIO control. | ||
115 | * Map normal mode to proper value for such regulators. | ||
116 | */ | ||
117 | static unsigned int max77686_map_normal_mode(struct max77686_data *max77686, | ||
118 | int id) | ||
119 | { | ||
120 | switch (id) { | ||
121 | case MAX77686_BUCK8: | ||
122 | case MAX77686_BUCK9: | ||
123 | case MAX77686_LDO20 ... MAX77686_LDO22: | ||
124 | if (max77686->gpio_enabled & (1 << id)) | ||
125 | return MAX77686_GPIO_CONTROL; | ||
126 | } | ||
127 | |||
128 | return MAX77686_NORMAL; | ||
129 | } | ||
130 | |||
103 | /* Some BUCKs and LDOs supports Normal[ON/OFF] mode during suspend */ | 131 | /* Some BUCKs and LDOs supports Normal[ON/OFF] mode during suspend */ |
104 | static int max77686_set_suspend_disable(struct regulator_dev *rdev) | 132 | static int max77686_set_suspend_disable(struct regulator_dev *rdev) |
105 | { | 133 | { |
@@ -136,7 +164,7 @@ static int max77686_set_suspend_mode(struct regulator_dev *rdev, | |||
136 | val = MAX77686_LDO_LOWPOWER_PWRREQ; | 164 | val = MAX77686_LDO_LOWPOWER_PWRREQ; |
137 | break; | 165 | break; |
138 | case REGULATOR_MODE_NORMAL: /* ON in Normal Mode */ | 166 | case REGULATOR_MODE_NORMAL: /* ON in Normal Mode */ |
139 | val = MAX77686_NORMAL; | 167 | val = max77686_map_normal_mode(max77686, id); |
140 | break; | 168 | break; |
141 | default: | 169 | default: |
142 | pr_warn("%s: regulator_suspend_mode : 0x%x not supported\n", | 170 | pr_warn("%s: regulator_suspend_mode : 0x%x not supported\n", |
@@ -160,7 +188,7 @@ static int max77686_ldo_set_suspend_mode(struct regulator_dev *rdev, | |||
160 | { | 188 | { |
161 | unsigned int val; | 189 | unsigned int val; |
162 | struct max77686_data *max77686 = rdev_get_drvdata(rdev); | 190 | struct max77686_data *max77686 = rdev_get_drvdata(rdev); |
163 | int ret; | 191 | int ret, id = rdev_get_id(rdev); |
164 | 192 | ||
165 | switch (mode) { | 193 | switch (mode) { |
166 | case REGULATOR_MODE_STANDBY: /* switch off */ | 194 | case REGULATOR_MODE_STANDBY: /* switch off */ |
@@ -170,7 +198,7 @@ static int max77686_ldo_set_suspend_mode(struct regulator_dev *rdev, | |||
170 | val = MAX77686_LDO_LOWPOWER_PWRREQ; | 198 | val = MAX77686_LDO_LOWPOWER_PWRREQ; |
171 | break; | 199 | break; |
172 | case REGULATOR_MODE_NORMAL: /* ON in Normal Mode */ | 200 | case REGULATOR_MODE_NORMAL: /* ON in Normal Mode */ |
173 | val = MAX77686_NORMAL; | 201 | val = max77686_map_normal_mode(max77686, id); |
174 | break; | 202 | break; |
175 | default: | 203 | default: |
176 | pr_warn("%s: regulator_suspend_mode : 0x%x not supported\n", | 204 | pr_warn("%s: regulator_suspend_mode : 0x%x not supported\n", |
@@ -184,7 +212,7 @@ static int max77686_ldo_set_suspend_mode(struct regulator_dev *rdev, | |||
184 | if (ret) | 212 | if (ret) |
185 | return ret; | 213 | return ret; |
186 | 214 | ||
187 | max77686->opmode[rdev_get_id(rdev)] = val; | 215 | max77686->opmode[id] = val; |
188 | return 0; | 216 | return 0; |
189 | } | 217 | } |
190 | 218 | ||
@@ -197,7 +225,7 @@ static int max77686_enable(struct regulator_dev *rdev) | |||
197 | shift = max77686_get_opmode_shift(id); | 225 | shift = max77686_get_opmode_shift(id); |
198 | 226 | ||
199 | if (max77686->opmode[id] == MAX77686_OFF_PWRREQ) | 227 | if (max77686->opmode[id] == MAX77686_OFF_PWRREQ) |
200 | max77686->opmode[id] = MAX77686_NORMAL; | 228 | max77686->opmode[id] = max77686_map_normal_mode(max77686, id); |
201 | 229 | ||
202 | return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, | 230 | return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, |
203 | rdev->desc->enable_mask, | 231 | rdev->desc->enable_mask, |
@@ -229,6 +257,36 @@ static int max77686_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay) | |||
229 | MAX77686_RAMP_RATE_MASK, ramp_value << 6); | 257 | MAX77686_RAMP_RATE_MASK, ramp_value << 6); |
230 | } | 258 | } |
231 | 259 | ||
260 | static int max77686_of_parse_cb(struct device_node *np, | ||
261 | const struct regulator_desc *desc, | ||
262 | struct regulator_config *config) | ||
263 | { | ||
264 | struct max77686_data *max77686 = config->driver_data; | ||
265 | |||
266 | switch (desc->id) { | ||
267 | case MAX77686_BUCK8: | ||
268 | case MAX77686_BUCK9: | ||
269 | case MAX77686_LDO20 ... MAX77686_LDO22: | ||
270 | config->ena_gpio = of_get_named_gpio(np, | ||
271 | "maxim,ena-gpios", 0); | ||
272 | config->ena_gpio_flags = GPIOF_OUT_INIT_HIGH; | ||
273 | config->ena_gpio_initialized = true; | ||
274 | break; | ||
275 | default: | ||
276 | return 0; | ||
277 | } | ||
278 | |||
279 | if (gpio_is_valid(config->ena_gpio)) { | ||
280 | max77686->gpio_enabled |= (1 << desc->id); | ||
281 | |||
282 | return regmap_update_bits(config->regmap, desc->enable_reg, | ||
283 | desc->enable_mask, | ||
284 | MAX77686_GPIO_CONTROL); | ||
285 | } | ||
286 | |||
287 | return 0; | ||
288 | } | ||
289 | |||
232 | static struct regulator_ops max77686_ops = { | 290 | static struct regulator_ops max77686_ops = { |
233 | .list_voltage = regulator_list_voltage_linear, | 291 | .list_voltage = regulator_list_voltage_linear, |
234 | .map_voltage = regulator_map_voltage_linear, | 292 | .map_voltage = regulator_map_voltage_linear, |
@@ -283,6 +341,7 @@ static struct regulator_ops max77686_buck_dvs_ops = { | |||
283 | .name = "LDO"#num, \ | 341 | .name = "LDO"#num, \ |
284 | .of_match = of_match_ptr("LDO"#num), \ | 342 | .of_match = of_match_ptr("LDO"#num), \ |
285 | .regulators_node = of_match_ptr("voltage-regulators"), \ | 343 | .regulators_node = of_match_ptr("voltage-regulators"), \ |
344 | .of_parse_cb = max77686_of_parse_cb, \ | ||
286 | .id = MAX77686_LDO##num, \ | 345 | .id = MAX77686_LDO##num, \ |
287 | .ops = &max77686_ops, \ | 346 | .ops = &max77686_ops, \ |
288 | .type = REGULATOR_VOLTAGE, \ | 347 | .type = REGULATOR_VOLTAGE, \ |
@@ -355,6 +414,7 @@ static struct regulator_ops max77686_buck_dvs_ops = { | |||
355 | .name = "BUCK"#num, \ | 414 | .name = "BUCK"#num, \ |
356 | .of_match = of_match_ptr("BUCK"#num), \ | 415 | .of_match = of_match_ptr("BUCK"#num), \ |
357 | .regulators_node = of_match_ptr("voltage-regulators"), \ | 416 | .regulators_node = of_match_ptr("voltage-regulators"), \ |
417 | .of_parse_cb = max77686_of_parse_cb, \ | ||
358 | .id = MAX77686_BUCK##num, \ | 418 | .id = MAX77686_BUCK##num, \ |
359 | .ops = &max77686_ops, \ | 419 | .ops = &max77686_ops, \ |
360 | .type = REGULATOR_VOLTAGE, \ | 420 | .type = REGULATOR_VOLTAGE, \ |
diff --git a/drivers/regulator/max77843.c b/drivers/regulator/max77843.c new file mode 100644 index 000000000000..c132ef527cdd --- /dev/null +++ b/drivers/regulator/max77843.c | |||
@@ -0,0 +1,227 @@ | |||
1 | /* | ||
2 | * max77843.c - Regulator driver for the Maxim MAX77843 | ||
3 | * | ||
4 | * Copyright (C) 2015 Samsung Electronics | ||
5 | * Author: Jaewon Kim <jaewon02.kim@samsung.com> | ||
6 | * Author: Beomho Seo <beomho.seo@samsung.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | */ | ||
13 | |||
14 | #include <linux/module.h> | ||
15 | #include <linux/platform_device.h> | ||
16 | #include <linux/regulator/driver.h> | ||
17 | #include <linux/regulator/machine.h> | ||
18 | #include <linux/mfd/max77843-private.h> | ||
19 | #include <linux/regulator/of_regulator.h> | ||
20 | |||
21 | enum max77843_regulator_type { | ||
22 | MAX77843_SAFEOUT1 = 0, | ||
23 | MAX77843_SAFEOUT2, | ||
24 | MAX77843_CHARGER, | ||
25 | |||
26 | MAX77843_NUM, | ||
27 | }; | ||
28 | |||
29 | static const unsigned int max77843_safeout_voltage_table[] = { | ||
30 | 4850000, | ||
31 | 4900000, | ||
32 | 4950000, | ||
33 | 3300000, | ||
34 | }; | ||
35 | |||
36 | static int max77843_reg_is_enabled(struct regulator_dev *rdev) | ||
37 | { | ||
38 | struct regmap *regmap = rdev->regmap; | ||
39 | int ret; | ||
40 | unsigned int reg; | ||
41 | |||
42 | ret = regmap_read(regmap, rdev->desc->enable_reg, ®); | ||
43 | if (ret) { | ||
44 | dev_err(&rdev->dev, "Fialed to read charger register\n"); | ||
45 | return ret; | ||
46 | } | ||
47 | |||
48 | return (reg & rdev->desc->enable_mask) == rdev->desc->enable_mask; | ||
49 | } | ||
50 | |||
51 | static int max77843_reg_get_current_limit(struct regulator_dev *rdev) | ||
52 | { | ||
53 | struct regmap *regmap = rdev->regmap; | ||
54 | unsigned int chg_min_uA = rdev->constraints->min_uA; | ||
55 | unsigned int chg_max_uA = rdev->constraints->max_uA; | ||
56 | unsigned int val; | ||
57 | int ret; | ||
58 | unsigned int reg, sel; | ||
59 | |||
60 | ret = regmap_read(regmap, MAX77843_CHG_REG_CHG_CNFG_02, ®); | ||
61 | if (ret) { | ||
62 | dev_err(&rdev->dev, "Failed to read charger register\n"); | ||
63 | return ret; | ||
64 | } | ||
65 | |||
66 | sel = reg & MAX77843_CHG_FAST_CHG_CURRENT_MASK; | ||
67 | |||
68 | if (sel < 0x03) | ||
69 | sel = 0; | ||
70 | else | ||
71 | sel -= 2; | ||
72 | |||
73 | val = chg_min_uA + MAX77843_CHG_FAST_CHG_CURRENT_STEP * sel; | ||
74 | if (val > chg_max_uA) | ||
75 | return -EINVAL; | ||
76 | |||
77 | return val; | ||
78 | } | ||
79 | |||
80 | static int max77843_reg_set_current_limit(struct regulator_dev *rdev, | ||
81 | int min_uA, int max_uA) | ||
82 | { | ||
83 | struct regmap *regmap = rdev->regmap; | ||
84 | unsigned int chg_min_uA = rdev->constraints->min_uA; | ||
85 | int sel = 0; | ||
86 | |||
87 | while (chg_min_uA + MAX77843_CHG_FAST_CHG_CURRENT_STEP * sel < min_uA) | ||
88 | sel++; | ||
89 | |||
90 | if (chg_min_uA + MAX77843_CHG_FAST_CHG_CURRENT_STEP * sel > max_uA) | ||
91 | return -EINVAL; | ||
92 | |||
93 | sel += 2; | ||
94 | |||
95 | return regmap_write(regmap, MAX77843_CHG_REG_CHG_CNFG_02, sel); | ||
96 | } | ||
97 | |||
98 | static struct regulator_ops max77843_charger_ops = { | ||
99 | .is_enabled = max77843_reg_is_enabled, | ||
100 | .enable = regulator_enable_regmap, | ||
101 | .disable = regulator_disable_regmap, | ||
102 | .get_current_limit = max77843_reg_get_current_limit, | ||
103 | .set_current_limit = max77843_reg_set_current_limit, | ||
104 | }; | ||
105 | |||
106 | static struct regulator_ops max77843_regulator_ops = { | ||
107 | .is_enabled = regulator_is_enabled_regmap, | ||
108 | .enable = regulator_enable_regmap, | ||
109 | .disable = regulator_disable_regmap, | ||
110 | .list_voltage = regulator_list_voltage_table, | ||
111 | .get_voltage_sel = regulator_get_voltage_sel_regmap, | ||
112 | .set_voltage_sel = regulator_set_voltage_sel_regmap, | ||
113 | }; | ||
114 | |||
115 | static const struct regulator_desc max77843_supported_regulators[] = { | ||
116 | [MAX77843_SAFEOUT1] = { | ||
117 | .name = "SAFEOUT1", | ||
118 | .id = MAX77843_SAFEOUT1, | ||
119 | .ops = &max77843_regulator_ops, | ||
120 | .of_match = of_match_ptr("SAFEOUT1"), | ||
121 | .regulators_node = of_match_ptr("regulators"), | ||
122 | .type = REGULATOR_VOLTAGE, | ||
123 | .owner = THIS_MODULE, | ||
124 | .n_voltages = ARRAY_SIZE(max77843_safeout_voltage_table), | ||
125 | .volt_table = max77843_safeout_voltage_table, | ||
126 | .enable_reg = MAX77843_SYS_REG_SAFEOUTCTRL, | ||
127 | .enable_mask = MAX77843_REG_SAFEOUTCTRL_ENSAFEOUT1, | ||
128 | .vsel_reg = MAX77843_SYS_REG_SAFEOUTCTRL, | ||
129 | .vsel_mask = MAX77843_REG_SAFEOUTCTRL_SAFEOUT1_MASK, | ||
130 | }, | ||
131 | [MAX77843_SAFEOUT2] = { | ||
132 | .name = "SAFEOUT2", | ||
133 | .id = MAX77843_SAFEOUT2, | ||
134 | .ops = &max77843_regulator_ops, | ||
135 | .of_match = of_match_ptr("SAFEOUT2"), | ||
136 | .regulators_node = of_match_ptr("regulators"), | ||
137 | .type = REGULATOR_VOLTAGE, | ||
138 | .owner = THIS_MODULE, | ||
139 | .n_voltages = ARRAY_SIZE(max77843_safeout_voltage_table), | ||
140 | .volt_table = max77843_safeout_voltage_table, | ||
141 | .enable_reg = MAX77843_SYS_REG_SAFEOUTCTRL, | ||
142 | .enable_mask = MAX77843_REG_SAFEOUTCTRL_ENSAFEOUT2, | ||
143 | .vsel_reg = MAX77843_SYS_REG_SAFEOUTCTRL, | ||
144 | .vsel_mask = MAX77843_REG_SAFEOUTCTRL_SAFEOUT2_MASK, | ||
145 | }, | ||
146 | [MAX77843_CHARGER] = { | ||
147 | .name = "CHARGER", | ||
148 | .id = MAX77843_CHARGER, | ||
149 | .ops = &max77843_charger_ops, | ||
150 | .of_match = of_match_ptr("CHARGER"), | ||
151 | .regulators_node = of_match_ptr("regulators"), | ||
152 | .type = REGULATOR_CURRENT, | ||
153 | .owner = THIS_MODULE, | ||
154 | .enable_reg = MAX77843_CHG_REG_CHG_CNFG_00, | ||
155 | .enable_mask = MAX77843_CHG_MASK, | ||
156 | }, | ||
157 | }; | ||
158 | |||
159 | static struct regmap *max77843_get_regmap(struct max77843 *max77843, int reg_id) | ||
160 | { | ||
161 | switch (reg_id) { | ||
162 | case MAX77843_SAFEOUT1: | ||
163 | case MAX77843_SAFEOUT2: | ||
164 | return max77843->regmap; | ||
165 | case MAX77843_CHARGER: | ||
166 | return max77843->regmap_chg; | ||
167 | default: | ||
168 | return max77843->regmap; | ||
169 | } | ||
170 | } | ||
171 | |||
172 | static int max77843_regulator_probe(struct platform_device *pdev) | ||
173 | { | ||
174 | struct max77843 *max77843 = dev_get_drvdata(pdev->dev.parent); | ||
175 | struct regulator_config config = {}; | ||
176 | int i; | ||
177 | |||
178 | config.dev = max77843->dev; | ||
179 | config.driver_data = max77843; | ||
180 | |||
181 | for (i = 0; i < ARRAY_SIZE(max77843_supported_regulators); i++) { | ||
182 | struct regulator_dev *regulator; | ||
183 | |||
184 | config.regmap = max77843_get_regmap(max77843, | ||
185 | max77843_supported_regulators[i].id); | ||
186 | |||
187 | regulator = devm_regulator_register(&pdev->dev, | ||
188 | &max77843_supported_regulators[i], &config); | ||
189 | if (IS_ERR(regulator)) { | ||
190 | dev_err(&pdev->dev, | ||
191 | "Failed to register regulator-%d\n", i); ||
192 | return PTR_ERR(regulator); | ||
193 | } | ||
194 | } | ||
195 | |||
196 | return 0; | ||
197 | } | ||
198 | |||
199 | static const struct platform_device_id max77843_regulator_id[] = { | ||
200 | { "max77843-regulator", }, | ||
201 | { /* sentinel */ }, | ||
202 | }; | ||
203 | |||
204 | static struct platform_driver max77843_regulator_driver = { | ||
205 | .driver = { | ||
206 | .name = "max77843-regulator", | ||
207 | }, | ||
208 | .probe = max77843_regulator_probe, | ||
209 | .id_table = max77843_regulator_id, | ||
210 | }; | ||
211 | |||
212 | static int __init max77843_regulator_init(void) | ||
213 | { | ||
214 | return platform_driver_register(&max77843_regulator_driver); | ||
215 | } | ||
216 | subsys_initcall(max77843_regulator_init); | ||
217 | |||
218 | static void __exit max77843_regulator_exit(void) | ||
219 | { | ||
220 | platform_driver_unregister(&max77843_regulator_driver); | ||
221 | } | ||
222 | module_exit(max77843_regulator_exit); | ||
223 | |||
224 | MODULE_AUTHOR("Jaewon Kim <jaewon02.kim@samsung.com>"); | ||
225 | MODULE_AUTHOR("Beomho Seo <beomho.seo@samsung.com>"); | ||
226 | MODULE_DESCRIPTION("Maxim MAX77843 regulator driver"); | ||
227 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c index c8bddcc8f911..81229579ece9 100644 --- a/drivers/regulator/max8649.c +++ b/drivers/regulator/max8649.c | |||
@@ -115,7 +115,7 @@ static unsigned int max8649_get_mode(struct regulator_dev *rdev) | |||
115 | return REGULATOR_MODE_NORMAL; | 115 | return REGULATOR_MODE_NORMAL; |
116 | } | 116 | } |
117 | 117 | ||
118 | static struct regulator_ops max8649_dcdc_ops = { | 118 | static const struct regulator_ops max8649_dcdc_ops = { |
119 | .set_voltage_sel = regulator_set_voltage_sel_regmap, | 119 | .set_voltage_sel = regulator_set_voltage_sel_regmap, |
120 | .get_voltage_sel = regulator_get_voltage_sel_regmap, | 120 | .get_voltage_sel = regulator_get_voltage_sel_regmap, |
121 | .list_voltage = regulator_list_voltage_linear, | 121 | .list_voltage = regulator_list_voltage_linear, |
@@ -143,7 +143,7 @@ static struct regulator_desc dcdc_desc = { | |||
143 | .enable_is_inverted = true, | 143 | .enable_is_inverted = true, |
144 | }; | 144 | }; |
145 | 145 | ||
146 | static struct regmap_config max8649_regmap_config = { | 146 | static const struct regmap_config max8649_regmap_config = { |
147 | .reg_bits = 8, | 147 | .reg_bits = 8, |
148 | .val_bits = 8, | 148 | .val_bits = 8, |
149 | }; | 149 | }; |
diff --git a/drivers/regulator/mt6397-regulator.c b/drivers/regulator/mt6397-regulator.c new file mode 100644 index 000000000000..a5b2f4762677 --- /dev/null +++ b/drivers/regulator/mt6397-regulator.c | |||
@@ -0,0 +1,332 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2014 MediaTek Inc. | ||
3 | * Author: Flora Fu <flora.fu@mediatek.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/module.h> | ||
16 | #include <linux/of.h> | ||
17 | #include <linux/platform_device.h> | ||
18 | #include <linux/regmap.h> | ||
19 | #include <linux/mfd/mt6397/core.h> | ||
20 | #include <linux/mfd/mt6397/registers.h> | ||
21 | #include <linux/regulator/driver.h> | ||
22 | #include <linux/regulator/machine.h> | ||
23 | #include <linux/regulator/mt6397-regulator.h> | ||
24 | #include <linux/regulator/of_regulator.h> | ||
25 | |||
26 | /* | ||
27 | * MT6397 regulators' information | ||
28 | * | ||
29 | * @desc: standard fields of regulator description. | ||
30 | * @qi: Mask for query enable signal status of regulators | ||
31 | * @vselon_reg: Register sections for hardware control mode of bucks | ||
32 | * @vselctrl_reg: Register for controlling the buck control mode. | ||
33 | * @vselctrl_mask: Mask for query buck's voltage control mode. | ||
34 | */ | ||
35 | struct mt6397_regulator_info { | ||
36 | struct regulator_desc desc; | ||
37 | u32 qi; | ||
38 | u32 vselon_reg; | ||
39 | u32 vselctrl_reg; | ||
40 | u32 vselctrl_mask; | ||
41 | }; | ||
42 | |||
43 | #define MT6397_BUCK(match, vreg, min, max, step, volt_ranges, enreg, \ | ||
44 | vosel, vosel_mask, voselon, vosel_ctrl) \ | ||
45 | [MT6397_ID_##vreg] = { \ | ||
46 | .desc = { \ | ||
47 | .name = #vreg, \ | ||
48 | .of_match = of_match_ptr(match), \ | ||
49 | .ops = &mt6397_volt_range_ops, \ | ||
50 | .type = REGULATOR_VOLTAGE, \ | ||
51 | .id = MT6397_ID_##vreg, \ | ||
52 | .owner = THIS_MODULE, \ | ||
53 | .n_voltages = (max - min)/step + 1, \ | ||
54 | .linear_ranges = volt_ranges, \ | ||
55 | .n_linear_ranges = ARRAY_SIZE(volt_ranges), \ | ||
56 | .vsel_reg = vosel, \ | ||
57 | .vsel_mask = vosel_mask, \ | ||
58 | .enable_reg = enreg, \ | ||
59 | .enable_mask = BIT(0), \ | ||
60 | }, \ | ||
61 | .qi = BIT(13), \ | ||
62 | .vselon_reg = voselon, \ | ||
63 | .vselctrl_reg = vosel_ctrl, \ | ||
64 | .vselctrl_mask = BIT(1), \ | ||
65 | } | ||
66 | |||
67 | #define MT6397_LDO(match, vreg, ldo_volt_table, enreg, enbit, vosel, \ | ||
68 | vosel_mask) \ | ||
69 | [MT6397_ID_##vreg] = { \ | ||
70 | .desc = { \ | ||
71 | .name = #vreg, \ | ||
72 | .of_match = of_match_ptr(match), \ | ||
73 | .ops = &mt6397_volt_table_ops, \ | ||
74 | .type = REGULATOR_VOLTAGE, \ | ||
75 | .id = MT6397_ID_##vreg, \ | ||
76 | .owner = THIS_MODULE, \ | ||
77 | .n_voltages = ARRAY_SIZE(ldo_volt_table), \ | ||
78 | .volt_table = ldo_volt_table, \ | ||
79 | .vsel_reg = vosel, \ | ||
80 | .vsel_mask = vosel_mask, \ | ||
81 | .enable_reg = enreg, \ | ||
82 | .enable_mask = BIT(enbit), \ | ||
83 | }, \ | ||
84 | .qi = BIT(15), \ | ||
85 | } | ||
86 | |||
87 | #define MT6397_REG_FIXED(match, vreg, enreg, enbit, volt) \ | ||
88 | [MT6397_ID_##vreg] = { \ | ||
89 | .desc = { \ | ||
90 | .name = #vreg, \ | ||
91 | .of_match = of_match_ptr(match), \ | ||
92 | .ops = &mt6397_volt_fixed_ops, \ | ||
93 | .type = REGULATOR_VOLTAGE, \ | ||
94 | .id = MT6397_ID_##vreg, \ | ||
95 | .owner = THIS_MODULE, \ | ||
96 | .n_voltages = 1, \ | ||
97 | .enable_reg = enreg, \ | ||
98 | .enable_mask = BIT(enbit), \ | ||
99 | .min_uV = volt, \ | ||
100 | }, \ | ||
101 | .qi = BIT(15), \ | ||
102 | } | ||
103 | |||
104 | static const struct regulator_linear_range buck_volt_range1[] = { | ||
105 | REGULATOR_LINEAR_RANGE(700000, 0, 0x7f, 6250), | ||
106 | }; | ||
107 | |||
108 | static const struct regulator_linear_range buck_volt_range2[] = { | ||
109 | REGULATOR_LINEAR_RANGE(800000, 0, 0x7f, 6250), | ||
110 | }; | ||
111 | |||
112 | static const struct regulator_linear_range buck_volt_range3[] = { | ||
113 | REGULATOR_LINEAR_RANGE(1500000, 0, 0x1f, 20000), | ||
114 | }; | ||
115 | |||
116 | static const u32 ldo_volt_table1[] = { | ||
117 | 1500000, 1800000, 2500000, 2800000, | ||
118 | }; | ||
119 | |||
120 | static const u32 ldo_volt_table2[] = { | ||
121 | 1800000, 3300000, | ||
122 | }; | ||
123 | |||
124 | static const u32 ldo_volt_table3[] = { | ||
125 | 3000000, 3300000, | ||
126 | }; | ||
127 | |||
128 | static const u32 ldo_volt_table4[] = { | ||
129 | 1220000, 1300000, 1500000, 1800000, 2500000, 2800000, 3000000, 3300000, | ||
130 | }; | ||
131 | |||
132 | static const u32 ldo_volt_table5[] = { | ||
133 | 1200000, 1300000, 1500000, 1800000, 2500000, 2800000, 3000000, 3300000, | ||
134 | }; | ||
135 | |||
136 | static const u32 ldo_volt_table5_v2[] = { | ||
137 | 1200000, 1000000, 1500000, 1800000, 2500000, 2800000, 3000000, 3300000, | ||
138 | }; | ||
139 | |||
140 | static const u32 ldo_volt_table6[] = { | ||
141 | 1200000, 1300000, 1500000, 1800000, 2500000, 2800000, 3000000, 2000000, | ||
142 | }; | ||
143 | |||
144 | static const u32 ldo_volt_table7[] = { | ||
145 | 1300000, 1500000, 1800000, 2000000, 2500000, 2800000, 3000000, 3300000, | ||
146 | }; | ||
147 | |||
148 | static int mt6397_get_status(struct regulator_dev *rdev) | ||
149 | { | ||
150 | int ret; | ||
151 | u32 regval; | ||
152 | struct mt6397_regulator_info *info = rdev_get_drvdata(rdev); | ||
153 | |||
154 | ret = regmap_read(rdev->regmap, info->desc.enable_reg, ®val); | ||
155 | if (ret != 0) { | ||
156 | dev_err(&rdev->dev, "Failed to get enable reg: %d\n", ret); | ||
157 | return ret; | ||
158 | } | ||
159 | |||
160 | return (regval & info->qi) ? REGULATOR_STATUS_ON : REGULATOR_STATUS_OFF; | ||
161 | } | ||
162 | |||
163 | static struct regulator_ops mt6397_volt_range_ops = { | ||
164 | .list_voltage = regulator_list_voltage_linear_range, | ||
165 | .map_voltage = regulator_map_voltage_linear_range, | ||
166 | .set_voltage_sel = regulator_set_voltage_sel_regmap, | ||
167 | .get_voltage_sel = regulator_get_voltage_sel_regmap, | ||
168 | .set_voltage_time_sel = regulator_set_voltage_time_sel, | ||
169 | .enable = regulator_enable_regmap, | ||
170 | .disable = regulator_disable_regmap, | ||
171 | .is_enabled = regulator_is_enabled_regmap, | ||
172 | .get_status = mt6397_get_status, | ||
173 | }; | ||
174 | |||
175 | static struct regulator_ops mt6397_volt_table_ops = { | ||
176 | .list_voltage = regulator_list_voltage_table, | ||
177 | .map_voltage = regulator_map_voltage_iterate, | ||
178 | .set_voltage_sel = regulator_set_voltage_sel_regmap, | ||
179 | .get_voltage_sel = regulator_get_voltage_sel_regmap, | ||
180 | .set_voltage_time_sel = regulator_set_voltage_time_sel, | ||
181 | .enable = regulator_enable_regmap, | ||
182 | .disable = regulator_disable_regmap, | ||
183 | .is_enabled = regulator_is_enabled_regmap, | ||
184 | .get_status = mt6397_get_status, | ||
185 | }; | ||
186 | |||
187 | static struct regulator_ops mt6397_volt_fixed_ops = { | ||
188 | .list_voltage = regulator_list_voltage_linear, | ||
189 | .enable = regulator_enable_regmap, | ||
190 | .disable = regulator_disable_regmap, | ||
191 | .is_enabled = regulator_is_enabled_regmap, | ||
192 | .get_status = mt6397_get_status, | ||
193 | }; | ||
194 | |||
195 | /* The array is indexed by id(MT6397_ID_XXX) */ | ||
196 | static struct mt6397_regulator_info mt6397_regulators[] = { | ||
197 | MT6397_BUCK("buck_vpca15", VPCA15, 700000, 1493750, 6250, | ||
198 | buck_volt_range1, MT6397_VCA15_CON7, MT6397_VCA15_CON9, 0x7f, | ||
199 | MT6397_VCA15_CON10, MT6397_VCA15_CON5), | ||
200 | MT6397_BUCK("buck_vpca7", VPCA7, 700000, 1493750, 6250, | ||
201 | buck_volt_range1, MT6397_VPCA7_CON7, MT6397_VPCA7_CON9, 0x7f, | ||
202 | MT6397_VPCA7_CON10, MT6397_VPCA7_CON5), | ||
203 | MT6397_BUCK("buck_vsramca15", VSRAMCA15, 700000, 1493750, 6250, | ||
204 | buck_volt_range1, MT6397_VSRMCA15_CON7, MT6397_VSRMCA15_CON9, | ||
205 | 0x7f, MT6397_VSRMCA15_CON10, MT6397_VSRMCA15_CON5), | ||
206 | MT6397_BUCK("buck_vsramca7", VSRAMCA7, 700000, 1493750, 6250, | ||
207 | buck_volt_range1, MT6397_VSRMCA7_CON7, MT6397_VSRMCA7_CON9, | ||
208 | 0x7f, MT6397_VSRMCA7_CON10, MT6397_VSRMCA7_CON5), | ||
209 | MT6397_BUCK("buck_vcore", VCORE, 700000, 1493750, 6250, | ||
210 | buck_volt_range1, MT6397_VCORE_CON7, MT6397_VCORE_CON9, 0x7f, | ||
211 | MT6397_VCORE_CON10, MT6397_VCORE_CON5), | ||
212 | MT6397_BUCK("buck_vgpu", VGPU, 700000, 1493750, 6250, buck_volt_range1, | ||
213 | MT6397_VGPU_CON7, MT6397_VGPU_CON9, 0x7f, | ||
214 | MT6397_VGPU_CON10, MT6397_VGPU_CON5), | ||
215 | MT6397_BUCK("buck_vdrm", VDRM, 800000, 1593750, 6250, buck_volt_range2, | ||
216 | MT6397_VDRM_CON7, MT6397_VDRM_CON9, 0x7f, | ||
217 | MT6397_VDRM_CON10, MT6397_VDRM_CON5), | ||
218 | MT6397_BUCK("buck_vio18", VIO18, 1500000, 2120000, 20000, | ||
219 | buck_volt_range3, MT6397_VIO18_CON7, MT6397_VIO18_CON9, 0x1f, | ||
220 | MT6397_VIO18_CON10, MT6397_VIO18_CON5), | ||
221 | MT6397_REG_FIXED("ldo_vtcxo", VTCXO, MT6397_ANALDO_CON0, 10, 2800000), | ||
222 | MT6397_REG_FIXED("ldo_va28", VA28, MT6397_ANALDO_CON1, 14, 2800000), | ||
223 | MT6397_LDO("ldo_vcama", VCAMA, ldo_volt_table1, | ||
224 | MT6397_ANALDO_CON2, 15, MT6397_ANALDO_CON6, 0xC0), | ||
225 | MT6397_REG_FIXED("ldo_vio28", VIO28, MT6397_DIGLDO_CON0, 14, 2800000), | ||
226 | MT6397_REG_FIXED("ldo_vusb", VUSB, MT6397_DIGLDO_CON1, 14, 3300000), | ||
227 | MT6397_LDO("ldo_vmc", VMC, ldo_volt_table2, | ||
228 | MT6397_DIGLDO_CON2, 12, MT6397_DIGLDO_CON29, 0x10), | ||
229 | MT6397_LDO("ldo_vmch", VMCH, ldo_volt_table3, | ||
230 | MT6397_DIGLDO_CON3, 14, MT6397_DIGLDO_CON17, 0x80), | ||
231 | MT6397_LDO("ldo_vemc3v3", VEMC3V3, ldo_volt_table3, | ||
232 | MT6397_DIGLDO_CON4, 14, MT6397_DIGLDO_CON18, 0x10), | ||
233 | MT6397_LDO("ldo_vgp1", VGP1, ldo_volt_table4, | ||
234 | MT6397_DIGLDO_CON5, 15, MT6397_DIGLDO_CON19, 0xE0), | ||
235 | MT6397_LDO("ldo_vgp2", VGP2, ldo_volt_table5, | ||
236 | MT6397_DIGLDO_CON6, 15, MT6397_DIGLDO_CON20, 0xE0), | ||
237 | MT6397_LDO("ldo_vgp3", VGP3, ldo_volt_table5, | ||
238 | MT6397_DIGLDO_CON7, 15, MT6397_DIGLDO_CON21, 0xE0), | ||
239 | MT6397_LDO("ldo_vgp4", VGP4, ldo_volt_table5, | ||
240 | MT6397_DIGLDO_CON8, 15, MT6397_DIGLDO_CON22, 0xE0), | ||
241 | MT6397_LDO("ldo_vgp5", VGP5, ldo_volt_table6, | ||
242 | MT6397_DIGLDO_CON9, 15, MT6397_DIGLDO_CON23, 0xE0), | ||
243 | MT6397_LDO("ldo_vgp6", VGP6, ldo_volt_table5, | ||
244 | MT6397_DIGLDO_CON10, 15, MT6397_DIGLDO_CON33, 0xE0), | ||
245 | MT6397_LDO("ldo_vibr", VIBR, ldo_volt_table7, | ||
246 | MT6397_DIGLDO_CON24, 15, MT6397_DIGLDO_CON25, 0xE00), | ||
247 | }; | ||
248 | |||
249 | static int mt6397_set_buck_vosel_reg(struct platform_device *pdev) | ||
250 | { | ||
251 | struct mt6397_chip *mt6397 = dev_get_drvdata(pdev->dev.parent); | ||
252 | int i; | ||
253 | u32 regval; | ||
254 | |||
255 | for (i = 0; i < MT6397_MAX_REGULATOR; i++) { | ||
256 | if (mt6397_regulators[i].vselctrl_reg) { | ||
257 | if (regmap_read(mt6397->regmap, | ||
258 | mt6397_regulators[i].vselctrl_reg, | ||
259 | ®val) < 0) { | ||
260 | dev_err(&pdev->dev, | ||
261 | "Failed to read buck ctrl\n"); | ||
262 | return -EIO; | ||
263 | } | ||
264 | |||
265 | if (regval & mt6397_regulators[i].vselctrl_mask) { | ||
266 | mt6397_regulators[i].desc.vsel_reg = | ||
267 | mt6397_regulators[i].vselon_reg; | ||
268 | } | ||
269 | } | ||
270 | } | ||
271 | |||
272 | return 0; | ||
273 | } | ||
274 | |||
275 | static int mt6397_regulator_probe(struct platform_device *pdev) | ||
276 | { | ||
277 | struct mt6397_chip *mt6397 = dev_get_drvdata(pdev->dev.parent); | ||
278 | struct regulator_config config = {}; | ||
279 | struct regulator_dev *rdev; | ||
280 | int i; | ||
281 | u32 reg_value, version; | ||
282 | |||
283 | /* Query buck controller to select activated voltage register part */ | ||
284 | if (mt6397_set_buck_vosel_reg(pdev)) | ||
285 | return -EIO; | ||
286 | |||
287 | /* Read PMIC chip revision to update constraints and voltage table */ | ||
288 | if (regmap_read(mt6397->regmap, MT6397_CID, ®_value) < 0) { | ||
289 | dev_err(&pdev->dev, "Failed to read Chip ID\n"); | ||
290 | return -EIO; | ||
291 | } | ||
292 | dev_info(&pdev->dev, "Chip ID = 0x%x\n", reg_value); | ||
293 | |||
294 | version = (reg_value & 0xFF); | ||
295 | switch (version) { | ||
296 | case MT6397_REGULATOR_ID91: | ||
297 | mt6397_regulators[MT6397_ID_VGP2].desc.volt_table = | ||
298 | ldo_volt_table5_v2; | ||
299 | break; | ||
300 | default: | ||
301 | break; | ||
302 | } | ||
303 | |||
304 | for (i = 0; i < MT6397_MAX_REGULATOR; i++) { | ||
305 | config.dev = &pdev->dev; | ||
306 | config.driver_data = &mt6397_regulators[i]; | ||
307 | config.regmap = mt6397->regmap; | ||
308 | rdev = devm_regulator_register(&pdev->dev, | ||
309 | &mt6397_regulators[i].desc, &config); | ||
310 | if (IS_ERR(rdev)) { | ||
311 | dev_err(&pdev->dev, "failed to register %s\n", | ||
312 | mt6397_regulators[i].desc.name); | ||
313 | return PTR_ERR(rdev); | ||
314 | } | ||
315 | } | ||
316 | |||
317 | return 0; | ||
318 | } | ||
319 | |||
320 | static struct platform_driver mt6397_regulator_driver = { | ||
321 | .driver = { | ||
322 | .name = "mt6397-regulator", | ||
323 | }, | ||
324 | .probe = mt6397_regulator_probe, | ||
325 | }; | ||
326 | |||
327 | module_platform_driver(mt6397_regulator_driver); | ||
328 | |||
329 | MODULE_AUTHOR("Flora Fu <flora.fu@mediatek.com>"); | ||
330 | MODULE_DESCRIPTION("Regulator Driver for MediaTek MT6397 PMIC"); | ||
331 | MODULE_LICENSE("GPL"); | ||
332 | MODULE_ALIAS("platform:mt6397-regulator"); | ||
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c index 91eaaf010524..24e812c48d93 100644 --- a/drivers/regulator/of_regulator.c +++ b/drivers/regulator/of_regulator.c | |||
@@ -270,6 +270,7 @@ EXPORT_SYMBOL_GPL(of_regulator_match); | |||
270 | 270 | ||
271 | struct regulator_init_data *regulator_of_get_init_data(struct device *dev, | 271 | struct regulator_init_data *regulator_of_get_init_data(struct device *dev, |
272 | const struct regulator_desc *desc, | 272 | const struct regulator_desc *desc, |
273 | struct regulator_config *config, | ||
273 | struct device_node **node) | 274 | struct device_node **node) |
274 | { | 275 | { |
275 | struct device_node *search, *child; | 276 | struct device_node *search, *child; |
@@ -307,6 +308,16 @@ struct regulator_init_data *regulator_of_get_init_data(struct device *dev, | |||
307 | break; | 308 | break; |
308 | } | 309 | } |
309 | 310 | ||
311 | if (desc->of_parse_cb) { | ||
312 | if (desc->of_parse_cb(child, desc, config)) { | ||
313 | dev_err(dev, | ||
314 | "driver callback failed to parse DT for regulator %s\n", | ||
315 | child->name); | ||
316 | init_data = NULL; | ||
317 | break; | ||
318 | } | ||
319 | } | ||
320 | |||
310 | of_node_get(child); | 321 | of_node_get(child); |
311 | *node = child; | 322 | *node = child; |
312 | break; | 323 | break; |
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c index c879dff597ee..8cc8d1877c44 100644 --- a/drivers/regulator/pfuze100-regulator.c +++ b/drivers/regulator/pfuze100-regulator.c | |||
@@ -56,7 +56,7 @@ | |||
56 | #define PFUZE100_VGEN5VOL 0x70 | 56 | #define PFUZE100_VGEN5VOL 0x70 |
57 | #define PFUZE100_VGEN6VOL 0x71 | 57 | #define PFUZE100_VGEN6VOL 0x71 |
58 | 58 | ||
59 | enum chips { PFUZE100, PFUZE200 }; | 59 | enum chips { PFUZE100, PFUZE200, PFUZE3000 = 3 }; |
60 | 60 | ||
61 | struct pfuze_regulator { | 61 | struct pfuze_regulator { |
62 | struct regulator_desc desc; | 62 | struct regulator_desc desc; |
@@ -80,9 +80,18 @@ static const int pfuze100_vsnvs[] = { | |||
80 | 1000000, 1100000, 1200000, 1300000, 1500000, 1800000, 3000000, | 80 | 1000000, 1100000, 1200000, 1300000, 1500000, 1800000, 3000000, |
81 | }; | 81 | }; |
82 | 82 | ||
83 | static const int pfuze3000_sw2lo[] = { | ||
84 | 1500000, 1550000, 1600000, 1650000, 1700000, 1750000, 1800000, 1850000, | ||
85 | }; | ||
86 | |||
87 | static const int pfuze3000_sw2hi[] = { | ||
88 | 2500000, 2800000, 2850000, 3000000, 3100000, 3150000, 3200000, 3300000, | ||
89 | }; | ||
90 | |||
83 | static const struct i2c_device_id pfuze_device_id[] = { | 91 | static const struct i2c_device_id pfuze_device_id[] = { |
84 | {.name = "pfuze100", .driver_data = PFUZE100}, | 92 | {.name = "pfuze100", .driver_data = PFUZE100}, |
85 | {.name = "pfuze200", .driver_data = PFUZE200}, | 93 | {.name = "pfuze200", .driver_data = PFUZE200}, |
94 | {.name = "pfuze3000", .driver_data = PFUZE3000}, | ||
86 | { } | 95 | { } |
87 | }; | 96 | }; |
88 | MODULE_DEVICE_TABLE(i2c, pfuze_device_id); | 97 | MODULE_DEVICE_TABLE(i2c, pfuze_device_id); |
@@ -90,6 +99,7 @@ MODULE_DEVICE_TABLE(i2c, pfuze_device_id); | |||
90 | static const struct of_device_id pfuze_dt_ids[] = { | 99 | static const struct of_device_id pfuze_dt_ids[] = { |
91 | { .compatible = "fsl,pfuze100", .data = (void *)PFUZE100}, | 100 | { .compatible = "fsl,pfuze100", .data = (void *)PFUZE100}, |
92 | { .compatible = "fsl,pfuze200", .data = (void *)PFUZE200}, | 101 | { .compatible = "fsl,pfuze200", .data = (void *)PFUZE200}, |
102 | { .compatible = "fsl,pfuze3000", .data = (void *)PFUZE3000}, | ||
93 | { } | 103 | { } |
94 | }; | 104 | }; |
95 | MODULE_DEVICE_TABLE(of, pfuze_dt_ids); | 105 | MODULE_DEVICE_TABLE(of, pfuze_dt_ids); |
@@ -219,6 +229,60 @@ static struct regulator_ops pfuze100_swb_regulator_ops = { | |||
219 | .stby_mask = 0x20, \ | 229 | .stby_mask = 0x20, \ |
220 | } | 230 | } |
221 | 231 | ||
232 | #define PFUZE3000_VCC_REG(_chip, _name, base, min, max, step) { \ | ||
233 | .desc = { \ | ||
234 | .name = #_name, \ | ||
235 | .n_voltages = ((max) - (min)) / (step) + 1, \ | ||
236 | .ops = &pfuze100_ldo_regulator_ops, \ | ||
237 | .type = REGULATOR_VOLTAGE, \ | ||
238 | .id = _chip ## _ ## _name, \ | ||
239 | .owner = THIS_MODULE, \ | ||
240 | .min_uV = (min), \ | ||
241 | .uV_step = (step), \ | ||
242 | .vsel_reg = (base), \ | ||
243 | .vsel_mask = 0x3, \ | ||
244 | .enable_reg = (base), \ | ||
245 | .enable_mask = 0x10, \ | ||
246 | }, \ | ||
247 | .stby_reg = (base), \ | ||
248 | .stby_mask = 0x20, \ | ||
249 | } | ||
250 | |||
251 | |||
252 | #define PFUZE3000_SW2_REG(_chip, _name, base, min, max, step) { \ | ||
253 | .desc = { \ | ||
254 | .name = #_name,\ | ||
255 | .n_voltages = ((max) - (min)) / (step) + 1, \ | ||
256 | .ops = &pfuze100_sw_regulator_ops, \ | ||
257 | .type = REGULATOR_VOLTAGE, \ | ||
258 | .id = _chip ## _ ## _name, \ | ||
259 | .owner = THIS_MODULE, \ | ||
260 | .min_uV = (min), \ | ||
261 | .uV_step = (step), \ | ||
262 | .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \ | ||
263 | .vsel_mask = 0x7, \ | ||
264 | }, \ | ||
265 | .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \ | ||
266 | .stby_mask = 0x7, \ | ||
267 | } | ||
268 | |||
269 | #define PFUZE3000_SW3_REG(_chip, _name, base, min, max, step) { \ | ||
270 | .desc = { \ | ||
271 | .name = #_name,\ | ||
272 | .n_voltages = ((max) - (min)) / (step) + 1, \ | ||
273 | .ops = &pfuze100_sw_regulator_ops, \ | ||
274 | .type = REGULATOR_VOLTAGE, \ | ||
275 | .id = _chip ## _ ## _name, \ | ||
276 | .owner = THIS_MODULE, \ | ||
277 | .min_uV = (min), \ | ||
278 | .uV_step = (step), \ | ||
279 | .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \ | ||
280 | .vsel_mask = 0xf, \ | ||
281 | }, \ | ||
282 | .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \ | ||
283 | .stby_mask = 0xf, \ | ||
284 | } | ||
285 | |||
222 | /* PFUZE100 */ | 286 | /* PFUZE100 */ |
223 | static struct pfuze_regulator pfuze100_regulators[] = { | 287 | static struct pfuze_regulator pfuze100_regulators[] = { |
224 | PFUZE100_SW_REG(PFUZE100, SW1AB, PFUZE100_SW1ABVOL, 300000, 1875000, 25000), | 288 | PFUZE100_SW_REG(PFUZE100, SW1AB, PFUZE100_SW1ABVOL, 300000, 1875000, 25000), |
@@ -254,6 +318,22 @@ static struct pfuze_regulator pfuze200_regulators[] = { | |||
254 | PFUZE100_VGEN_REG(PFUZE200, VGEN6, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000), | 318 | PFUZE100_VGEN_REG(PFUZE200, VGEN6, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000), |
255 | }; | 319 | }; |
256 | 320 | ||
321 | static struct pfuze_regulator pfuze3000_regulators[] = { | ||
322 | PFUZE100_SW_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 700000, 1475000, 25000), | ||
323 | PFUZE100_SW_REG(PFUZE3000, SW1B, PFUZE100_SW1CVOL, 700000, 1475000, 25000), | ||
324 | PFUZE100_SWB_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo), | ||
325 | PFUZE3000_SW3_REG(PFUZE3000, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000), | ||
326 | PFUZE100_SWB_REG(PFUZE3000, SWBST, PFUZE100_SWBSTCON1, 0x3, pfuze100_swbst), | ||
327 | PFUZE100_SWB_REG(PFUZE3000, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs), | ||
328 | PFUZE100_FIXED_REG(PFUZE3000, VREFDDR, PFUZE100_VREFDDRCON, 750000), | ||
329 | PFUZE100_VGEN_REG(PFUZE3000, VLDO1, PFUZE100_VGEN1VOL, 1800000, 3300000, 100000), | ||
330 | PFUZE100_VGEN_REG(PFUZE3000, VLDO2, PFUZE100_VGEN2VOL, 800000, 1550000, 50000), | ||
331 | PFUZE3000_VCC_REG(PFUZE3000, VCCSD, PFUZE100_VGEN3VOL, 2850000, 3300000, 150000), | ||
332 | PFUZE3000_VCC_REG(PFUZE3000, V33, PFUZE100_VGEN4VOL, 2850000, 3300000, 150000), | ||
333 | PFUZE100_VGEN_REG(PFUZE3000, VLDO3, PFUZE100_VGEN5VOL, 1800000, 3300000, 100000), | ||
334 | PFUZE100_VGEN_REG(PFUZE3000, VLDO4, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000), | ||
335 | }; | ||
336 | |||
257 | static struct pfuze_regulator *pfuze_regulators; | 337 | static struct pfuze_regulator *pfuze_regulators; |
258 | 338 | ||
259 | #ifdef CONFIG_OF | 339 | #ifdef CONFIG_OF |
@@ -294,6 +374,24 @@ static struct of_regulator_match pfuze200_matches[] = { | |||
294 | { .name = "vgen6", }, | 374 | { .name = "vgen6", }, |
295 | }; | 375 | }; |
296 | 376 | ||
377 | /* PFUZE3000 */ | ||
378 | static struct of_regulator_match pfuze3000_matches[] = { | ||
379 | |||
380 | { .name = "sw1a", }, | ||
381 | { .name = "sw1b", }, | ||
382 | { .name = "sw2", }, | ||
383 | { .name = "sw3", }, | ||
384 | { .name = "swbst", }, | ||
385 | { .name = "vsnvs", }, | ||
386 | { .name = "vrefddr", }, | ||
387 | { .name = "vldo1", }, | ||
388 | { .name = "vldo2", }, | ||
389 | { .name = "vccsd", }, | ||
390 | { .name = "v33", }, | ||
391 | { .name = "vldo3", }, | ||
392 | { .name = "vldo4", }, | ||
393 | }; | ||
394 | |||
297 | static struct of_regulator_match *pfuze_matches; | 395 | static struct of_regulator_match *pfuze_matches; |
298 | 396 | ||
299 | static int pfuze_parse_regulators_dt(struct pfuze_chip *chip) | 397 | static int pfuze_parse_regulators_dt(struct pfuze_chip *chip) |
@@ -313,6 +411,11 @@ static int pfuze_parse_regulators_dt(struct pfuze_chip *chip) | |||
313 | } | 411 | } |
314 | 412 | ||
315 | switch (chip->chip_id) { | 413 | switch (chip->chip_id) { |
414 | case PFUZE3000: | ||
415 | pfuze_matches = pfuze3000_matches; | ||
416 | ret = of_regulator_match(dev, parent, pfuze3000_matches, | ||
417 | ARRAY_SIZE(pfuze3000_matches)); | ||
418 | break; | ||
316 | case PFUZE200: | 419 | case PFUZE200: |
317 | pfuze_matches = pfuze200_matches; | 420 | pfuze_matches = pfuze200_matches; |
318 | ret = of_regulator_match(dev, parent, pfuze200_matches, | 421 | ret = of_regulator_match(dev, parent, pfuze200_matches, |
@@ -378,7 +481,8 @@ static int pfuze_identify(struct pfuze_chip *pfuze_chip) | |||
378 | * as ID=8 in PFUZE100 | 481 | * as ID=8 in PFUZE100 |
379 | */ | 482 | */ |
380 | dev_info(pfuze_chip->dev, "Assuming misprogrammed ID=0x8"); | 483 | dev_info(pfuze_chip->dev, "Assuming misprogrammed ID=0x8"); |
381 | } else if ((value & 0x0f) != pfuze_chip->chip_id) { | 484 | } else if ((value & 0x0f) != pfuze_chip->chip_id && |
485 | (value & 0xf0) >> 4 != pfuze_chip->chip_id) { | ||
382 | /* device id NOT match with your setting */ | 486 | /* device id NOT match with your setting */ |
383 | dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value); | 487 | dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value); |
384 | return -ENODEV; | 488 | return -ENODEV; |
@@ -417,7 +521,7 @@ static int pfuze100_regulator_probe(struct i2c_client *client, | |||
417 | int i, ret; | 521 | int i, ret; |
418 | const struct of_device_id *match; | 522 | const struct of_device_id *match; |
419 | u32 regulator_num; | 523 | u32 regulator_num; |
420 | u32 sw_check_start, sw_check_end; | 524 | u32 sw_check_start, sw_check_end, sw_hi = 0x40; |
421 | 525 | ||
422 | pfuze_chip = devm_kzalloc(&client->dev, sizeof(*pfuze_chip), | 526 | pfuze_chip = devm_kzalloc(&client->dev, sizeof(*pfuze_chip), |
423 | GFP_KERNEL); | 527 | GFP_KERNEL); |
@@ -458,13 +562,19 @@ static int pfuze100_regulator_probe(struct i2c_client *client, | |||
458 | 562 | ||
459 | /* use the right regulators after identify the right device */ | 563 | /* use the right regulators after identify the right device */ |
460 | switch (pfuze_chip->chip_id) { | 564 | switch (pfuze_chip->chip_id) { |
565 | case PFUZE3000: | ||
566 | pfuze_regulators = pfuze3000_regulators; | ||
567 | regulator_num = ARRAY_SIZE(pfuze3000_regulators); | ||
568 | sw_check_start = PFUZE3000_SW2; | ||
569 | sw_check_end = PFUZE3000_SW2; | ||
570 | sw_hi = 1 << 3; | ||
571 | break; | ||
461 | case PFUZE200: | 572 | case PFUZE200: |
462 | pfuze_regulators = pfuze200_regulators; | 573 | pfuze_regulators = pfuze200_regulators; |
463 | regulator_num = ARRAY_SIZE(pfuze200_regulators); | 574 | regulator_num = ARRAY_SIZE(pfuze200_regulators); |
464 | sw_check_start = PFUZE200_SW2; | 575 | sw_check_start = PFUZE200_SW2; |
465 | sw_check_end = PFUZE200_SW3B; | 576 | sw_check_end = PFUZE200_SW3B; |
466 | break; | 577 | break; |
467 | |||
468 | case PFUZE100: | 578 | case PFUZE100: |
469 | default: | 579 | default: |
470 | pfuze_regulators = pfuze100_regulators; | 580 | pfuze_regulators = pfuze100_regulators; |
@@ -474,7 +584,8 @@ static int pfuze100_regulator_probe(struct i2c_client *client, | |||
474 | break; | 584 | break; |
475 | } | 585 | } |
476 | dev_info(&client->dev, "pfuze%s found.\n", | 586 | dev_info(&client->dev, "pfuze%s found.\n", |
477 | (pfuze_chip->chip_id == PFUZE100) ? "100" : "200"); | 587 | (pfuze_chip->chip_id == PFUZE100) ? "100" : |
588 | ((pfuze_chip->chip_id == PFUZE200) ? "200" : "3000")); | ||
478 | 589 | ||
479 | memcpy(pfuze_chip->regulator_descs, pfuze_regulators, | 590 | memcpy(pfuze_chip->regulator_descs, pfuze_regulators, |
480 | sizeof(pfuze_chip->regulator_descs)); | 591 | sizeof(pfuze_chip->regulator_descs)); |
@@ -498,10 +609,15 @@ static int pfuze100_regulator_probe(struct i2c_client *client, | |||
498 | /* SW2~SW4 high bit check and modify the voltage value table */ | 609 | /* SW2~SW4 high bit check and modify the voltage value table */ |
499 | if (i >= sw_check_start && i <= sw_check_end) { | 610 | if (i >= sw_check_start && i <= sw_check_end) { |
500 | regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val); | 611 | regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val); |
501 | if (val & 0x40) { | 612 | if (val & sw_hi) { |
502 | desc->min_uV = 800000; | 613 | if (pfuze_chip->chip_id == PFUZE3000) { |
503 | desc->uV_step = 50000; | 614 | desc->volt_table = pfuze3000_sw2hi; |
504 | desc->n_voltages = 51; | 615 | desc->n_voltages = ARRAY_SIZE(pfuze3000_sw2hi); |
616 | } else { | ||
617 | desc->min_uV = 800000; | ||
618 | desc->uV_step = 50000; | ||
619 | desc->n_voltages = 51; | ||
620 | } | ||
505 | } | 621 | } |
506 | } | 622 | } |
507 | 623 | ||
diff --git a/drivers/regulator/qcom_rpm-regulator.c b/drivers/regulator/qcom_rpm-regulator.c index 8364ff331a81..e8647f7cf25e 100644 --- a/drivers/regulator/qcom_rpm-regulator.c +++ b/drivers/regulator/qcom_rpm-regulator.c | |||
@@ -227,9 +227,11 @@ static int rpm_reg_set_mV_sel(struct regulator_dev *rdev, | |||
227 | return uV; | 227 | return uV; |
228 | 228 | ||
229 | mutex_lock(&vreg->lock); | 229 | mutex_lock(&vreg->lock); |
230 | vreg->uV = uV; | ||
231 | if (vreg->is_enabled) | 230 | if (vreg->is_enabled) |
232 | ret = rpm_reg_write(vreg, req, vreg->uV / 1000); | 231 | ret = rpm_reg_write(vreg, req, uV / 1000); |
232 | |||
233 | if (!ret) | ||
234 | vreg->uV = uV; | ||
233 | mutex_unlock(&vreg->lock); | 235 | mutex_unlock(&vreg->lock); |
234 | 236 | ||
235 | return ret; | 237 | return ret; |
@@ -252,9 +254,11 @@ static int rpm_reg_set_uV_sel(struct regulator_dev *rdev, | |||
252 | return uV; | 254 | return uV; |
253 | 255 | ||
254 | mutex_lock(&vreg->lock); | 256 | mutex_lock(&vreg->lock); |
255 | vreg->uV = uV; | ||
256 | if (vreg->is_enabled) | 257 | if (vreg->is_enabled) |
257 | ret = rpm_reg_write(vreg, req, vreg->uV); | 258 | ret = rpm_reg_write(vreg, req, uV); |
259 | |||
260 | if (!ret) | ||
261 | vreg->uV = uV; | ||
258 | mutex_unlock(&vreg->lock); | 262 | mutex_unlock(&vreg->lock); |
259 | 263 | ||
260 | return ret; | 264 | return ret; |
@@ -674,6 +678,7 @@ static int rpm_reg_probe(struct platform_device *pdev) | |||
674 | vreg->desc.owner = THIS_MODULE; | 678 | vreg->desc.owner = THIS_MODULE; |
675 | vreg->desc.type = REGULATOR_VOLTAGE; | 679 | vreg->desc.type = REGULATOR_VOLTAGE; |
676 | vreg->desc.name = pdev->dev.of_node->name; | 680 | vreg->desc.name = pdev->dev.of_node->name; |
681 | vreg->desc.supply_name = "vin"; | ||
677 | 682 | ||
678 | vreg->rpm = dev_get_drvdata(pdev->dev.parent); | 683 | vreg->rpm = dev_get_drvdata(pdev->dev.parent); |
679 | if (!vreg->rpm) { | 684 | if (!vreg->rpm) { |
@@ -768,7 +773,7 @@ static int rpm_reg_probe(struct platform_device *pdev) | |||
768 | break; | 773 | break; |
769 | } | 774 | } |
770 | 775 | ||
771 | if (force_mode < 0) { | 776 | if (force_mode == -1) { |
772 | dev_err(&pdev->dev, "invalid force mode\n"); | 777 | dev_err(&pdev->dev, "invalid force mode\n"); |
773 | return -EINVAL; | 778 | return -EINVAL; |
774 | } | 779 | } |
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c index c94a3e0f3b91..1f93b752a81c 100644 --- a/drivers/regulator/rk808-regulator.c +++ b/drivers/regulator/rk808-regulator.c | |||
@@ -97,7 +97,7 @@ static int rk808_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay) | |||
97 | RK808_RAMP_RATE_MASK, ramp_value); | 97 | RK808_RAMP_RATE_MASK, ramp_value); |
98 | } | 98 | } |
99 | 99 | ||
100 | int rk808_set_suspend_voltage(struct regulator_dev *rdev, int uv) | 100 | static int rk808_set_suspend_voltage(struct regulator_dev *rdev, int uv) |
101 | { | 101 | { |
102 | unsigned int reg; | 102 | unsigned int reg; |
103 | int sel = regulator_map_voltage_linear_range(rdev, uv, uv); | 103 | int sel = regulator_map_voltage_linear_range(rdev, uv, uv); |
@@ -112,7 +112,7 @@ int rk808_set_suspend_voltage(struct regulator_dev *rdev, int uv) | |||
112 | sel); | 112 | sel); |
113 | } | 113 | } |
114 | 114 | ||
115 | int rk808_set_suspend_enable(struct regulator_dev *rdev) | 115 | static int rk808_set_suspend_enable(struct regulator_dev *rdev) |
116 | { | 116 | { |
117 | unsigned int reg; | 117 | unsigned int reg; |
118 | 118 | ||
@@ -123,7 +123,7 @@ int rk808_set_suspend_enable(struct regulator_dev *rdev) | |||
123 | 0); | 123 | 0); |
124 | } | 124 | } |
125 | 125 | ||
126 | int rk808_set_suspend_disable(struct regulator_dev *rdev) | 126 | static int rk808_set_suspend_disable(struct regulator_dev *rdev) |
127 | { | 127 | { |
128 | unsigned int reg; | 128 | unsigned int reg; |
129 | 129 | ||
diff --git a/drivers/regulator/rt5033-regulator.c b/drivers/regulator/rt5033-regulator.c index 870cc49438db..96d2c18e051a 100644 --- a/drivers/regulator/rt5033-regulator.c +++ b/drivers/regulator/rt5033-regulator.c | |||
@@ -36,6 +36,8 @@ static struct regulator_ops rt5033_buck_ops = { | |||
36 | static const struct regulator_desc rt5033_supported_regulators[] = { | 36 | static const struct regulator_desc rt5033_supported_regulators[] = { |
37 | [RT5033_BUCK] = { | 37 | [RT5033_BUCK] = { |
38 | .name = "BUCK", | 38 | .name = "BUCK", |
39 | .of_match = of_match_ptr("BUCK"), | ||
40 | .regulators_node = of_match_ptr("regulators"), | ||
39 | .id = RT5033_BUCK, | 41 | .id = RT5033_BUCK, |
40 | .ops = &rt5033_buck_ops, | 42 | .ops = &rt5033_buck_ops, |
41 | .type = REGULATOR_VOLTAGE, | 43 | .type = REGULATOR_VOLTAGE, |
@@ -50,6 +52,8 @@ static const struct regulator_desc rt5033_supported_regulators[] = { | |||
50 | }, | 52 | }, |
51 | [RT5033_LDO] = { | 53 | [RT5033_LDO] = { |
52 | .name = "LDO", | 54 | .name = "LDO", |
55 | .of_match = of_match_ptr("LDO"), | ||
56 | .regulators_node = of_match_ptr("regulators"), | ||
53 | .id = RT5033_LDO, | 57 | .id = RT5033_LDO, |
54 | .ops = &rt5033_buck_ops, | 58 | .ops = &rt5033_buck_ops, |
55 | .type = REGULATOR_VOLTAGE, | 59 | .type = REGULATOR_VOLTAGE, |
@@ -64,6 +68,8 @@ static const struct regulator_desc rt5033_supported_regulators[] = { | |||
64 | }, | 68 | }, |
65 | [RT5033_SAFE_LDO] = { | 69 | [RT5033_SAFE_LDO] = { |
66 | .name = "SAFE_LDO", | 70 | .name = "SAFE_LDO", |
71 | .of_match = of_match_ptr("SAFE_LDO"), | ||
72 | .regulators_node = of_match_ptr("regulators"), | ||
67 | .id = RT5033_SAFE_LDO, | 73 | .id = RT5033_SAFE_LDO, |
68 | .ops = &rt5033_safe_ldo_ops, | 74 | .ops = &rt5033_safe_ldo_ops, |
69 | .type = REGULATOR_VOLTAGE, | 75 | .type = REGULATOR_VOLTAGE, |
@@ -81,7 +87,7 @@ static int rt5033_regulator_probe(struct platform_device *pdev) | |||
81 | int ret, i; | 87 | int ret, i; |
82 | struct regulator_config config = {}; | 88 | struct regulator_config config = {}; |
83 | 89 | ||
84 | config.dev = &pdev->dev; | 90 | config.dev = rt5033->dev; |
85 | config.driver_data = rt5033; | 91 | config.driver_data = rt5033; |
86 | 92 | ||
87 | for (i = 0; i < ARRAY_SIZE(rt5033_supported_regulators); i++) { | 93 | for (i = 0; i < ARRAY_SIZE(rt5033_supported_regulators); i++) { |
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c index 2809ae0d6bcd..ff828117798f 100644 --- a/drivers/regulator/s2mps11.c +++ b/drivers/regulator/s2mps11.c | |||
@@ -405,6 +405,40 @@ static struct regulator_ops s2mps14_reg_ops; | |||
405 | .enable_mask = S2MPS14_ENABLE_MASK \ | 405 | .enable_mask = S2MPS14_ENABLE_MASK \ |
406 | } | 406 | } |
407 | 407 | ||
408 | #define regulator_desc_s2mps13_buck7(num, min, step, min_sel) { \ | ||
409 | .name = "BUCK"#num, \ | ||
410 | .id = S2MPS13_BUCK##num, \ | ||
411 | .ops = &s2mps14_reg_ops, \ | ||
412 | .type = REGULATOR_VOLTAGE, \ | ||
413 | .owner = THIS_MODULE, \ | ||
414 | .min_uV = min, \ | ||
415 | .uV_step = step, \ | ||
416 | .linear_min_sel = min_sel, \ | ||
417 | .n_voltages = S2MPS14_BUCK_N_VOLTAGES, \ | ||
418 | .ramp_delay = S2MPS13_BUCK_RAMP_DELAY, \ | ||
419 | .vsel_reg = S2MPS13_REG_B1OUT + (num) * 2 - 1, \ | ||
420 | .vsel_mask = S2MPS14_BUCK_VSEL_MASK, \ | ||
421 | .enable_reg = S2MPS13_REG_B1CTRL + (num - 1) * 2, \ | ||
422 | .enable_mask = S2MPS14_ENABLE_MASK \ | ||
423 | } | ||
424 | |||
425 | #define regulator_desc_s2mps13_buck8_10(num, min, step, min_sel) { \ | ||
426 | .name = "BUCK"#num, \ | ||
427 | .id = S2MPS13_BUCK##num, \ | ||
428 | .ops = &s2mps14_reg_ops, \ | ||
429 | .type = REGULATOR_VOLTAGE, \ | ||
430 | .owner = THIS_MODULE, \ | ||
431 | .min_uV = min, \ | ||
432 | .uV_step = step, \ | ||
433 | .linear_min_sel = min_sel, \ | ||
434 | .n_voltages = S2MPS14_BUCK_N_VOLTAGES, \ | ||
435 | .ramp_delay = S2MPS13_BUCK_RAMP_DELAY, \ | ||
436 | .vsel_reg = S2MPS13_REG_B1OUT + (num) * 2 - 1, \ | ||
437 | .vsel_mask = S2MPS14_BUCK_VSEL_MASK, \ | ||
438 | .enable_reg = S2MPS13_REG_B1CTRL + (num) * 2 - 1, \ | ||
439 | .enable_mask = S2MPS14_ENABLE_MASK \ | ||
440 | } | ||
441 | |||
408 | static const struct regulator_desc s2mps13_regulators[] = { | 442 | static const struct regulator_desc s2mps13_regulators[] = { |
409 | regulator_desc_s2mps13_ldo(1, MIN_800_MV, STEP_12_5_MV, 0x00), | 443 | regulator_desc_s2mps13_ldo(1, MIN_800_MV, STEP_12_5_MV, 0x00), |
410 | regulator_desc_s2mps13_ldo(2, MIN_1400_MV, STEP_50_MV, 0x0C), | 444 | regulator_desc_s2mps13_ldo(2, MIN_1400_MV, STEP_50_MV, 0x0C), |
@@ -452,10 +486,10 @@ static const struct regulator_desc s2mps13_regulators[] = { | |||
452 | regulator_desc_s2mps13_buck(4, MIN_500_MV, STEP_6_25_MV, 0x10), | 486 | regulator_desc_s2mps13_buck(4, MIN_500_MV, STEP_6_25_MV, 0x10), |
453 | regulator_desc_s2mps13_buck(5, MIN_500_MV, STEP_6_25_MV, 0x10), | 487 | regulator_desc_s2mps13_buck(5, MIN_500_MV, STEP_6_25_MV, 0x10), |
454 | regulator_desc_s2mps13_buck(6, MIN_500_MV, STEP_6_25_MV, 0x10), | 488 | regulator_desc_s2mps13_buck(6, MIN_500_MV, STEP_6_25_MV, 0x10), |
455 | regulator_desc_s2mps13_buck(7, MIN_500_MV, STEP_6_25_MV, 0x10), | 489 | regulator_desc_s2mps13_buck7(7, MIN_500_MV, STEP_6_25_MV, 0x10), |
456 | regulator_desc_s2mps13_buck(8, MIN_1000_MV, STEP_12_5_MV, 0x20), | 490 | regulator_desc_s2mps13_buck8_10(8, MIN_1000_MV, STEP_12_5_MV, 0x20), |
457 | regulator_desc_s2mps13_buck(9, MIN_1000_MV, STEP_12_5_MV, 0x20), | 491 | regulator_desc_s2mps13_buck8_10(9, MIN_1000_MV, STEP_12_5_MV, 0x20), |
458 | regulator_desc_s2mps13_buck(10, MIN_500_MV, STEP_6_25_MV, 0x10), | 492 | regulator_desc_s2mps13_buck8_10(10, MIN_500_MV, STEP_6_25_MV, 0x10), |
459 | }; | 493 | }; |
460 | 494 | ||
461 | static int s2mps14_regulator_enable(struct regulator_dev *rdev) | 495 | static int s2mps14_regulator_enable(struct regulator_dev *rdev) |
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c index 7380af8bd50d..b941e564b3f3 100644 --- a/drivers/regulator/tps65023-regulator.c +++ b/drivers/regulator/tps65023-regulator.c | |||
@@ -173,7 +173,7 @@ static int tps65023_dcdc_set_voltage_sel(struct regulator_dev *dev, | |||
173 | } | 173 | } |
174 | 174 | ||
175 | /* Operations permitted on VDCDCx */ | 175 | /* Operations permitted on VDCDCx */ |
176 | static struct regulator_ops tps65023_dcdc_ops = { | 176 | static const struct regulator_ops tps65023_dcdc_ops = { |
177 | .is_enabled = regulator_is_enabled_regmap, | 177 | .is_enabled = regulator_is_enabled_regmap, |
178 | .enable = regulator_enable_regmap, | 178 | .enable = regulator_enable_regmap, |
179 | .disable = regulator_disable_regmap, | 179 | .disable = regulator_disable_regmap, |
@@ -184,7 +184,7 @@ static struct regulator_ops tps65023_dcdc_ops = { | |||
184 | }; | 184 | }; |
185 | 185 | ||
186 | /* Operations permitted on LDOx */ | 186 | /* Operations permitted on LDOx */ |
187 | static struct regulator_ops tps65023_ldo_ops = { | 187 | static const struct regulator_ops tps65023_ldo_ops = { |
188 | .is_enabled = regulator_is_enabled_regmap, | 188 | .is_enabled = regulator_is_enabled_regmap, |
189 | .enable = regulator_enable_regmap, | 189 | .enable = regulator_enable_regmap, |
190 | .disable = regulator_disable_regmap, | 190 | .disable = regulator_disable_regmap, |
@@ -194,7 +194,7 @@ static struct regulator_ops tps65023_ldo_ops = { | |||
194 | .map_voltage = regulator_map_voltage_ascend, | 194 | .map_voltage = regulator_map_voltage_ascend, |
195 | }; | 195 | }; |
196 | 196 | ||
197 | static struct regmap_config tps65023_regmap_config = { | 197 | static const struct regmap_config tps65023_regmap_config = { |
198 | .reg_bits = 8, | 198 | .reg_bits = 8, |
199 | .val_bits = 8, | 199 | .val_bits = 8, |
200 | }; | 200 | }; |
diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c index 4aa60d74004e..6c719f23520a 100644 --- a/drivers/rtc/hctosys.c +++ b/drivers/rtc/hctosys.c | |||
@@ -26,7 +26,7 @@ static int __init rtc_hctosys(void) | |||
26 | { | 26 | { |
27 | int err = -ENODEV; | 27 | int err = -ENODEV; |
28 | struct rtc_time tm; | 28 | struct rtc_time tm; |
29 | struct timespec tv = { | 29 | struct timespec64 tv64 = { |
30 | .tv_nsec = NSEC_PER_SEC >> 1, | 30 | .tv_nsec = NSEC_PER_SEC >> 1, |
31 | }; | 31 | }; |
32 | struct rtc_device *rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE); | 32 | struct rtc_device *rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE); |
@@ -45,25 +45,17 @@ static int __init rtc_hctosys(void) | |||
45 | 45 | ||
46 | } | 46 | } |
47 | 47 | ||
48 | err = rtc_valid_tm(&tm); | 48 | tv64.tv_sec = rtc_tm_to_time64(&tm); |
49 | if (err) { | ||
50 | dev_err(rtc->dev.parent, | ||
51 | "hctosys: invalid date/time\n"); | ||
52 | goto err_invalid; | ||
53 | } | ||
54 | |||
55 | rtc_tm_to_time(&tm, &tv.tv_sec); | ||
56 | 49 | ||
57 | err = do_settimeofday(&tv); | 50 | err = do_settimeofday64(&tv64); |
58 | 51 | ||
59 | dev_info(rtc->dev.parent, | 52 | dev_info(rtc->dev.parent, |
60 | "setting system clock to " | 53 | "setting system clock to " |
61 | "%d-%02d-%02d %02d:%02d:%02d UTC (%u)\n", | 54 | "%d-%02d-%02d %02d:%02d:%02d UTC (%lld)\n", |
62 | tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, | 55 | tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, |
63 | tm.tm_hour, tm.tm_min, tm.tm_sec, | 56 | tm.tm_hour, tm.tm_min, tm.tm_sec, |
64 | (unsigned int) tv.tv_sec); | 57 | (long long) tv64.tv_sec); |
65 | 58 | ||
66 | err_invalid: | ||
67 | err_read: | 59 | err_read: |
68 | rtc_class_close(rtc); | 60 | rtc_class_close(rtc); |
69 | 61 | ||
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 45bfc28ee3aa..37215cf983e9 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c | |||
@@ -73,10 +73,8 @@ int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm) | |||
73 | else if (rtc->ops->set_time) | 73 | else if (rtc->ops->set_time) |
74 | err = rtc->ops->set_time(rtc->dev.parent, tm); | 74 | err = rtc->ops->set_time(rtc->dev.parent, tm); |
75 | else if (rtc->ops->set_mmss) { | 75 | else if (rtc->ops->set_mmss) { |
76 | unsigned long secs; | 76 | time64_t secs64 = rtc_tm_to_time64(tm); |
77 | err = rtc_tm_to_time(tm, &secs); | 77 | err = rtc->ops->set_mmss(rtc->dev.parent, secs64); |
78 | if (err == 0) | ||
79 | err = rtc->ops->set_mmss(rtc->dev.parent, secs); | ||
80 | } else | 78 | } else |
81 | err = -EINVAL; | 79 | err = -EINVAL; |
82 | 80 | ||
@@ -105,7 +103,7 @@ int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs) | |||
105 | 103 | ||
106 | err = rtc->ops->read_time(rtc->dev.parent, &old); | 104 | err = rtc->ops->read_time(rtc->dev.parent, &old); |
107 | if (err == 0) { | 105 | if (err == 0) { |
108 | rtc_time_to_tm(secs, &new); | 106 | rtc_time64_to_tm(secs, &new); |
109 | 107 | ||
110 | /* | 108 | /* |
111 | * avoid writing when we're going to change the day of | 109 | * avoid writing when we're going to change the day of |
@@ -157,7 +155,7 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) | |||
157 | int err; | 155 | int err; |
158 | struct rtc_time before, now; | 156 | struct rtc_time before, now; |
159 | int first_time = 1; | 157 | int first_time = 1; |
160 | unsigned long t_now, t_alm; | 158 | time64_t t_now, t_alm; |
161 | enum { none, day, month, year } missing = none; | 159 | enum { none, day, month, year } missing = none; |
162 | unsigned days; | 160 | unsigned days; |
163 | 161 | ||
@@ -258,8 +256,8 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) | |||
258 | } | 256 | } |
259 | 257 | ||
260 | /* with luck, no rollover is needed */ | 258 | /* with luck, no rollover is needed */ |
261 | rtc_tm_to_time(&now, &t_now); | 259 | t_now = rtc_tm_to_time64(&now); |
262 | rtc_tm_to_time(&alarm->time, &t_alm); | 260 | t_alm = rtc_tm_to_time64(&alarm->time); |
263 | if (t_now < t_alm) | 261 | if (t_now < t_alm) |
264 | goto done; | 262 | goto done; |
265 | 263 | ||
@@ -273,7 +271,7 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) | |||
273 | case day: | 271 | case day: |
274 | dev_dbg(&rtc->dev, "alarm rollover: %s\n", "day"); | 272 | dev_dbg(&rtc->dev, "alarm rollover: %s\n", "day"); |
275 | t_alm += 24 * 60 * 60; | 273 | t_alm += 24 * 60 * 60; |
276 | rtc_time_to_tm(t_alm, &alarm->time); | 274 | rtc_time64_to_tm(t_alm, &alarm->time); |
277 | break; | 275 | break; |
278 | 276 | ||
279 | /* Month rollover ... if it's the 31th, an alarm on the 3rd will | 277 | /* Month rollover ... if it's the 31th, an alarm on the 3rd will |
@@ -346,19 +344,19 @@ EXPORT_SYMBOL_GPL(rtc_read_alarm); | |||
346 | static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) | 344 | static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) |
347 | { | 345 | { |
348 | struct rtc_time tm; | 346 | struct rtc_time tm; |
349 | long now, scheduled; | 347 | time64_t now, scheduled; |
350 | int err; | 348 | int err; |
351 | 349 | ||
352 | err = rtc_valid_tm(&alarm->time); | 350 | err = rtc_valid_tm(&alarm->time); |
353 | if (err) | 351 | if (err) |
354 | return err; | 352 | return err; |
355 | rtc_tm_to_time(&alarm->time, &scheduled); | 353 | scheduled = rtc_tm_to_time64(&alarm->time); |
356 | 354 | ||
357 | /* Make sure we're not setting alarms in the past */ | 355 | /* Make sure we're not setting alarms in the past */ |
358 | err = __rtc_read_time(rtc, &tm); | 356 | err = __rtc_read_time(rtc, &tm); |
359 | if (err) | 357 | if (err) |
360 | return err; | 358 | return err; |
361 | rtc_tm_to_time(&tm, &now); | 359 | now = rtc_tm_to_time64(&tm); |
362 | if (scheduled <= now) | 360 | if (scheduled <= now) |
363 | return -ETIME; | 361 | return -ETIME; |
364 | /* | 362 | /* |
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c index d04939369251..799c34bcb26f 100644 --- a/drivers/rtc/rtc-dev.c +++ b/drivers/rtc/rtc-dev.c | |||
@@ -304,12 +304,12 @@ static long rtc_dev_ioctl(struct file *file, | |||
304 | * Not supported here. | 304 | * Not supported here. |
305 | */ | 305 | */ |
306 | { | 306 | { |
307 | unsigned long now, then; | 307 | time64_t now, then; |
308 | 308 | ||
309 | err = rtc_read_time(rtc, &tm); | 309 | err = rtc_read_time(rtc, &tm); |
310 | if (err < 0) | 310 | if (err < 0) |
311 | return err; | 311 | return err; |
312 | rtc_tm_to_time(&tm, &now); | 312 | now = rtc_tm_to_time64(&tm); |
313 | 313 | ||
314 | alarm.time.tm_mday = tm.tm_mday; | 314 | alarm.time.tm_mday = tm.tm_mday; |
315 | alarm.time.tm_mon = tm.tm_mon; | 315 | alarm.time.tm_mon = tm.tm_mon; |
@@ -317,11 +317,11 @@ static long rtc_dev_ioctl(struct file *file, | |||
317 | err = rtc_valid_tm(&alarm.time); | 317 | err = rtc_valid_tm(&alarm.time); |
318 | if (err < 0) | 318 | if (err < 0) |
319 | return err; | 319 | return err; |
320 | rtc_tm_to_time(&alarm.time, &then); | 320 | then = rtc_tm_to_time64(&alarm.time); |
321 | 321 | ||
322 | /* alarm may need to wrap into tomorrow */ | 322 | /* alarm may need to wrap into tomorrow */ |
323 | if (then < now) { | 323 | if (then < now) { |
324 | rtc_time_to_tm(now + 24 * 60 * 60, &tm); | 324 | rtc_time64_to_tm(now + 24 * 60 * 60, &tm); |
325 | alarm.time.tm_mday = tm.tm_mday; | 325 | alarm.time.tm_mday = tm.tm_mday; |
326 | alarm.time.tm_mon = tm.tm_mon; | 326 | alarm.time.tm_mon = tm.tm_mon; |
327 | alarm.time.tm_year = tm.tm_year; | 327 | alarm.time.tm_year = tm.tm_year; |
diff --git a/drivers/rtc/rtc-efi.c b/drivers/rtc/rtc-efi.c index b37b0c80bd5a..cb989cd00b14 100644 --- a/drivers/rtc/rtc-efi.c +++ b/drivers/rtc/rtc-efi.c | |||
@@ -218,6 +218,7 @@ static int __init efi_rtc_probe(struct platform_device *dev) | |||
218 | if (IS_ERR(rtc)) | 218 | if (IS_ERR(rtc)) |
219 | return PTR_ERR(rtc); | 219 | return PTR_ERR(rtc); |
220 | 220 | ||
221 | rtc->uie_unsupported = 1; | ||
221 | platform_set_drvdata(dev, rtc); | 222 | platform_set_drvdata(dev, rtc); |
222 | 223 | ||
223 | return 0; | 224 | return 0; |
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c index b5e7c4670205..89ac1d5083c6 100644 --- a/drivers/rtc/rtc-s5m.c +++ b/drivers/rtc/rtc-s5m.c | |||
@@ -832,6 +832,7 @@ static SIMPLE_DEV_PM_OPS(s5m_rtc_pm_ops, s5m_rtc_suspend, s5m_rtc_resume); | |||
832 | static const struct platform_device_id s5m_rtc_id[] = { | 832 | static const struct platform_device_id s5m_rtc_id[] = { |
833 | { "s5m-rtc", S5M8767X }, | 833 | { "s5m-rtc", S5M8767X }, |
834 | { "s2mps14-rtc", S2MPS14X }, | 834 | { "s2mps14-rtc", S2MPS14X }, |
835 | { }, | ||
835 | }; | 836 | }; |
836 | 837 | ||
837 | static struct platform_driver s5m_rtc_driver = { | 838 | static struct platform_driver s5m_rtc_driver = { |
diff --git a/drivers/rtc/systohc.c b/drivers/rtc/systohc.c index bf3e242ccc5c..eb71872d0361 100644 --- a/drivers/rtc/systohc.c +++ b/drivers/rtc/systohc.c | |||
@@ -20,16 +20,16 @@ | |||
20 | * | 20 | * |
21 | * If temporary failure is indicated the caller should try again 'soon' | 21 | * If temporary failure is indicated the caller should try again 'soon' |
22 | */ | 22 | */ |
23 | int rtc_set_ntp_time(struct timespec now) | 23 | int rtc_set_ntp_time(struct timespec64 now) |
24 | { | 24 | { |
25 | struct rtc_device *rtc; | 25 | struct rtc_device *rtc; |
26 | struct rtc_time tm; | 26 | struct rtc_time tm; |
27 | int err = -ENODEV; | 27 | int err = -ENODEV; |
28 | 28 | ||
29 | if (now.tv_nsec < (NSEC_PER_SEC >> 1)) | 29 | if (now.tv_nsec < (NSEC_PER_SEC >> 1)) |
30 | rtc_time_to_tm(now.tv_sec, &tm); | 30 | rtc_time64_to_tm(now.tv_sec, &tm); |
31 | else | 31 | else |
32 | rtc_time_to_tm(now.tv_sec + 1, &tm); | 32 | rtc_time64_to_tm(now.tv_sec + 1, &tm); |
33 | 33 | ||
34 | rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE); | 34 | rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE); |
35 | if (rtc) { | 35 | if (rtc) { |
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index f407e3763432..642c77c76b84 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
@@ -1784,6 +1784,8 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel, | |||
1784 | QETH_DBF_TEXT(SETUP, 2, "idxanswr"); | 1784 | QETH_DBF_TEXT(SETUP, 2, "idxanswr"); |
1785 | card = CARD_FROM_CDEV(channel->ccwdev); | 1785 | card = CARD_FROM_CDEV(channel->ccwdev); |
1786 | iob = qeth_get_buffer(channel); | 1786 | iob = qeth_get_buffer(channel); |
1787 | if (!iob) | ||
1788 | return -ENOMEM; | ||
1787 | iob->callback = idx_reply_cb; | 1789 | iob->callback = idx_reply_cb; |
1788 | memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1)); | 1790 | memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1)); |
1789 | channel->ccw.count = QETH_BUFSIZE; | 1791 | channel->ccw.count = QETH_BUFSIZE; |
@@ -1834,6 +1836,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, | |||
1834 | QETH_DBF_TEXT(SETUP, 2, "idxactch"); | 1836 | QETH_DBF_TEXT(SETUP, 2, "idxactch"); |
1835 | 1837 | ||
1836 | iob = qeth_get_buffer(channel); | 1838 | iob = qeth_get_buffer(channel); |
1839 | if (!iob) | ||
1840 | return -ENOMEM; | ||
1837 | iob->callback = idx_reply_cb; | 1841 | iob->callback = idx_reply_cb; |
1838 | memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1)); | 1842 | memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1)); |
1839 | channel->ccw.count = IDX_ACTIVATE_SIZE; | 1843 | channel->ccw.count = IDX_ACTIVATE_SIZE; |
@@ -2021,10 +2025,36 @@ void qeth_prepare_control_data(struct qeth_card *card, int len, | |||
2021 | } | 2025 | } |
2022 | EXPORT_SYMBOL_GPL(qeth_prepare_control_data); | 2026 | EXPORT_SYMBOL_GPL(qeth_prepare_control_data); |
2023 | 2027 | ||
2028 | /** | ||
2029 | * qeth_send_control_data() - send control command to the card | ||
2030 | * @card: qeth_card structure pointer | ||
2031 | * @len: size of the command buffer | ||
2032 | * @iob: qeth_cmd_buffer pointer | ||
2033 | * @reply_cb: callback function pointer | ||
2034 | * @cb_card: pointer to the qeth_card structure | ||
2035 | * @cb_reply: pointer to the qeth_reply structure | ||
2036 | * @cb_cmd: pointer to the original iob for non-IPA | ||
2037 | * commands, or to the qeth_ipa_cmd structure | ||
2038 | * for the IPA commands. | ||
2039 | * @reply_param: private pointer passed to the callback | ||
2040 | * | ||
2041 | * Returns the value of the `return_code' field of the response | ||
2042 | * block returned from the hardware, or other error indication. | ||
2043 | * Value of zero indicates successful execution of the command. | ||
2044 | * | ||
2045 | * Callback function gets called one or more times, with cb_cmd | ||
2046 | * pointing to the response returned by the hardware. Callback | ||
2047 | * function must return non-zero if more reply blocks are expected, | ||
2048 | * and zero if the last or only reply block is received. Callback | ||
2049 | * function can get the value of the reply_param pointer from the | ||
2050 | * field 'param' of the structure qeth_reply. | ||
2051 | */ | ||
2052 | |||
2024 | int qeth_send_control_data(struct qeth_card *card, int len, | 2053 | int qeth_send_control_data(struct qeth_card *card, int len, |
2025 | struct qeth_cmd_buffer *iob, | 2054 | struct qeth_cmd_buffer *iob, |
2026 | int (*reply_cb)(struct qeth_card *, struct qeth_reply *, | 2055 | int (*reply_cb)(struct qeth_card *cb_card, |
2027 | unsigned long), | 2056 | struct qeth_reply *cb_reply, |
2057 | unsigned long cb_cmd), | ||
2028 | void *reply_param) | 2058 | void *reply_param) |
2029 | { | 2059 | { |
2030 | int rc; | 2060 | int rc; |
@@ -2914,9 +2944,16 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card, | |||
2914 | struct qeth_cmd_buffer *iob; | 2944 | struct qeth_cmd_buffer *iob; |
2915 | struct qeth_ipa_cmd *cmd; | 2945 | struct qeth_ipa_cmd *cmd; |
2916 | 2946 | ||
2917 | iob = qeth_wait_for_buffer(&card->write); | 2947 | iob = qeth_get_buffer(&card->write); |
2918 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 2948 | if (iob) { |
2919 | qeth_fill_ipacmd_header(card, cmd, ipacmd, prot); | 2949 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
2950 | qeth_fill_ipacmd_header(card, cmd, ipacmd, prot); | ||
2951 | } else { | ||
2952 | dev_warn(&card->gdev->dev, | ||
2953 | "The qeth driver ran out of channel command buffers\n"); | ||
2954 | QETH_DBF_MESSAGE(1, "%s The qeth driver ran out of channel command buffers", | ||
2955 | dev_name(&card->gdev->dev)); | ||
2956 | } | ||
2920 | 2957 | ||
2921 | return iob; | 2958 | return iob; |
2922 | } | 2959 | } |
@@ -2932,6 +2969,12 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, | |||
2932 | } | 2969 | } |
2933 | EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd); | 2970 | EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd); |
2934 | 2971 | ||
2972 | /** | ||
2973 | * qeth_send_ipa_cmd() - send an IPA command | ||
2974 | * | ||
2975 | * See qeth_send_control_data() for explanation of the arguments. | ||
2976 | */ | ||
2977 | |||
2935 | int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, | 2978 | int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, |
2936 | int (*reply_cb)(struct qeth_card *, struct qeth_reply*, | 2979 | int (*reply_cb)(struct qeth_card *, struct qeth_reply*, |
2937 | unsigned long), | 2980 | unsigned long), |
@@ -2968,6 +3011,8 @@ int qeth_send_startlan(struct qeth_card *card) | |||
2968 | QETH_DBF_TEXT(SETUP, 2, "strtlan"); | 3011 | QETH_DBF_TEXT(SETUP, 2, "strtlan"); |
2969 | 3012 | ||
2970 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0); | 3013 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0); |
3014 | if (!iob) | ||
3015 | return -ENOMEM; | ||
2971 | rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); | 3016 | rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); |
2972 | return rc; | 3017 | return rc; |
2973 | } | 3018 | } |
@@ -3013,11 +3058,13 @@ static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card, | |||
3013 | 3058 | ||
3014 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS, | 3059 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS, |
3015 | QETH_PROT_IPV4); | 3060 | QETH_PROT_IPV4); |
3016 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 3061 | if (iob) { |
3017 | cmd->data.setadapterparms.hdr.cmdlength = cmdlen; | 3062 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
3018 | cmd->data.setadapterparms.hdr.command_code = command; | 3063 | cmd->data.setadapterparms.hdr.cmdlength = cmdlen; |
3019 | cmd->data.setadapterparms.hdr.used_total = 1; | 3064 | cmd->data.setadapterparms.hdr.command_code = command; |
3020 | cmd->data.setadapterparms.hdr.seq_no = 1; | 3065 | cmd->data.setadapterparms.hdr.used_total = 1; |
3066 | cmd->data.setadapterparms.hdr.seq_no = 1; | ||
3067 | } | ||
3021 | 3068 | ||
3022 | return iob; | 3069 | return iob; |
3023 | } | 3070 | } |
@@ -3030,6 +3077,8 @@ int qeth_query_setadapterparms(struct qeth_card *card) | |||
3030 | QETH_CARD_TEXT(card, 3, "queryadp"); | 3077 | QETH_CARD_TEXT(card, 3, "queryadp"); |
3031 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED, | 3078 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED, |
3032 | sizeof(struct qeth_ipacmd_setadpparms)); | 3079 | sizeof(struct qeth_ipacmd_setadpparms)); |
3080 | if (!iob) | ||
3081 | return -ENOMEM; | ||
3033 | rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL); | 3082 | rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL); |
3034 | return rc; | 3083 | return rc; |
3035 | } | 3084 | } |
@@ -3080,6 +3129,8 @@ int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot) | |||
3080 | 3129 | ||
3081 | QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot); | 3130 | QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot); |
3082 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot); | 3131 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot); |
3132 | if (!iob) | ||
3133 | return -ENOMEM; | ||
3083 | rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL); | 3134 | rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL); |
3084 | return rc; | 3135 | return rc; |
3085 | } | 3136 | } |
@@ -3119,6 +3170,8 @@ int qeth_query_switch_attributes(struct qeth_card *card, | |||
3119 | return -ENOMEDIUM; | 3170 | return -ENOMEDIUM; |
3120 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, | 3171 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, |
3121 | sizeof(struct qeth_ipacmd_setadpparms_hdr)); | 3172 | sizeof(struct qeth_ipacmd_setadpparms_hdr)); |
3173 | if (!iob) | ||
3174 | return -ENOMEM; | ||
3122 | return qeth_send_ipa_cmd(card, iob, | 3175 | return qeth_send_ipa_cmd(card, iob, |
3123 | qeth_query_switch_attributes_cb, sw_info); | 3176 | qeth_query_switch_attributes_cb, sw_info); |
3124 | } | 3177 | } |
@@ -3146,6 +3199,8 @@ static int qeth_query_setdiagass(struct qeth_card *card) | |||
3146 | 3199 | ||
3147 | QETH_DBF_TEXT(SETUP, 2, "qdiagass"); | 3200 | QETH_DBF_TEXT(SETUP, 2, "qdiagass"); |
3148 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); | 3201 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); |
3202 | if (!iob) | ||
3203 | return -ENOMEM; | ||
3149 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 3204 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
3150 | cmd->data.diagass.subcmd_len = 16; | 3205 | cmd->data.diagass.subcmd_len = 16; |
3151 | cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY; | 3206 | cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY; |
@@ -3197,6 +3252,8 @@ int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action) | |||
3197 | 3252 | ||
3198 | QETH_DBF_TEXT(SETUP, 2, "diagtrap"); | 3253 | QETH_DBF_TEXT(SETUP, 2, "diagtrap"); |
3199 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); | 3254 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); |
3255 | if (!iob) | ||
3256 | return -ENOMEM; | ||
3200 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 3257 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
3201 | cmd->data.diagass.subcmd_len = 80; | 3258 | cmd->data.diagass.subcmd_len = 80; |
3202 | cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP; | 3259 | cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP; |
@@ -4162,6 +4219,8 @@ void qeth_setadp_promisc_mode(struct qeth_card *card) | |||
4162 | 4219 | ||
4163 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, | 4220 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, |
4164 | sizeof(struct qeth_ipacmd_setadpparms)); | 4221 | sizeof(struct qeth_ipacmd_setadpparms)); |
4222 | if (!iob) | ||
4223 | return; | ||
4165 | cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE); | 4224 | cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE); |
4166 | cmd->data.setadapterparms.data.mode = mode; | 4225 | cmd->data.setadapterparms.data.mode = mode; |
4167 | qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL); | 4226 | qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL); |
@@ -4232,6 +4291,8 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card) | |||
4232 | 4291 | ||
4233 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS, | 4292 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS, |
4234 | sizeof(struct qeth_ipacmd_setadpparms)); | 4293 | sizeof(struct qeth_ipacmd_setadpparms)); |
4294 | if (!iob) | ||
4295 | return -ENOMEM; | ||
4235 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 4296 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
4236 | cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC; | 4297 | cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC; |
4237 | cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN; | 4298 | cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN; |
@@ -4345,6 +4406,8 @@ static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, | |||
4345 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL, | 4406 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL, |
4346 | sizeof(struct qeth_ipacmd_setadpparms_hdr) + | 4407 | sizeof(struct qeth_ipacmd_setadpparms_hdr) + |
4347 | sizeof(struct qeth_set_access_ctrl)); | 4408 | sizeof(struct qeth_set_access_ctrl)); |
4409 | if (!iob) | ||
4410 | return -ENOMEM; | ||
4348 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 4411 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
4349 | access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; | 4412 | access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; |
4350 | access_ctrl_req->subcmd_code = isolation; | 4413 | access_ctrl_req->subcmd_code = isolation; |
@@ -4588,6 +4651,10 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata) | |||
4588 | 4651 | ||
4589 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, | 4652 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, |
4590 | QETH_SNMP_SETADP_CMDLENGTH + req_len); | 4653 | QETH_SNMP_SETADP_CMDLENGTH + req_len); |
4654 | if (!iob) { | ||
4655 | rc = -ENOMEM; | ||
4656 | goto out; | ||
4657 | } | ||
4591 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 4658 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
4592 | memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len); | 4659 | memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len); |
4593 | rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len, | 4660 | rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len, |
@@ -4599,7 +4666,7 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata) | |||
4599 | if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) | 4666 | if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) |
4600 | rc = -EFAULT; | 4667 | rc = -EFAULT; |
4601 | } | 4668 | } |
4602 | 4669 | out: | |
4603 | kfree(ureq); | 4670 | kfree(ureq); |
4604 | kfree(qinfo.udata); | 4671 | kfree(qinfo.udata); |
4605 | return rc; | 4672 | return rc; |
@@ -4670,6 +4737,10 @@ int qeth_query_oat_command(struct qeth_card *card, char __user *udata) | |||
4670 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, | 4737 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, |
4671 | sizeof(struct qeth_ipacmd_setadpparms_hdr) + | 4738 | sizeof(struct qeth_ipacmd_setadpparms_hdr) + |
4672 | sizeof(struct qeth_query_oat)); | 4739 | sizeof(struct qeth_query_oat)); |
4740 | if (!iob) { | ||
4741 | rc = -ENOMEM; | ||
4742 | goto out_free; | ||
4743 | } | ||
4673 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 4744 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
4674 | oat_req = &cmd->data.setadapterparms.data.query_oat; | 4745 | oat_req = &cmd->data.setadapterparms.data.query_oat; |
4675 | oat_req->subcmd_code = oat_data.command; | 4746 | oat_req->subcmd_code = oat_data.command; |
@@ -4735,6 +4806,8 @@ static int qeth_query_card_info(struct qeth_card *card, | |||
4735 | return -EOPNOTSUPP; | 4806 | return -EOPNOTSUPP; |
4736 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, | 4807 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, |
4737 | sizeof(struct qeth_ipacmd_setadpparms_hdr)); | 4808 | sizeof(struct qeth_ipacmd_setadpparms_hdr)); |
4809 | if (!iob) | ||
4810 | return -ENOMEM; | ||
4738 | return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, | 4811 | return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, |
4739 | (void *)carrier_info); | 4812 | (void *)carrier_info); |
4740 | } | 4813 | } |
@@ -5060,11 +5133,23 @@ retriable: | |||
5060 | card->options.adp.supported_funcs = 0; | 5133 | card->options.adp.supported_funcs = 0; |
5061 | card->options.sbp.supported_funcs = 0; | 5134 | card->options.sbp.supported_funcs = 0; |
5062 | card->info.diagass_support = 0; | 5135 | card->info.diagass_support = 0; |
5063 | qeth_query_ipassists(card, QETH_PROT_IPV4); | 5136 | rc = qeth_query_ipassists(card, QETH_PROT_IPV4); |
5064 | if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) | 5137 | if (rc == -ENOMEM) |
5065 | qeth_query_setadapterparms(card); | 5138 | goto out; |
5066 | if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) | 5139 | if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) { |
5067 | qeth_query_setdiagass(card); | 5140 | rc = qeth_query_setadapterparms(card); |
5141 | if (rc < 0) { | ||
5142 | QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); | ||
5143 | goto out; | ||
5144 | } | ||
5145 | } | ||
5146 | if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { | ||
5147 | rc = qeth_query_setdiagass(card); | ||
5148 | if (rc < 0) { | ||
5149 | QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc); | ||
5150 | goto out; | ||
5151 | } | ||
5152 | } | ||
5068 | return 0; | 5153 | return 0; |
5069 | out: | 5154 | out: |
5070 | dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " | 5155 | dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " |
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index d02cd1a67943..ce87ae72edbd 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
@@ -27,10 +27,7 @@ static int qeth_l2_set_offline(struct ccwgroup_device *); | |||
27 | static int qeth_l2_stop(struct net_device *); | 27 | static int qeth_l2_stop(struct net_device *); |
28 | static int qeth_l2_send_delmac(struct qeth_card *, __u8 *); | 28 | static int qeth_l2_send_delmac(struct qeth_card *, __u8 *); |
29 | static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *, | 29 | static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *, |
30 | enum qeth_ipa_cmds, | 30 | enum qeth_ipa_cmds); |
31 | int (*reply_cb) (struct qeth_card *, | ||
32 | struct qeth_reply*, | ||
33 | unsigned long)); | ||
34 | static void qeth_l2_set_multicast_list(struct net_device *); | 31 | static void qeth_l2_set_multicast_list(struct net_device *); |
35 | static int qeth_l2_recover(void *); | 32 | static int qeth_l2_recover(void *); |
36 | static void qeth_bridgeport_query_support(struct qeth_card *card); | 33 | static void qeth_bridgeport_query_support(struct qeth_card *card); |
@@ -130,56 +127,71 @@ static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no) | |||
130 | return ndev; | 127 | return ndev; |
131 | } | 128 | } |
132 | 129 | ||
133 | static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card, | 130 | static int qeth_setdel_makerc(struct qeth_card *card, int retcode) |
134 | struct qeth_reply *reply, | ||
135 | unsigned long data) | ||
136 | { | 131 | { |
137 | struct qeth_ipa_cmd *cmd; | 132 | int rc; |
138 | __u8 *mac; | ||
139 | 133 | ||
140 | QETH_CARD_TEXT(card, 2, "L2Sgmacb"); | 134 | if (retcode) |
141 | cmd = (struct qeth_ipa_cmd *) data; | 135 | QETH_CARD_TEXT_(card, 2, "err%04x", retcode); |
142 | mac = &cmd->data.setdelmac.mac[0]; | 136 | switch (retcode) { |
143 | /* MAC already registered, needed in couple/uncouple case */ | 137 | case IPA_RC_SUCCESS: |
144 | if (cmd->hdr.return_code == IPA_RC_L2_DUP_MAC) { | 138 | rc = 0; |
145 | QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s \n", | 139 | break; |
146 | mac, QETH_CARD_IFNAME(card)); | 140 | case IPA_RC_L2_UNSUPPORTED_CMD: |
147 | cmd->hdr.return_code = 0; | 141 | rc = -ENOSYS; |
142 | break; | ||
143 | case IPA_RC_L2_ADDR_TABLE_FULL: | ||
144 | rc = -ENOSPC; | ||
145 | break; | ||
146 | case IPA_RC_L2_DUP_MAC: | ||
147 | case IPA_RC_L2_DUP_LAYER3_MAC: | ||
148 | rc = -EEXIST; | ||
149 | break; | ||
150 | case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP: | ||
151 | case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP: | ||
152 | rc = -EPERM; | ||
153 | break; | ||
154 | case IPA_RC_L2_MAC_NOT_FOUND: | ||
155 | rc = -ENOENT; | ||
156 | break; | ||
157 | case -ENOMEM: | ||
158 | rc = -ENOMEM; | ||
159 | break; | ||
160 | default: | ||
161 | rc = -EIO; | ||
162 | break; | ||
148 | } | 163 | } |
149 | if (cmd->hdr.return_code) | 164 | return rc; |
150 | QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %x\n", | ||
151 | mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code); | ||
152 | return 0; | ||
153 | } | 165 | } |
154 | 166 | ||
155 | static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac) | 167 | static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac) |
156 | { | 168 | { |
157 | QETH_CARD_TEXT(card, 2, "L2Sgmac"); | 169 | int rc; |
158 | return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC, | ||
159 | qeth_l2_send_setgroupmac_cb); | ||
160 | } | ||
161 | |||
162 | static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card, | ||
163 | struct qeth_reply *reply, | ||
164 | unsigned long data) | ||
165 | { | ||
166 | struct qeth_ipa_cmd *cmd; | ||
167 | __u8 *mac; | ||
168 | 170 | ||
169 | QETH_CARD_TEXT(card, 2, "L2Dgmacb"); | 171 | QETH_CARD_TEXT(card, 2, "L2Sgmac"); |
170 | cmd = (struct qeth_ipa_cmd *) data; | 172 | rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, |
171 | mac = &cmd->data.setdelmac.mac[0]; | 173 | IPA_CMD_SETGMAC)); |
172 | if (cmd->hdr.return_code) | 174 | if (rc == -EEXIST) |
173 | QETH_DBF_MESSAGE(2, "Could not delete group MAC %pM on %s: %x\n", | 175 | QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s\n", |
174 | mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code); | 176 | mac, QETH_CARD_IFNAME(card)); |
175 | return 0; | 177 | else if (rc) |
178 | QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %d\n", | ||
179 | mac, QETH_CARD_IFNAME(card), rc); | ||
180 | return rc; | ||
176 | } | 181 | } |
177 | 182 | ||
178 | static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac) | 183 | static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac) |
179 | { | 184 | { |
185 | int rc; | ||
186 | |||
180 | QETH_CARD_TEXT(card, 2, "L2Dgmac"); | 187 | QETH_CARD_TEXT(card, 2, "L2Dgmac"); |
181 | return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC, | 188 | rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, |
182 | qeth_l2_send_delgroupmac_cb); | 189 | IPA_CMD_DELGMAC)); |
190 | if (rc) | ||
191 | QETH_DBF_MESSAGE(2, | ||
192 | "Could not delete group MAC %pM on %s: %d\n", | ||
193 | mac, QETH_CARD_IFNAME(card), rc); | ||
194 | return rc; | ||
183 | } | 195 | } |
184 | 196 | ||
185 | static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac) | 197 | static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac) |
@@ -197,10 +209,11 @@ static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac) | |||
197 | mc->is_vmac = vmac; | 209 | mc->is_vmac = vmac; |
198 | 210 | ||
199 | if (vmac) { | 211 | if (vmac) { |
200 | rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC, | 212 | rc = qeth_setdel_makerc(card, |
201 | NULL); | 213 | qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC)); |
202 | } else { | 214 | } else { |
203 | rc = qeth_l2_send_setgroupmac(card, mac); | 215 | rc = qeth_setdel_makerc(card, |
216 | qeth_l2_send_setgroupmac(card, mac)); | ||
204 | } | 217 | } |
205 | 218 | ||
206 | if (!rc) | 219 | if (!rc) |
@@ -218,7 +231,7 @@ static void qeth_l2_del_all_mc(struct qeth_card *card, int del) | |||
218 | if (del) { | 231 | if (del) { |
219 | if (mc->is_vmac) | 232 | if (mc->is_vmac) |
220 | qeth_l2_send_setdelmac(card, mc->mc_addr, | 233 | qeth_l2_send_setdelmac(card, mc->mc_addr, |
221 | IPA_CMD_DELVMAC, NULL); | 234 | IPA_CMD_DELVMAC); |
222 | else | 235 | else |
223 | qeth_l2_send_delgroupmac(card, mc->mc_addr); | 236 | qeth_l2_send_delgroupmac(card, mc->mc_addr); |
224 | } | 237 | } |
@@ -291,6 +304,8 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i, | |||
291 | 304 | ||
292 | QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd); | 305 | QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd); |
293 | iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); | 306 | iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); |
307 | if (!iob) | ||
308 | return -ENOMEM; | ||
294 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 309 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
295 | cmd->data.setdelvlan.vlan_id = i; | 310 | cmd->data.setdelvlan.vlan_id = i; |
296 | return qeth_send_ipa_cmd(card, iob, | 311 | return qeth_send_ipa_cmd(card, iob, |
@@ -313,6 +328,7 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, | |||
313 | { | 328 | { |
314 | struct qeth_card *card = dev->ml_priv; | 329 | struct qeth_card *card = dev->ml_priv; |
315 | struct qeth_vlan_vid *id; | 330 | struct qeth_vlan_vid *id; |
331 | int rc; | ||
316 | 332 | ||
317 | QETH_CARD_TEXT_(card, 4, "aid:%d", vid); | 333 | QETH_CARD_TEXT_(card, 4, "aid:%d", vid); |
318 | if (!vid) | 334 | if (!vid) |
@@ -328,7 +344,11 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, | |||
328 | id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC); | 344 | id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC); |
329 | if (id) { | 345 | if (id) { |
330 | id->vid = vid; | 346 | id->vid = vid; |
331 | qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN); | 347 | rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN); |
348 | if (rc) { | ||
349 | kfree(id); | ||
350 | return rc; | ||
351 | } | ||
332 | spin_lock_bh(&card->vlanlock); | 352 | spin_lock_bh(&card->vlanlock); |
333 | list_add_tail(&id->list, &card->vid_list); | 353 | list_add_tail(&id->list, &card->vid_list); |
334 | spin_unlock_bh(&card->vlanlock); | 354 | spin_unlock_bh(&card->vlanlock); |
@@ -343,6 +363,7 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, | |||
343 | { | 363 | { |
344 | struct qeth_vlan_vid *id, *tmpid = NULL; | 364 | struct qeth_vlan_vid *id, *tmpid = NULL; |
345 | struct qeth_card *card = dev->ml_priv; | 365 | struct qeth_card *card = dev->ml_priv; |
366 | int rc = 0; | ||
346 | 367 | ||
347 | QETH_CARD_TEXT_(card, 4, "kid:%d", vid); | 368 | QETH_CARD_TEXT_(card, 4, "kid:%d", vid); |
348 | if (card->info.type == QETH_CARD_TYPE_OSM) { | 369 | if (card->info.type == QETH_CARD_TYPE_OSM) { |
@@ -363,11 +384,11 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, | |||
363 | } | 384 | } |
364 | spin_unlock_bh(&card->vlanlock); | 385 | spin_unlock_bh(&card->vlanlock); |
365 | if (tmpid) { | 386 | if (tmpid) { |
366 | qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN); | 387 | rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN); |
367 | kfree(tmpid); | 388 | kfree(tmpid); |
368 | } | 389 | } |
369 | qeth_l2_set_multicast_list(card->dev); | 390 | qeth_l2_set_multicast_list(card->dev); |
370 | return 0; | 391 | return rc; |
371 | } | 392 | } |
372 | 393 | ||
373 | static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode) | 394 | static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode) |
@@ -539,91 +560,62 @@ out: | |||
539 | } | 560 | } |
540 | 561 | ||
541 | static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, | 562 | static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, |
542 | enum qeth_ipa_cmds ipacmd, | 563 | enum qeth_ipa_cmds ipacmd) |
543 | int (*reply_cb) (struct qeth_card *, | ||
544 | struct qeth_reply*, | ||
545 | unsigned long)) | ||
546 | { | 564 | { |
547 | struct qeth_ipa_cmd *cmd; | 565 | struct qeth_ipa_cmd *cmd; |
548 | struct qeth_cmd_buffer *iob; | 566 | struct qeth_cmd_buffer *iob; |
549 | 567 | ||
550 | QETH_CARD_TEXT(card, 2, "L2sdmac"); | 568 | QETH_CARD_TEXT(card, 2, "L2sdmac"); |
551 | iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); | 569 | iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); |
570 | if (!iob) | ||
571 | return -ENOMEM; | ||
552 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 572 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
553 | cmd->data.setdelmac.mac_length = OSA_ADDR_LEN; | 573 | cmd->data.setdelmac.mac_length = OSA_ADDR_LEN; |
554 | memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN); | 574 | memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN); |
555 | return qeth_send_ipa_cmd(card, iob, reply_cb, NULL); | 575 | return qeth_send_ipa_cmd(card, iob, NULL, NULL); |
556 | } | 576 | } |
557 | 577 | ||
558 | static int qeth_l2_send_setmac_cb(struct qeth_card *card, | 578 | static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) |
559 | struct qeth_reply *reply, | ||
560 | unsigned long data) | ||
561 | { | 579 | { |
562 | struct qeth_ipa_cmd *cmd; | 580 | int rc; |
563 | 581 | ||
564 | QETH_CARD_TEXT(card, 2, "L2Smaccb"); | 582 | QETH_CARD_TEXT(card, 2, "L2Setmac"); |
565 | cmd = (struct qeth_ipa_cmd *) data; | 583 | rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, |
566 | if (cmd->hdr.return_code) { | 584 | IPA_CMD_SETVMAC)); |
567 | QETH_CARD_TEXT_(card, 2, "L2er%x", cmd->hdr.return_code); | 585 | if (rc == 0) { |
586 | card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; | ||
587 | memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN); | ||
588 | dev_info(&card->gdev->dev, | ||
589 | "MAC address %pM successfully registered on device %s\n", | ||
590 | card->dev->dev_addr, card->dev->name); | ||
591 | } else { | ||
568 | card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; | 592 | card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; |
569 | switch (cmd->hdr.return_code) { | 593 | switch (rc) { |
570 | case IPA_RC_L2_DUP_MAC: | 594 | case -EEXIST: |
571 | case IPA_RC_L2_DUP_LAYER3_MAC: | ||
572 | dev_warn(&card->gdev->dev, | 595 | dev_warn(&card->gdev->dev, |
573 | "MAC address %pM already exists\n", | 596 | "MAC address %pM already exists\n", mac); |
574 | cmd->data.setdelmac.mac); | ||
575 | break; | 597 | break; |
576 | case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP: | 598 | case -EPERM: |
577 | case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP: | ||
578 | dev_warn(&card->gdev->dev, | 599 | dev_warn(&card->gdev->dev, |
579 | "MAC address %pM is not authorized\n", | 600 | "MAC address %pM is not authorized\n", mac); |
580 | cmd->data.setdelmac.mac); | ||
581 | break; | ||
582 | default: | ||
583 | break; | 601 | break; |
584 | } | 602 | } |
585 | } else { | ||
586 | card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; | ||
587 | memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac, | ||
588 | OSA_ADDR_LEN); | ||
589 | dev_info(&card->gdev->dev, | ||
590 | "MAC address %pM successfully registered on device %s\n", | ||
591 | card->dev->dev_addr, card->dev->name); | ||
592 | } | ||
593 | return 0; | ||
594 | } | ||
595 | |||
596 | static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) | ||
597 | { | ||
598 | QETH_CARD_TEXT(card, 2, "L2Setmac"); | ||
599 | return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC, | ||
600 | qeth_l2_send_setmac_cb); | ||
601 | } | ||
602 | |||
603 | static int qeth_l2_send_delmac_cb(struct qeth_card *card, | ||
604 | struct qeth_reply *reply, | ||
605 | unsigned long data) | ||
606 | { | ||
607 | struct qeth_ipa_cmd *cmd; | ||
608 | |||
609 | QETH_CARD_TEXT(card, 2, "L2Dmaccb"); | ||
610 | cmd = (struct qeth_ipa_cmd *) data; | ||
611 | if (cmd->hdr.return_code) { | ||
612 | QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code); | ||
613 | return 0; | ||
614 | } | 603 | } |
615 | card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; | 604 | return rc; |
616 | |||
617 | return 0; | ||
618 | } | 605 | } |
619 | 606 | ||
620 | static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac) | 607 | static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac) |
621 | { | 608 | { |
609 | int rc; | ||
610 | |||
622 | QETH_CARD_TEXT(card, 2, "L2Delmac"); | 611 | QETH_CARD_TEXT(card, 2, "L2Delmac"); |
623 | if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) | 612 | if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) |
624 | return 0; | 613 | return 0; |
625 | return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC, | 614 | rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, |
626 | qeth_l2_send_delmac_cb); | 615 | IPA_CMD_DELVMAC)); |
616 | if (rc == 0) | ||
617 | card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; | ||
618 | return rc; | ||
627 | } | 619 | } |
628 | 620 | ||
629 | static int qeth_l2_request_initial_mac(struct qeth_card *card) | 621 | static int qeth_l2_request_initial_mac(struct qeth_card *card) |
@@ -651,7 +643,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card) | |||
651 | if (rc) { | 643 | if (rc) { |
652 | QETH_DBF_MESSAGE(2, "couldn't get MAC address on " | 644 | QETH_DBF_MESSAGE(2, "couldn't get MAC address on " |
653 | "device %s: x%x\n", CARD_BUS_ID(card), rc); | 645 | "device %s: x%x\n", CARD_BUS_ID(card), rc); |
654 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); | 646 | QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc); |
655 | return rc; | 647 | return rc; |
656 | } | 648 | } |
657 | QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN); | 649 | QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN); |
@@ -687,7 +679,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) | |||
687 | return -ERESTARTSYS; | 679 | return -ERESTARTSYS; |
688 | } | 680 | } |
689 | rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]); | 681 | rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]); |
690 | if (!rc || (rc == IPA_RC_L2_MAC_NOT_FOUND)) | 682 | if (!rc || (rc == -ENOENT)) |
691 | rc = qeth_l2_send_setmac(card, addr->sa_data); | 683 | rc = qeth_l2_send_setmac(card, addr->sa_data); |
692 | return rc ? -EINVAL : 0; | 684 | return rc ? -EINVAL : 0; |
693 | } | 685 | } |
@@ -996,7 +988,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
996 | recover_flag = card->state; | 988 | recover_flag = card->state; |
997 | rc = qeth_core_hardsetup_card(card); | 989 | rc = qeth_core_hardsetup_card(card); |
998 | if (rc) { | 990 | if (rc) { |
999 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); | 991 | QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc); |
1000 | rc = -ENODEV; | 992 | rc = -ENODEV; |
1001 | goto out_remove; | 993 | goto out_remove; |
1002 | } | 994 | } |
@@ -1730,6 +1722,8 @@ static void qeth_bridgeport_query_support(struct qeth_card *card) | |||
1730 | 1722 | ||
1731 | QETH_CARD_TEXT(card, 2, "brqsuppo"); | 1723 | QETH_CARD_TEXT(card, 2, "brqsuppo"); |
1732 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); | 1724 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); |
1725 | if (!iob) | ||
1726 | return; | ||
1733 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 1727 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
1734 | cmd->data.sbp.hdr.cmdlength = | 1728 | cmd->data.sbp.hdr.cmdlength = |
1735 | sizeof(struct qeth_ipacmd_sbp_hdr) + | 1729 | sizeof(struct qeth_ipacmd_sbp_hdr) + |
@@ -1805,6 +1799,8 @@ int qeth_bridgeport_query_ports(struct qeth_card *card, | |||
1805 | if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS)) | 1799 | if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS)) |
1806 | return -EOPNOTSUPP; | 1800 | return -EOPNOTSUPP; |
1807 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); | 1801 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); |
1802 | if (!iob) | ||
1803 | return -ENOMEM; | ||
1808 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 1804 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
1809 | cmd->data.sbp.hdr.cmdlength = | 1805 | cmd->data.sbp.hdr.cmdlength = |
1810 | sizeof(struct qeth_ipacmd_sbp_hdr); | 1806 | sizeof(struct qeth_ipacmd_sbp_hdr); |
@@ -1817,9 +1813,7 @@ int qeth_bridgeport_query_ports(struct qeth_card *card, | |||
1817 | if (rc) | 1813 | if (rc) |
1818 | return rc; | 1814 | return rc; |
1819 | rc = qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS); | 1815 | rc = qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS); |
1820 | if (rc) | 1816 | return rc; |
1821 | return rc; | ||
1822 | return 0; | ||
1823 | } | 1817 | } |
1824 | EXPORT_SYMBOL_GPL(qeth_bridgeport_query_ports); | 1818 | EXPORT_SYMBOL_GPL(qeth_bridgeport_query_ports); |
1825 | 1819 | ||
@@ -1873,6 +1867,8 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role) | |||
1873 | if (!(card->options.sbp.supported_funcs & setcmd)) | 1867 | if (!(card->options.sbp.supported_funcs & setcmd)) |
1874 | return -EOPNOTSUPP; | 1868 | return -EOPNOTSUPP; |
1875 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); | 1869 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); |
1870 | if (!iob) | ||
1871 | return -ENOMEM; | ||
1876 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 1872 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
1877 | cmd->data.sbp.hdr.cmdlength = cmdlength; | 1873 | cmd->data.sbp.hdr.cmdlength = cmdlength; |
1878 | cmd->data.sbp.hdr.command_code = setcmd; | 1874 | cmd->data.sbp.hdr.command_code = setcmd; |
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 625227ad16ee..e2a0ee845399 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
@@ -549,6 +549,8 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card, | |||
549 | QETH_CARD_TEXT(card, 4, "setdelmc"); | 549 | QETH_CARD_TEXT(card, 4, "setdelmc"); |
550 | 550 | ||
551 | iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); | 551 | iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); |
552 | if (!iob) | ||
553 | return -ENOMEM; | ||
552 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 554 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
553 | memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN); | 555 | memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN); |
554 | if (addr->proto == QETH_PROT_IPV6) | 556 | if (addr->proto == QETH_PROT_IPV6) |
@@ -588,6 +590,8 @@ static int qeth_l3_send_setdelip(struct qeth_card *card, | |||
588 | QETH_CARD_TEXT_(card, 4, "flags%02X", flags); | 590 | QETH_CARD_TEXT_(card, 4, "flags%02X", flags); |
589 | 591 | ||
590 | iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); | 592 | iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); |
593 | if (!iob) | ||
594 | return -ENOMEM; | ||
591 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 595 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
592 | if (addr->proto == QETH_PROT_IPV6) { | 596 | if (addr->proto == QETH_PROT_IPV6) { |
593 | memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr, | 597 | memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr, |
@@ -616,6 +620,8 @@ static int qeth_l3_send_setrouting(struct qeth_card *card, | |||
616 | 620 | ||
617 | QETH_CARD_TEXT(card, 4, "setroutg"); | 621 | QETH_CARD_TEXT(card, 4, "setroutg"); |
618 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot); | 622 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot); |
623 | if (!iob) | ||
624 | return -ENOMEM; | ||
619 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 625 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
620 | cmd->data.setrtg.type = (type); | 626 | cmd->data.setrtg.type = (type); |
621 | rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); | 627 | rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); |
@@ -1049,12 +1055,14 @@ static struct qeth_cmd_buffer *qeth_l3_get_setassparms_cmd( | |||
1049 | QETH_CARD_TEXT(card, 4, "getasscm"); | 1055 | QETH_CARD_TEXT(card, 4, "getasscm"); |
1050 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot); | 1056 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot); |
1051 | 1057 | ||
1052 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 1058 | if (iob) { |
1053 | cmd->data.setassparms.hdr.assist_no = ipa_func; | 1059 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
1054 | cmd->data.setassparms.hdr.length = 8 + len; | 1060 | cmd->data.setassparms.hdr.assist_no = ipa_func; |
1055 | cmd->data.setassparms.hdr.command_code = cmd_code; | 1061 | cmd->data.setassparms.hdr.length = 8 + len; |
1056 | cmd->data.setassparms.hdr.return_code = 0; | 1062 | cmd->data.setassparms.hdr.command_code = cmd_code; |
1057 | cmd->data.setassparms.hdr.seq_no = 0; | 1063 | cmd->data.setassparms.hdr.return_code = 0; |
1064 | cmd->data.setassparms.hdr.seq_no = 0; | ||
1065 | } | ||
1058 | 1066 | ||
1059 | return iob; | 1067 | return iob; |
1060 | } | 1068 | } |
@@ -1090,6 +1098,8 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card, | |||
1090 | QETH_CARD_TEXT(card, 4, "simassp6"); | 1098 | QETH_CARD_TEXT(card, 4, "simassp6"); |
1091 | iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, | 1099 | iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, |
1092 | 0, QETH_PROT_IPV6); | 1100 | 0, QETH_PROT_IPV6); |
1101 | if (!iob) | ||
1102 | return -ENOMEM; | ||
1093 | rc = qeth_l3_send_setassparms(card, iob, 0, 0, | 1103 | rc = qeth_l3_send_setassparms(card, iob, 0, 0, |
1094 | qeth_l3_default_setassparms_cb, NULL); | 1104 | qeth_l3_default_setassparms_cb, NULL); |
1095 | return rc; | 1105 | return rc; |
@@ -1108,6 +1118,8 @@ static int qeth_l3_send_simple_setassparms(struct qeth_card *card, | |||
1108 | length = sizeof(__u32); | 1118 | length = sizeof(__u32); |
1109 | iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, | 1119 | iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, |
1110 | length, QETH_PROT_IPV4); | 1120 | length, QETH_PROT_IPV4); |
1121 | if (!iob) | ||
1122 | return -ENOMEM; | ||
1111 | rc = qeth_l3_send_setassparms(card, iob, length, data, | 1123 | rc = qeth_l3_send_setassparms(card, iob, length, data, |
1112 | qeth_l3_default_setassparms_cb, NULL); | 1124 | qeth_l3_default_setassparms_cb, NULL); |
1113 | return rc; | 1125 | return rc; |
@@ -1494,6 +1506,8 @@ static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card) | |||
1494 | 1506 | ||
1495 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR, | 1507 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR, |
1496 | QETH_PROT_IPV6); | 1508 | QETH_PROT_IPV6); |
1509 | if (!iob) | ||
1510 | return -ENOMEM; | ||
1497 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 1511 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
1498 | *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = | 1512 | *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = |
1499 | card->info.unique_id; | 1513 | card->info.unique_id; |
@@ -1537,6 +1551,8 @@ static int qeth_l3_get_unique_id(struct qeth_card *card) | |||
1537 | 1551 | ||
1538 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR, | 1552 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR, |
1539 | QETH_PROT_IPV6); | 1553 | QETH_PROT_IPV6); |
1554 | if (!iob) | ||
1555 | return -ENOMEM; | ||
1540 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 1556 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
1541 | *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = | 1557 | *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = |
1542 | card->info.unique_id; | 1558 | card->info.unique_id; |
@@ -1611,6 +1627,8 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd) | |||
1611 | QETH_DBF_TEXT(SETUP, 2, "diagtrac"); | 1627 | QETH_DBF_TEXT(SETUP, 2, "diagtrac"); |
1612 | 1628 | ||
1613 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); | 1629 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); |
1630 | if (!iob) | ||
1631 | return -ENOMEM; | ||
1614 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 1632 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
1615 | cmd->data.diagass.subcmd_len = 16; | 1633 | cmd->data.diagass.subcmd_len = 16; |
1616 | cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE; | 1634 | cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE; |
@@ -2442,6 +2460,8 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card, | |||
2442 | IPA_CMD_ASS_ARP_QUERY_INFO, | 2460 | IPA_CMD_ASS_ARP_QUERY_INFO, |
2443 | sizeof(struct qeth_arp_query_data) - sizeof(char), | 2461 | sizeof(struct qeth_arp_query_data) - sizeof(char), |
2444 | prot); | 2462 | prot); |
2463 | if (!iob) | ||
2464 | return -ENOMEM; | ||
2445 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 2465 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
2446 | cmd->data.setassparms.data.query_arp.request_bits = 0x000F; | 2466 | cmd->data.setassparms.data.query_arp.request_bits = 0x000F; |
2447 | cmd->data.setassparms.data.query_arp.reply_bits = 0; | 2467 | cmd->data.setassparms.data.query_arp.reply_bits = 0; |
@@ -2535,6 +2555,8 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card, | |||
2535 | IPA_CMD_ASS_ARP_ADD_ENTRY, | 2555 | IPA_CMD_ASS_ARP_ADD_ENTRY, |
2536 | sizeof(struct qeth_arp_cache_entry), | 2556 | sizeof(struct qeth_arp_cache_entry), |
2537 | QETH_PROT_IPV4); | 2557 | QETH_PROT_IPV4); |
2558 | if (!iob) | ||
2559 | return -ENOMEM; | ||
2538 | rc = qeth_l3_send_setassparms(card, iob, | 2560 | rc = qeth_l3_send_setassparms(card, iob, |
2539 | sizeof(struct qeth_arp_cache_entry), | 2561 | sizeof(struct qeth_arp_cache_entry), |
2540 | (unsigned long) entry, | 2562 | (unsigned long) entry, |
@@ -2574,6 +2596,8 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card, | |||
2574 | IPA_CMD_ASS_ARP_REMOVE_ENTRY, | 2596 | IPA_CMD_ASS_ARP_REMOVE_ENTRY, |
2575 | 12, | 2597 | 12, |
2576 | QETH_PROT_IPV4); | 2598 | QETH_PROT_IPV4); |
2599 | if (!iob) | ||
2600 | return -ENOMEM; | ||
2577 | rc = qeth_l3_send_setassparms(card, iob, | 2601 | rc = qeth_l3_send_setassparms(card, iob, |
2578 | 12, (unsigned long)buf, | 2602 | 12, (unsigned long)buf, |
2579 | qeth_l3_default_setassparms_cb, NULL); | 2603 | qeth_l3_default_setassparms_cb, NULL); |
@@ -3262,6 +3286,8 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = { | |||
3262 | 3286 | ||
3263 | static int qeth_l3_setup_netdev(struct qeth_card *card) | 3287 | static int qeth_l3_setup_netdev(struct qeth_card *card) |
3264 | { | 3288 | { |
3289 | int rc; | ||
3290 | |||
3265 | if (card->info.type == QETH_CARD_TYPE_OSD || | 3291 | if (card->info.type == QETH_CARD_TYPE_OSD || |
3266 | card->info.type == QETH_CARD_TYPE_OSX) { | 3292 | card->info.type == QETH_CARD_TYPE_OSX) { |
3267 | if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) || | 3293 | if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) || |
@@ -3293,7 +3319,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) | |||
3293 | return -ENODEV; | 3319 | return -ENODEV; |
3294 | card->dev->flags |= IFF_NOARP; | 3320 | card->dev->flags |= IFF_NOARP; |
3295 | card->dev->netdev_ops = &qeth_l3_netdev_ops; | 3321 | card->dev->netdev_ops = &qeth_l3_netdev_ops; |
3296 | qeth_l3_iqd_read_initial_mac(card); | 3322 | rc = qeth_l3_iqd_read_initial_mac(card); |
3323 | if (rc) | ||
3324 | return rc; | ||
3297 | if (card->options.hsuid[0]) | 3325 | if (card->options.hsuid[0]) |
3298 | memcpy(card->dev->perm_addr, card->options.hsuid, 9); | 3326 | memcpy(card->dev->perm_addr, card->options.hsuid, 9); |
3299 | } else | 3327 | } else |
@@ -3360,7 +3388,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
3360 | recover_flag = card->state; | 3388 | recover_flag = card->state; |
3361 | rc = qeth_core_hardsetup_card(card); | 3389 | rc = qeth_core_hardsetup_card(card); |
3362 | if (rc) { | 3390 | if (rc) { |
3363 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); | 3391 | QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc); |
3364 | rc = -ENODEV; | 3392 | rc = -ENODEV; |
3365 | goto out_remove; | 3393 | goto out_remove; |
3366 | } | 3394 | } |
@@ -3401,7 +3429,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
3401 | contin: | 3429 | contin: |
3402 | rc = qeth_l3_setadapter_parms(card); | 3430 | rc = qeth_l3_setadapter_parms(card); |
3403 | if (rc) | 3431 | if (rc) |
3404 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); | 3432 | QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc); |
3405 | if (!card->options.sniffer) { | 3433 | if (!card->options.sniffer) { |
3406 | rc = qeth_l3_start_ipassists(card); | 3434 | rc = qeth_l3_start_ipassists(card); |
3407 | if (rc) { | 3435 | if (rc) { |
@@ -3410,10 +3438,10 @@ contin: | |||
3410 | } | 3438 | } |
3411 | rc = qeth_l3_setrouting_v4(card); | 3439 | rc = qeth_l3_setrouting_v4(card); |
3412 | if (rc) | 3440 | if (rc) |
3413 | QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc); | 3441 | QETH_DBF_TEXT_(SETUP, 2, "4err%04x", rc); |
3414 | rc = qeth_l3_setrouting_v6(card); | 3442 | rc = qeth_l3_setrouting_v6(card); |
3415 | if (rc) | 3443 | if (rc) |
3416 | QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); | 3444 | QETH_DBF_TEXT_(SETUP, 2, "5err%04x", rc); |
3417 | } | 3445 | } |
3418 | netif_tx_disable(card->dev); | 3446 | netif_tx_disable(card->dev); |
3419 | 3447 | ||
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c index 1dba62c5cf6a..1efebc9eedfb 100644 --- a/drivers/scsi/device_handler/scsi_dh.c +++ b/drivers/scsi/device_handler/scsi_dh.c | |||
@@ -136,11 +136,12 @@ static void __detach_handler (struct kref *kref) | |||
136 | struct scsi_device_handler *scsi_dh = scsi_dh_data->scsi_dh; | 136 | struct scsi_device_handler *scsi_dh = scsi_dh_data->scsi_dh; |
137 | struct scsi_device *sdev = scsi_dh_data->sdev; | 137 | struct scsi_device *sdev = scsi_dh_data->sdev; |
138 | 138 | ||
139 | scsi_dh->detach(sdev); | ||
140 | |||
139 | spin_lock_irq(sdev->request_queue->queue_lock); | 141 | spin_lock_irq(sdev->request_queue->queue_lock); |
140 | sdev->scsi_dh_data = NULL; | 142 | sdev->scsi_dh_data = NULL; |
141 | spin_unlock_irq(sdev->request_queue->queue_lock); | 143 | spin_unlock_irq(sdev->request_queue->queue_lock); |
142 | 144 | ||
143 | scsi_dh->detach(sdev); | ||
144 | sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", scsi_dh->name); | 145 | sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", scsi_dh->name); |
145 | module_put(scsi_dh->module); | 146 | module_put(scsi_dh->module); |
146 | } | 147 | } |
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c index 6776931e25d4..78ce4d61a69b 100644 --- a/drivers/scsi/esas2r/esas2r_init.c +++ b/drivers/scsi/esas2r/esas2r_init.c | |||
@@ -813,12 +813,13 @@ static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a) | |||
813 | pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL, | 813 | pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL, |
814 | &devcontrol); | 814 | &devcontrol); |
815 | 815 | ||
816 | if ((devcontrol & PCI_EXP_DEVCTL_READRQ) > 0x2000) { | 816 | if ((devcontrol & PCI_EXP_DEVCTL_READRQ) > |
817 | PCI_EXP_DEVCTL_READRQ_512B) { | ||
817 | esas2r_log(ESAS2R_LOG_INFO, | 818 | esas2r_log(ESAS2R_LOG_INFO, |
818 | "max read request size > 512B"); | 819 | "max read request size > 512B"); |
819 | 820 | ||
820 | devcontrol &= ~PCI_EXP_DEVCTL_READRQ; | 821 | devcontrol &= ~PCI_EXP_DEVCTL_READRQ; |
821 | devcontrol |= 0x2000; | 822 | devcontrol |= PCI_EXP_DEVCTL_READRQ_512B; |
822 | pci_write_config_word(a->pcid, | 823 | pci_write_config_word(a->pcid, |
823 | pcie_cap_reg + PCI_EXP_DEVCTL, | 824 | pcie_cap_reg + PCI_EXP_DEVCTL, |
824 | devcontrol); | 825 | devcontrol); |
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index e02885451425..9b3829931f40 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c | |||
@@ -986,9 +986,9 @@ int scsi_device_get(struct scsi_device *sdev) | |||
986 | return -ENXIO; | 986 | return -ENXIO; |
987 | if (!get_device(&sdev->sdev_gendev)) | 987 | if (!get_device(&sdev->sdev_gendev)) |
988 | return -ENXIO; | 988 | return -ENXIO; |
989 | /* We can fail this if we're doing SCSI operations | 989 | /* We can fail try_module_get if we're doing SCSI operations |
990 | * from module exit (like cache flush) */ | 990 | * from module exit (like cache flush) */ |
991 | try_module_get(sdev->host->hostt->module); | 991 | __module_get(sdev->host->hostt->module); |
992 | 992 | ||
993 | return 0; | 993 | return 0; |
994 | } | 994 | } |
@@ -1004,14 +1004,7 @@ EXPORT_SYMBOL(scsi_device_get); | |||
1004 | */ | 1004 | */ |
1005 | void scsi_device_put(struct scsi_device *sdev) | 1005 | void scsi_device_put(struct scsi_device *sdev) |
1006 | { | 1006 | { |
1007 | #ifdef CONFIG_MODULE_UNLOAD | 1007 | module_put(sdev->host->hostt->module); |
1008 | struct module *module = sdev->host->hostt->module; | ||
1009 | |||
1010 | /* The module refcount will be zero if scsi_device_get() | ||
1011 | * was called from a module removal routine */ | ||
1012 | if (module && module_refcount(module) != 0) | ||
1013 | module_put(module); | ||
1014 | #endif | ||
1015 | put_device(&sdev->sdev_gendev); | 1008 | put_device(&sdev->sdev_gendev); |
1016 | } | 1009 | } |
1017 | EXPORT_SYMBOL(scsi_device_put); | 1010 | EXPORT_SYMBOL(scsi_device_put); |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 399516925d80..05ea0d49a3a3 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -2800,9 +2800,11 @@ static int sd_revalidate_disk(struct gendisk *disk) | |||
2800 | */ | 2800 | */ |
2801 | sd_set_flush_flag(sdkp); | 2801 | sd_set_flush_flag(sdkp); |
2802 | 2802 | ||
2803 | max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), | 2803 | max_xfer = sdkp->max_xfer_blocks; |
2804 | sdkp->max_xfer_blocks); | ||
2805 | max_xfer <<= ilog2(sdp->sector_size) - 9; | 2804 | max_xfer <<= ilog2(sdp->sector_size) - 9; |
2805 | |||
2806 | max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), | ||
2807 | max_xfer); | ||
2806 | blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer); | 2808 | blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer); |
2807 | set_capacity(disk, sdkp->capacity); | 2809 | set_capacity(disk, sdkp->capacity); |
2808 | sd_config_write_same(sdkp); | 2810 | sd_config_write_same(sdkp); |
diff --git a/drivers/sfi/sfi_core.c b/drivers/sfi/sfi_core.c index 1e824fb1649b..296db7a69c27 100644 --- a/drivers/sfi/sfi_core.c +++ b/drivers/sfi/sfi_core.c | |||
@@ -161,7 +161,7 @@ static int sfi_verify_table(struct sfi_table_header *table) | |||
161 | * Check for common case that we can re-use mapping to SYST, | 161 | * Check for common case that we can re-use mapping to SYST, |
162 | * which requires syst_pa, syst_va to be initialized. | 162 | * which requires syst_pa, syst_va to be initialized. |
163 | */ | 163 | */ |
164 | struct sfi_table_header *sfi_map_table(u64 pa) | 164 | static struct sfi_table_header *sfi_map_table(u64 pa) |
165 | { | 165 | { |
166 | struct sfi_table_header *th; | 166 | struct sfi_table_header *th; |
167 | u32 length; | 167 | u32 length; |
@@ -189,7 +189,7 @@ struct sfi_table_header *sfi_map_table(u64 pa) | |||
189 | * Undoes effect of sfi_map_table() by unmapping table | 189 | * Undoes effect of sfi_map_table() by unmapping table |
190 | * if it did not completely fit on same page as SYST. | 190 | * if it did not completely fit on same page as SYST. |
191 | */ | 191 | */ |
192 | void sfi_unmap_table(struct sfi_table_header *th) | 192 | static void sfi_unmap_table(struct sfi_table_header *th) |
193 | { | 193 | { |
194 | if (!TABLE_ON_PAGE(syst_va, th, th->len)) | 194 | if (!TABLE_ON_PAGE(syst_va, th, th->len)) |
195 | sfi_unmap_memory(th, TABLE_ON_PAGE(th, th, th->len) ? | 195 | sfi_unmap_memory(th, TABLE_ON_PAGE(th, th, th->len) ? |
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 99829985c1a1..95ccedabba4f 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig | |||
@@ -185,6 +185,16 @@ config SPI_DAVINCI | |||
185 | help | 185 | help |
186 | SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules. | 186 | SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules. |
187 | 187 | ||
188 | config SPI_DLN2 | ||
189 | tristate "Diolan DLN-2 USB SPI adapter" | ||
190 | depends on MFD_DLN2 | ||
191 | help | ||
192 | If you say yes to this option, support will be included for Diolan | ||
193 | DLN2, a USB to SPI interface. | ||
194 | |||
195 | This driver can also be built as a module. If so, the module | ||
196 | will be called spi-dln2. | ||
197 | |||
188 | config SPI_EFM32 | 198 | config SPI_EFM32 |
189 | tristate "EFM32 SPI controller" | 199 | tristate "EFM32 SPI controller" |
190 | depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST) | 200 | depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST) |
@@ -279,7 +289,7 @@ config SPI_FSL_CPM | |||
279 | depends on FSL_SOC | 289 | depends on FSL_SOC |
280 | 290 | ||
281 | config SPI_FSL_SPI | 291 | config SPI_FSL_SPI |
282 | bool "Freescale SPI controller and Aeroflex Gaisler GRLIB SPI controller" | 292 | tristate "Freescale SPI controller and Aeroflex Gaisler GRLIB SPI controller" |
283 | depends on OF | 293 | depends on OF |
284 | select SPI_FSL_LIB | 294 | select SPI_FSL_LIB |
285 | select SPI_FSL_CPM if FSL_SOC | 295 | select SPI_FSL_CPM if FSL_SOC |
@@ -292,7 +302,6 @@ config SPI_FSL_SPI | |||
292 | 302 | ||
293 | config SPI_FSL_DSPI | 303 | config SPI_FSL_DSPI |
294 | tristate "Freescale DSPI controller" | 304 | tristate "Freescale DSPI controller" |
295 | select SPI_BITBANG | ||
296 | select REGMAP_MMIO | 305 | select REGMAP_MMIO |
297 | depends on SOC_VF610 || COMPILE_TEST | 306 | depends on SOC_VF610 || COMPILE_TEST |
298 | help | 307 | help |
@@ -300,7 +309,7 @@ config SPI_FSL_DSPI | |||
300 | mode. VF610 platform uses the controller. | 309 | mode. VF610 platform uses the controller. |
301 | 310 | ||
302 | config SPI_FSL_ESPI | 311 | config SPI_FSL_ESPI |
303 | bool "Freescale eSPI controller" | 312 | tristate "Freescale eSPI controller" |
304 | depends on FSL_SOC | 313 | depends on FSL_SOC |
305 | select SPI_FSL_LIB | 314 | select SPI_FSL_LIB |
306 | help | 315 | help |
@@ -460,7 +469,6 @@ config SPI_S3C24XX_FIQ | |||
460 | config SPI_S3C64XX | 469 | config SPI_S3C64XX |
461 | tristate "Samsung S3C64XX series type SPI" | 470 | tristate "Samsung S3C64XX series type SPI" |
462 | depends on (PLAT_SAMSUNG || ARCH_EXYNOS) | 471 | depends on (PLAT_SAMSUNG || ARCH_EXYNOS) |
463 | select S3C64XX_PL080 if ARCH_S3C64XX | ||
464 | help | 472 | help |
465 | SPI driver for Samsung S3C64XX and newer SoCs. | 473 | SPI driver for Samsung S3C64XX and newer SoCs. |
466 | 474 | ||
@@ -503,6 +511,13 @@ config SPI_SIRF | |||
503 | help | 511 | help |
504 | SPI driver for CSR SiRFprimaII SoCs | 512 | SPI driver for CSR SiRFprimaII SoCs |
505 | 513 | ||
514 | config SPI_ST_SSC4 | ||
515 | tristate "STMicroelectronics SPI SSC-based driver" | ||
516 | depends on ARCH_STI | ||
517 | help | ||
518 | STMicroelectronics SoCs support for SPI. If you say yes to | ||
519 | this option, support will be included for the SSC driven SPI. | ||
520 | |||
506 | config SPI_SUN4I | 521 | config SPI_SUN4I |
507 | tristate "Allwinner A10 SoCs SPI controller" | 522 | tristate "Allwinner A10 SoCs SPI controller" |
508 | depends on ARCH_SUNXI || COMPILE_TEST | 523 | depends on ARCH_SUNXI || COMPILE_TEST |
@@ -595,7 +610,6 @@ config SPI_XTENSA_XTFPGA | |||
595 | 16 bit words in SPI mode 0, automatically asserting CS on transfer | 610 | 16 bit words in SPI mode 0, automatically asserting CS on transfer |
596 | start and deasserting on end. | 611 | start and deasserting on end. |
597 | 612 | ||
598 | |||
599 | config SPI_NUC900 | 613 | config SPI_NUC900 |
600 | tristate "Nuvoton NUC900 series SPI" | 614 | tristate "Nuvoton NUC900 series SPI" |
601 | depends on ARCH_W90X900 | 615 | depends on ARCH_W90X900 |
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 6b9d2ac629cc..d8cbf654976b 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile | |||
@@ -27,6 +27,7 @@ obj-$(CONFIG_SPI_CADENCE) += spi-cadence.o | |||
27 | obj-$(CONFIG_SPI_CLPS711X) += spi-clps711x.o | 27 | obj-$(CONFIG_SPI_CLPS711X) += spi-clps711x.o |
28 | obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o | 28 | obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o |
29 | obj-$(CONFIG_SPI_DAVINCI) += spi-davinci.o | 29 | obj-$(CONFIG_SPI_DAVINCI) += spi-davinci.o |
30 | obj-$(CONFIG_SPI_DLN2) += spi-dln2.o | ||
30 | obj-$(CONFIG_SPI_DESIGNWARE) += spi-dw.o | 31 | obj-$(CONFIG_SPI_DESIGNWARE) += spi-dw.o |
31 | obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o | 32 | obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o |
32 | obj-$(CONFIG_SPI_DW_PCI) += spi-dw-midpci.o | 33 | obj-$(CONFIG_SPI_DW_PCI) += spi-dw-midpci.o |
@@ -76,6 +77,7 @@ obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o | |||
76 | obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o | 77 | obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o |
77 | obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o | 78 | obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o |
78 | obj-$(CONFIG_SPI_SIRF) += spi-sirf.o | 79 | obj-$(CONFIG_SPI_SIRF) += spi-sirf.o |
80 | obj-$(CONFIG_SPI_ST_SSC4) += spi-st-ssc4.o | ||
79 | obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o | 81 | obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o |
80 | obj-$(CONFIG_SPI_SUN6I) += spi-sun6i.o | 82 | obj-$(CONFIG_SPI_SUN6I) += spi-sun6i.o |
81 | obj-$(CONFIG_SPI_TEGRA114) += spi-tegra114.o | 83 | obj-$(CONFIG_SPI_TEGRA114) += spi-tegra114.o |
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index 23d8f5f56579..9af7841f2e8c 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c | |||
@@ -1046,6 +1046,7 @@ static int atmel_spi_one_transfer(struct spi_master *master, | |||
1046 | struct atmel_spi_device *asd; | 1046 | struct atmel_spi_device *asd; |
1047 | int timeout; | 1047 | int timeout; |
1048 | int ret; | 1048 | int ret; |
1049 | unsigned long dma_timeout; | ||
1049 | 1050 | ||
1050 | as = spi_master_get_devdata(master); | 1051 | as = spi_master_get_devdata(master); |
1051 | 1052 | ||
@@ -1103,15 +1104,12 @@ static int atmel_spi_one_transfer(struct spi_master *master, | |||
1103 | 1104 | ||
1104 | /* interrupts are disabled, so free the lock for schedule */ | 1105 | /* interrupts are disabled, so free the lock for schedule */ |
1105 | atmel_spi_unlock(as); | 1106 | atmel_spi_unlock(as); |
1106 | ret = wait_for_completion_timeout(&as->xfer_completion, | 1107 | dma_timeout = wait_for_completion_timeout(&as->xfer_completion, |
1107 | SPI_DMA_TIMEOUT); | 1108 | SPI_DMA_TIMEOUT); |
1108 | atmel_spi_lock(as); | 1109 | atmel_spi_lock(as); |
1109 | if (WARN_ON(ret == 0)) { | 1110 | if (WARN_ON(dma_timeout == 0)) { |
1110 | dev_err(&spi->dev, | 1111 | dev_err(&spi->dev, "spi transfer timeout\n"); |
1111 | "spi trasfer timeout, err %d\n", ret); | ||
1112 | as->done_status = -EIO; | 1112 | as->done_status = -EIO; |
1113 | } else { | ||
1114 | ret = 0; | ||
1115 | } | 1113 | } |
1116 | 1114 | ||
1117 | if (as->done_status) | 1115 | if (as->done_status) |
diff --git a/drivers/spi/spi-au1550.c b/drivers/spi/spi-au1550.c index 326f47973684..f45e085c01a6 100644 --- a/drivers/spi/spi-au1550.c +++ b/drivers/spi/spi-au1550.c | |||
@@ -15,10 +15,6 @@ | |||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
17 | * GNU General Public License for more details. | 17 | * GNU General Public License for more details. |
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
22 | */ | 18 | */ |
23 | 19 | ||
24 | #include <linux/init.h> | 20 | #include <linux/init.h> |
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index 98aab457b24d..419a782ab6d5 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c | |||
@@ -17,10 +17,6 @@ | |||
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
19 | * GNU General Public License for more details. | 19 | * GNU General Public License for more details. |
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, write to the Free Software | ||
23 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
24 | */ | 20 | */ |
25 | 21 | ||
26 | #include <linux/clk.h> | 22 | #include <linux/clk.h> |
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c index c20530982e26..e73e2b052c9c 100644 --- a/drivers/spi/spi-bcm63xx.c +++ b/drivers/spi/spi-bcm63xx.c | |||
@@ -13,10 +13,6 @@ | |||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
15 | * GNU General Public License for more details. | 15 | * GNU General Public License for more details. |
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the | ||
19 | * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, | ||
20 | */ | 16 | */ |
21 | 17 | ||
22 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c index dc7d2c2d643e..5ef6638d5e8a 100644 --- a/drivers/spi/spi-bitbang.c +++ b/drivers/spi/spi-bitbang.c | |||
@@ -10,10 +10,6 @@ | |||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
12 | * GNU General Public License for more details. | 12 | * GNU General Public License for more details. |
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | */ | 13 | */ |
18 | 14 | ||
19 | #include <linux/spinlock.h> | 15 | #include <linux/spinlock.h> |
diff --git a/drivers/spi/spi-butterfly.c b/drivers/spi/spi-butterfly.c index ee4f91ccd8fd..9a95862986c8 100644 --- a/drivers/spi/spi-butterfly.c +++ b/drivers/spi/spi-butterfly.c | |||
@@ -12,10 +12,6 @@ | |||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
19 | */ | 15 | */ |
20 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
21 | #include <linux/init.h> | 17 | #include <linux/init.h> |
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c index 41b5dc4445f6..688956ff5095 100644 --- a/drivers/spi/spi-coldfire-qspi.c +++ b/drivers/spi/spi-coldfire-qspi.c | |||
@@ -12,11 +12,6 @@ | |||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA | ||
19 | * | ||
20 | */ | 15 | */ |
21 | 16 | ||
22 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index b3707badb1e5..5e991065f5b0 100644 --- a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c | |||
@@ -11,10 +11,6 @@ | |||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
13 | * GNU General Public License for more details. | 13 | * GNU General Public License for more details. |
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | */ | 14 | */ |
19 | 15 | ||
20 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
diff --git a/drivers/spi/spi-dln2.c b/drivers/spi/spi-dln2.c new file mode 100644 index 000000000000..3b7d91d94fea --- /dev/null +++ b/drivers/spi/spi-dln2.c | |||
@@ -0,0 +1,881 @@ | |||
1 | /* | ||
2 | * Driver for the Diolan DLN-2 USB-SPI adapter | ||
3 | * | ||
4 | * Copyright (c) 2014 Intel Corporation | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License as | ||
8 | * published by the Free Software Foundation, version 2. | ||
9 | */ | ||
10 | |||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/platform_device.h> | ||
14 | #include <linux/mfd/dln2.h> | ||
15 | #include <linux/spi/spi.h> | ||
16 | #include <linux/pm_runtime.h> | ||
17 | #include <asm/unaligned.h> | ||
18 | |||
19 | #define DLN2_SPI_MODULE_ID 0x02 | ||
20 | #define DLN2_SPI_CMD(cmd) DLN2_CMD(cmd, DLN2_SPI_MODULE_ID) | ||
21 | |||
22 | /* SPI commands */ | ||
23 | #define DLN2_SPI_GET_PORT_COUNT DLN2_SPI_CMD(0x00) | ||
24 | #define DLN2_SPI_ENABLE DLN2_SPI_CMD(0x11) | ||
25 | #define DLN2_SPI_DISABLE DLN2_SPI_CMD(0x12) | ||
26 | #define DLN2_SPI_IS_ENABLED DLN2_SPI_CMD(0x13) | ||
27 | #define DLN2_SPI_SET_MODE DLN2_SPI_CMD(0x14) | ||
28 | #define DLN2_SPI_GET_MODE DLN2_SPI_CMD(0x15) | ||
29 | #define DLN2_SPI_SET_FRAME_SIZE DLN2_SPI_CMD(0x16) | ||
30 | #define DLN2_SPI_GET_FRAME_SIZE DLN2_SPI_CMD(0x17) | ||
31 | #define DLN2_SPI_SET_FREQUENCY DLN2_SPI_CMD(0x18) | ||
32 | #define DLN2_SPI_GET_FREQUENCY DLN2_SPI_CMD(0x19) | ||
33 | #define DLN2_SPI_READ_WRITE DLN2_SPI_CMD(0x1A) | ||
34 | #define DLN2_SPI_READ DLN2_SPI_CMD(0x1B) | ||
35 | #define DLN2_SPI_WRITE DLN2_SPI_CMD(0x1C) | ||
36 | #define DLN2_SPI_SET_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x20) | ||
37 | #define DLN2_SPI_GET_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x21) | ||
38 | #define DLN2_SPI_SET_DELAY_AFTER_SS DLN2_SPI_CMD(0x22) | ||
39 | #define DLN2_SPI_GET_DELAY_AFTER_SS DLN2_SPI_CMD(0x23) | ||
40 | #define DLN2_SPI_SET_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x24) | ||
41 | #define DLN2_SPI_GET_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x25) | ||
42 | #define DLN2_SPI_SET_SS DLN2_SPI_CMD(0x26) | ||
43 | #define DLN2_SPI_GET_SS DLN2_SPI_CMD(0x27) | ||
44 | #define DLN2_SPI_RELEASE_SS DLN2_SPI_CMD(0x28) | ||
45 | #define DLN2_SPI_SS_VARIABLE_ENABLE DLN2_SPI_CMD(0x2B) | ||
46 | #define DLN2_SPI_SS_VARIABLE_DISABLE DLN2_SPI_CMD(0x2C) | ||
47 | #define DLN2_SPI_SS_VARIABLE_IS_ENABLED DLN2_SPI_CMD(0x2D) | ||
48 | #define DLN2_SPI_SS_AAT_ENABLE DLN2_SPI_CMD(0x2E) | ||
49 | #define DLN2_SPI_SS_AAT_DISABLE DLN2_SPI_CMD(0x2F) | ||
50 | #define DLN2_SPI_SS_AAT_IS_ENABLED DLN2_SPI_CMD(0x30) | ||
51 | #define DLN2_SPI_SS_BETWEEN_FRAMES_ENABLE DLN2_SPI_CMD(0x31) | ||
52 | #define DLN2_SPI_SS_BETWEEN_FRAMES_DISABLE DLN2_SPI_CMD(0x32) | ||
53 | #define DLN2_SPI_SS_BETWEEN_FRAMES_IS_ENABLED DLN2_SPI_CMD(0x33) | ||
54 | #define DLN2_SPI_SET_CPHA DLN2_SPI_CMD(0x34) | ||
55 | #define DLN2_SPI_GET_CPHA DLN2_SPI_CMD(0x35) | ||
56 | #define DLN2_SPI_SET_CPOL DLN2_SPI_CMD(0x36) | ||
57 | #define DLN2_SPI_GET_CPOL DLN2_SPI_CMD(0x37) | ||
58 | #define DLN2_SPI_SS_MULTI_ENABLE DLN2_SPI_CMD(0x38) | ||
59 | #define DLN2_SPI_SS_MULTI_DISABLE DLN2_SPI_CMD(0x39) | ||
60 | #define DLN2_SPI_SS_MULTI_IS_ENABLED DLN2_SPI_CMD(0x3A) | ||
61 | #define DLN2_SPI_GET_SUPPORTED_MODES DLN2_SPI_CMD(0x40) | ||
62 | #define DLN2_SPI_GET_SUPPORTED_CPHA_VALUES DLN2_SPI_CMD(0x41) | ||
63 | #define DLN2_SPI_GET_SUPPORTED_CPOL_VALUES DLN2_SPI_CMD(0x42) | ||
64 | #define DLN2_SPI_GET_SUPPORTED_FRAME_SIZES DLN2_SPI_CMD(0x43) | ||
65 | #define DLN2_SPI_GET_SS_COUNT DLN2_SPI_CMD(0x44) | ||
66 | #define DLN2_SPI_GET_MIN_FREQUENCY DLN2_SPI_CMD(0x45) | ||
67 | #define DLN2_SPI_GET_MAX_FREQUENCY DLN2_SPI_CMD(0x46) | ||
68 | #define DLN2_SPI_GET_MIN_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x47) | ||
69 | #define DLN2_SPI_GET_MAX_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x48) | ||
70 | #define DLN2_SPI_GET_MIN_DELAY_AFTER_SS DLN2_SPI_CMD(0x49) | ||
71 | #define DLN2_SPI_GET_MAX_DELAY_AFTER_SS DLN2_SPI_CMD(0x4A) | ||
72 | #define DLN2_SPI_GET_MIN_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x4B) | ||
73 | #define DLN2_SPI_GET_MAX_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x4C) | ||
74 | |||
75 | #define DLN2_SPI_MAX_XFER_SIZE 256 | ||
76 | #define DLN2_SPI_BUF_SIZE (DLN2_SPI_MAX_XFER_SIZE + 16) | ||
77 | #define DLN2_SPI_ATTR_LEAVE_SS_LOW BIT(0) | ||
78 | #define DLN2_TRANSFERS_WAIT_COMPLETE 1 | ||
79 | #define DLN2_TRANSFERS_CANCEL 0 | ||
80 | #define DLN2_RPM_AUTOSUSPEND_TIMEOUT 2000 | ||
81 | |||
82 | struct dln2_spi { | ||
83 | struct platform_device *pdev; | ||
84 | struct spi_master *master; | ||
85 | u8 port; | ||
86 | |||
87 | /* | ||
88 | * This buffer will be used mainly for read/write operations. Since | ||
89 | * they're quite large, we cannot use the stack. Protection is not | ||
90 | * needed because all SPI communication is serialized by the SPI core. | ||
91 | */ | ||
92 | void *buf; | ||
93 | |||
94 | u8 bpw; | ||
95 | u32 speed; | ||
96 | u16 mode; | ||
97 | u8 cs; | ||
98 | }; | ||
99 | |||
100 | /* | ||
101 | * Enable/Disable SPI module. The disable command will wait for transfers to | ||
102 | * complete first. | ||
103 | */ | ||
104 | static int dln2_spi_enable(struct dln2_spi *dln2, bool enable) | ||
105 | { | ||
106 | u16 cmd; | ||
107 | struct { | ||
108 | u8 port; | ||
109 | u8 wait_for_completion; | ||
110 | } tx; | ||
111 | unsigned len = sizeof(tx); | ||
112 | |||
113 | tx.port = dln2->port; | ||
114 | |||
115 | if (enable) { | ||
116 | cmd = DLN2_SPI_ENABLE; | ||
117 | len -= sizeof(tx.wait_for_completion); | ||
118 | } else { | ||
119 | tx.wait_for_completion = DLN2_TRANSFERS_WAIT_COMPLETE; | ||
120 | cmd = DLN2_SPI_DISABLE; | ||
121 | } | ||
122 | |||
123 | return dln2_transfer_tx(dln2->pdev, cmd, &tx, len); | ||
124 | } | ||
125 | |||
126 | /* | ||
127 | * Select/unselect multiple CS lines. The selected lines will be automatically | ||
128 | * toggled LOW/HIGH by the board firmware during transfers, provided they're | ||
129 | * enabled first. | ||
130 | * | ||
131 | * Ex: cs_mask = 0x03 -> CS0 & CS1 will be selected and the next WR/RD operation | ||
132 | * will toggle the lines LOW/HIGH automatically. | ||
133 | */ | ||
134 | static int dln2_spi_cs_set(struct dln2_spi *dln2, u8 cs_mask) | ||
135 | { | ||
136 | struct { | ||
137 | u8 port; | ||
138 | u8 cs; | ||
139 | } tx; | ||
140 | |||
141 | tx.port = dln2->port; | ||
142 | |||
143 | /* | ||
144 | * According to Diolan docs, "a slave device can be selected by changing | ||
145 | * the corresponding bit value to 0". The rest must be set to 1. Hence | ||
146 | * the bitwise NOT in front. | ||
147 | */ | ||
148 | tx.cs = ~cs_mask; | ||
149 | |||
150 | return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_SS, &tx, sizeof(tx)); | ||
151 | } | ||
152 | |||
153 | /* | ||
154 | * Select one CS line. The other lines will be un-selected. | ||
155 | */ | ||
156 | static int dln2_spi_cs_set_one(struct dln2_spi *dln2, u8 cs) | ||
157 | { | ||
158 | return dln2_spi_cs_set(dln2, BIT(cs)); | ||
159 | } | ||
160 | |||
161 | /* | ||
162 | * Enable/disable CS lines for usage. The module has to be disabled first. | ||
163 | */ | ||
164 | static int dln2_spi_cs_enable(struct dln2_spi *dln2, u8 cs_mask, bool enable) | ||
165 | { | ||
166 | struct { | ||
167 | u8 port; | ||
168 | u8 cs; | ||
169 | } tx; | ||
170 | u16 cmd; | ||
171 | |||
172 | tx.port = dln2->port; | ||
173 | tx.cs = cs_mask; | ||
174 | cmd = enable ? DLN2_SPI_SS_MULTI_ENABLE : DLN2_SPI_SS_MULTI_DISABLE; | ||
175 | |||
176 | return dln2_transfer_tx(dln2->pdev, cmd, &tx, sizeof(tx)); | ||
177 | } | ||
178 | |||
179 | static int dln2_spi_cs_enable_all(struct dln2_spi *dln2, bool enable) | ||
180 | { | ||
181 | u8 cs_mask = GENMASK(dln2->master->num_chipselect - 1, 0); | ||
182 | |||
183 | return dln2_spi_cs_enable(dln2, cs_mask, enable); | ||
184 | } | ||
185 | |||
186 | static int dln2_spi_get_cs_num(struct dln2_spi *dln2, u16 *cs_num) | ||
187 | { | ||
188 | int ret; | ||
189 | struct { | ||
190 | u8 port; | ||
191 | } tx; | ||
192 | struct { | ||
193 | __le16 cs_count; | ||
194 | } rx; | ||
195 | unsigned rx_len = sizeof(rx); | ||
196 | |||
197 | tx.port = dln2->port; | ||
198 | ret = dln2_transfer(dln2->pdev, DLN2_SPI_GET_SS_COUNT, &tx, sizeof(tx), | ||
199 | &rx, &rx_len); | ||
200 | if (ret < 0) | ||
201 | return ret; | ||
202 | if (rx_len < sizeof(rx)) | ||
203 | return -EPROTO; | ||
204 | |||
205 | *cs_num = le16_to_cpu(rx.cs_count); | ||
206 | |||
207 | dev_dbg(&dln2->pdev->dev, "cs_num = %d\n", *cs_num); | ||
208 | |||
209 | return 0; | ||
210 | } | ||
211 | |||
212 | static int dln2_spi_get_speed(struct dln2_spi *dln2, u16 cmd, u32 *freq) | ||
213 | { | ||
214 | int ret; | ||
215 | struct { | ||
216 | u8 port; | ||
217 | } tx; | ||
218 | struct { | ||
219 | __le32 speed; | ||
220 | } rx; | ||
221 | unsigned rx_len = sizeof(rx); | ||
222 | |||
223 | tx.port = dln2->port; | ||
224 | |||
225 | ret = dln2_transfer(dln2->pdev, cmd, &tx, sizeof(tx), &rx, &rx_len); | ||
226 | if (ret < 0) | ||
227 | return ret; | ||
228 | if (rx_len < sizeof(rx)) | ||
229 | return -EPROTO; | ||
230 | |||
231 | *freq = le32_to_cpu(rx.speed); | ||
232 | |||
233 | return 0; | ||
234 | } | ||
235 | |||
236 | /* | ||
237 | * Get bus min/max frequencies. | ||
238 | */ | ||
239 | static int dln2_spi_get_speed_range(struct dln2_spi *dln2, u32 *fmin, u32 *fmax) | ||
240 | { | ||
241 | int ret; | ||
242 | |||
243 | ret = dln2_spi_get_speed(dln2, DLN2_SPI_GET_MIN_FREQUENCY, fmin); | ||
244 | if (ret < 0) | ||
245 | return ret; | ||
246 | |||
247 | ret = dln2_spi_get_speed(dln2, DLN2_SPI_GET_MAX_FREQUENCY, fmax); | ||
248 | if (ret < 0) | ||
249 | return ret; | ||
250 | |||
251 | dev_dbg(&dln2->pdev->dev, "freq_min = %d, freq_max = %d\n", | ||
252 | *fmin, *fmax); | ||
253 | |||
254 | return 0; | ||
255 | } | ||
256 | |||
257 | /* | ||
258 | * Set the bus speed. The module will automatically round down to the closest | ||
259 | * available frequency and returns it. The module has to be disabled first. | ||
260 | */ | ||
261 | static int dln2_spi_set_speed(struct dln2_spi *dln2, u32 speed) | ||
262 | { | ||
263 | int ret; | ||
264 | struct { | ||
265 | u8 port; | ||
266 | __le32 speed; | ||
267 | } __packed tx; | ||
268 | struct { | ||
269 | __le32 speed; | ||
270 | } rx; | ||
271 | int rx_len = sizeof(rx); | ||
272 | |||
273 | tx.port = dln2->port; | ||
274 | tx.speed = cpu_to_le32(speed); | ||
275 | |||
276 | ret = dln2_transfer(dln2->pdev, DLN2_SPI_SET_FREQUENCY, &tx, sizeof(tx), | ||
277 | &rx, &rx_len); | ||
278 | if (ret < 0) | ||
279 | return ret; | ||
280 | if (rx_len < sizeof(rx)) | ||
281 | return -EPROTO; | ||
282 | |||
283 | return 0; | ||
284 | } | ||
285 | |||
286 | /* | ||
287 | * Change CPOL & CPHA. The module has to be disabled first. | ||
288 | */ | ||
289 | static int dln2_spi_set_mode(struct dln2_spi *dln2, u8 mode) | ||
290 | { | ||
291 | struct { | ||
292 | u8 port; | ||
293 | u8 mode; | ||
294 | } tx; | ||
295 | |||
296 | tx.port = dln2->port; | ||
297 | tx.mode = mode; | ||
298 | |||
299 | return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_MODE, &tx, sizeof(tx)); | ||
300 | } | ||
301 | |||
302 | /* | ||
303 | * Change frame size. The module has to be disabled first. | ||
304 | */ | ||
305 | static int dln2_spi_set_bpw(struct dln2_spi *dln2, u8 bpw) | ||
306 | { | ||
307 | struct { | ||
308 | u8 port; | ||
309 | u8 bpw; | ||
310 | } tx; | ||
311 | |||
312 | tx.port = dln2->port; | ||
313 | tx.bpw = bpw; | ||
314 | |||
315 | return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_FRAME_SIZE, | ||
316 | &tx, sizeof(tx)); | ||
317 | } | ||
318 | |||
319 | static int dln2_spi_get_supported_frame_sizes(struct dln2_spi *dln2, | ||
320 | u32 *bpw_mask) | ||
321 | { | ||
322 | int ret; | ||
323 | struct { | ||
324 | u8 port; | ||
325 | } tx; | ||
326 | struct { | ||
327 | u8 count; | ||
328 | u8 frame_sizes[36]; | ||
329 | } *rx = dln2->buf; | ||
330 | unsigned rx_len = sizeof(*rx); | ||
331 | int i; | ||
332 | |||
333 | tx.port = dln2->port; | ||
334 | |||
335 | ret = dln2_transfer(dln2->pdev, DLN2_SPI_GET_SUPPORTED_FRAME_SIZES, | ||
336 | &tx, sizeof(tx), rx, &rx_len); | ||
337 | if (ret < 0) | ||
338 | return ret; | ||
339 | if (rx_len < sizeof(*rx)) | ||
340 | return -EPROTO; | ||
341 | if (rx->count > ARRAY_SIZE(rx->frame_sizes)) | ||
342 | return -EPROTO; | ||
343 | |||
344 | *bpw_mask = 0; | ||
345 | for (i = 0; i < rx->count; i++) | ||
346 | *bpw_mask |= BIT(rx->frame_sizes[i] - 1); | ||
347 | |||
348 | dev_dbg(&dln2->pdev->dev, "bpw_mask = 0x%X\n", *bpw_mask); | ||
349 | |||
350 | return 0; | ||
351 | } | ||
352 | |||
/*
 * Copy the data to DLN2 buffer and change the byte order to LE, requested by
 * DLN2 module. SPI core makes sure that the data length is a multiple of word
 * size.
 *
 * NOTE(review): the big-endian 16/32-bit paths access @src and @dln2_buf
 * through wider pointers and so assume both are suitably aligned — TODO
 * confirm (the RX path, dln2_spi_copy_from_buf(), explicitly handles a
 * 2-byte-aligned buffer for the 32-bit case).
 */
static int dln2_spi_copy_to_buf(u8 *dln2_buf, const u8 *src, u16 len, u8 bpw)
{
#ifdef __LITTLE_ENDIAN
	/* CPU order is already LE: a plain copy works for any word size. */
	memcpy(dln2_buf, src, len);
#else
	if (bpw <= 8) {
		/* Byte stream: nothing to swap. */
		memcpy(dln2_buf, src, len);
	} else if (bpw <= 16) {
		__le16 *d = (__le16 *)dln2_buf;
		u16 *s = (u16 *)src;

		/* len is in bytes; iterate over 16-bit words. */
		len = len / 2;
		while (len--)
			*d++ = cpu_to_le16p(s++);
	} else {
		__le32 *d = (__le32 *)dln2_buf;
		u32 *s = (u32 *)src;

		/* len is in bytes; iterate over 32-bit words. */
		len = len / 4;
		while (len--)
			*d++ = cpu_to_le32p(s++);
	}
#endif

	return 0;
}
384 | |||
385 | /* | ||
386 | * Copy the data from DLN2 buffer and convert to CPU byte order since the DLN2 | ||
387 | * buffer is LE ordered. SPI core makes sure that the data length is a multiple | ||
388 | * of word size. The RX dln2_buf is 2 byte aligned so, for BE, we have to make | ||
389 | * sure we avoid unaligned accesses for 32 bit case. | ||
390 | */ | ||
391 | static int dln2_spi_copy_from_buf(u8 *dest, const u8 *dln2_buf, u16 len, u8 bpw) | ||
392 | { | ||
393 | #ifdef __LITTLE_ENDIAN | ||
394 | memcpy(dest, dln2_buf, len); | ||
395 | #else | ||
396 | if (bpw <= 8) { | ||
397 | memcpy(dest, dln2_buf, len); | ||
398 | } else if (bpw <= 16) { | ||
399 | u16 *d = (u16 *)dest; | ||
400 | __le16 *s = (__le16 *)dln2_buf; | ||
401 | |||
402 | len = len / 2; | ||
403 | while (len--) | ||
404 | *d++ = le16_to_cpup(s++); | ||
405 | } else { | ||
406 | u32 *d = (u32 *)dest; | ||
407 | __le32 *s = (__le32 *)dln2_buf; | ||
408 | |||
409 | len = len / 4; | ||
410 | while (len--) | ||
411 | *d++ = get_unaligned_le32(s++); | ||
412 | } | ||
413 | #endif | ||
414 | |||
415 | return 0; | ||
416 | } | ||
417 | |||
418 | /* | ||
419 | * Perform one write operation. | ||
420 | */ | ||
421 | static int dln2_spi_write_one(struct dln2_spi *dln2, const u8 *data, | ||
422 | u16 data_len, u8 attr) | ||
423 | { | ||
424 | struct { | ||
425 | u8 port; | ||
426 | __le16 size; | ||
427 | u8 attr; | ||
428 | u8 buf[DLN2_SPI_MAX_XFER_SIZE]; | ||
429 | } __packed *tx = dln2->buf; | ||
430 | unsigned tx_len; | ||
431 | |||
432 | BUILD_BUG_ON(sizeof(*tx) > DLN2_SPI_BUF_SIZE); | ||
433 | |||
434 | if (data_len > DLN2_SPI_MAX_XFER_SIZE) | ||
435 | return -EINVAL; | ||
436 | |||
437 | tx->port = dln2->port; | ||
438 | tx->size = cpu_to_le16(data_len); | ||
439 | tx->attr = attr; | ||
440 | |||
441 | dln2_spi_copy_to_buf(tx->buf, data, data_len, dln2->bpw); | ||
442 | |||
443 | tx_len = sizeof(*tx) + data_len - DLN2_SPI_MAX_XFER_SIZE; | ||
444 | return dln2_transfer_tx(dln2->pdev, DLN2_SPI_WRITE, tx, tx_len); | ||
445 | } | ||
446 | |||
447 | /* | ||
448 | * Perform one read operation. | ||
449 | */ | ||
450 | static int dln2_spi_read_one(struct dln2_spi *dln2, u8 *data, | ||
451 | u16 data_len, u8 attr) | ||
452 | { | ||
453 | int ret; | ||
454 | struct { | ||
455 | u8 port; | ||
456 | __le16 size; | ||
457 | u8 attr; | ||
458 | } __packed tx; | ||
459 | struct { | ||
460 | __le16 size; | ||
461 | u8 buf[DLN2_SPI_MAX_XFER_SIZE]; | ||
462 | } __packed *rx = dln2->buf; | ||
463 | unsigned rx_len = sizeof(*rx); | ||
464 | |||
465 | BUILD_BUG_ON(sizeof(*rx) > DLN2_SPI_BUF_SIZE); | ||
466 | |||
467 | if (data_len > DLN2_SPI_MAX_XFER_SIZE) | ||
468 | return -EINVAL; | ||
469 | |||
470 | tx.port = dln2->port; | ||
471 | tx.size = cpu_to_le16(data_len); | ||
472 | tx.attr = attr; | ||
473 | |||
474 | ret = dln2_transfer(dln2->pdev, DLN2_SPI_READ, &tx, sizeof(tx), | ||
475 | rx, &rx_len); | ||
476 | if (ret < 0) | ||
477 | return ret; | ||
478 | if (rx_len < sizeof(rx->size) + data_len) | ||
479 | return -EPROTO; | ||
480 | if (le16_to_cpu(rx->size) != data_len) | ||
481 | return -EPROTO; | ||
482 | |||
483 | dln2_spi_copy_from_buf(data, rx->buf, data_len, dln2->bpw); | ||
484 | |||
485 | return 0; | ||
486 | } | ||
487 | |||
/*
 * Perform one write & read operation.
 *
 * Sends up to DLN2_SPI_MAX_XFER_SIZE bytes from @tx_data and stores the
 * same number of received bytes into @rx_data. @attr controls the
 * slave-select behavior after the transfer.
 */
static int dln2_spi_read_write_one(struct dln2_spi *dln2, const u8 *tx_data,
				   u8 *rx_data, u16 data_len, u8 attr)
{
	int ret;
	struct {
		u8 port;
		__le16 size;
		u8 attr;
		u8 buf[DLN2_SPI_MAX_XFER_SIZE];
	} __packed *tx;
	struct {
		__le16 size;
		u8 buf[DLN2_SPI_MAX_XFER_SIZE];
	} __packed *rx;
	unsigned tx_len, rx_len;

	/* Both wire layouts must fit the single preallocated dln2->buf. */
	BUILD_BUG_ON(sizeof(*tx) > DLN2_SPI_BUF_SIZE ||
		     sizeof(*rx) > DLN2_SPI_BUF_SIZE);

	if (data_len > DLN2_SPI_MAX_XFER_SIZE)
		return -EINVAL;

	/*
	 * Since this is a pseudo full-duplex communication, we're perfectly
	 * safe to use the same buffer for both tx and rx. When DLN2 sends the
	 * response back, with the rx data, we don't need the tx buffer anymore.
	 */
	tx = dln2->buf;
	rx = dln2->buf;

	tx->port = dln2->port;
	tx->size = cpu_to_le16(data_len);
	tx->attr = attr;

	dln2_spi_copy_to_buf(tx->buf, tx_data, data_len, dln2->bpw);

	/* Header size plus only the payload bytes actually used. */
	tx_len = sizeof(*tx) + data_len - DLN2_SPI_MAX_XFER_SIZE;
	rx_len = sizeof(*rx);

	ret = dln2_transfer(dln2->pdev, DLN2_SPI_READ_WRITE, tx, tx_len,
			    rx, &rx_len);
	if (ret < 0)
		return ret;
	/* The device must return exactly the amount of data requested. */
	if (rx_len < sizeof(rx->size) + data_len)
		return -EPROTO;
	if (le16_to_cpu(rx->size) != data_len)
		return -EPROTO;

	dln2_spi_copy_from_buf(rx_data, rx->buf, data_len, dln2->bpw);

	return 0;
}
543 | |||
544 | /* | ||
545 | * Read/Write wrapper. It will automatically split an operation into multiple | ||
546 | * single ones due to device buffer constraints. | ||
547 | */ | ||
548 | static int dln2_spi_rdwr(struct dln2_spi *dln2, const u8 *tx_data, | ||
549 | u8 *rx_data, u16 data_len, u8 attr) { | ||
550 | int ret; | ||
551 | u16 len; | ||
552 | u8 temp_attr; | ||
553 | u16 remaining = data_len; | ||
554 | u16 offset; | ||
555 | |||
556 | do { | ||
557 | if (remaining > DLN2_SPI_MAX_XFER_SIZE) { | ||
558 | len = DLN2_SPI_MAX_XFER_SIZE; | ||
559 | temp_attr = DLN2_SPI_ATTR_LEAVE_SS_LOW; | ||
560 | } else { | ||
561 | len = remaining; | ||
562 | temp_attr = attr; | ||
563 | } | ||
564 | |||
565 | offset = data_len - remaining; | ||
566 | |||
567 | if (tx_data && rx_data) { | ||
568 | ret = dln2_spi_read_write_one(dln2, | ||
569 | tx_data + offset, | ||
570 | rx_data + offset, | ||
571 | len, temp_attr); | ||
572 | } else if (tx_data) { | ||
573 | ret = dln2_spi_write_one(dln2, | ||
574 | tx_data + offset, | ||
575 | len, temp_attr); | ||
576 | } else if (rx_data) { | ||
577 | ret = dln2_spi_read_one(dln2, | ||
578 | rx_data + offset, | ||
579 | len, temp_attr); | ||
580 | } else { | ||
581 | return -EINVAL; | ||
582 | } | ||
583 | |||
584 | if (ret < 0) | ||
585 | return ret; | ||
586 | |||
587 | remaining -= len; | ||
588 | } while (remaining); | ||
589 | |||
590 | return 0; | ||
591 | } | ||
592 | |||
593 | static int dln2_spi_prepare_message(struct spi_master *master, | ||
594 | struct spi_message *message) | ||
595 | { | ||
596 | int ret; | ||
597 | struct dln2_spi *dln2 = spi_master_get_devdata(master); | ||
598 | struct spi_device *spi = message->spi; | ||
599 | |||
600 | if (dln2->cs != spi->chip_select) { | ||
601 | ret = dln2_spi_cs_set_one(dln2, spi->chip_select); | ||
602 | if (ret < 0) | ||
603 | return ret; | ||
604 | |||
605 | dln2->cs = spi->chip_select; | ||
606 | } | ||
607 | |||
608 | return 0; | ||
609 | } | ||
610 | |||
611 | static int dln2_spi_transfer_setup(struct dln2_spi *dln2, u32 speed, | ||
612 | u8 bpw, u8 mode) | ||
613 | { | ||
614 | int ret; | ||
615 | bool bus_setup_change; | ||
616 | |||
617 | bus_setup_change = dln2->speed != speed || dln2->mode != mode || | ||
618 | dln2->bpw != bpw; | ||
619 | |||
620 | if (!bus_setup_change) | ||
621 | return 0; | ||
622 | |||
623 | ret = dln2_spi_enable(dln2, false); | ||
624 | if (ret < 0) | ||
625 | return ret; | ||
626 | |||
627 | if (dln2->speed != speed) { | ||
628 | ret = dln2_spi_set_speed(dln2, speed); | ||
629 | if (ret < 0) | ||
630 | return ret; | ||
631 | |||
632 | dln2->speed = speed; | ||
633 | } | ||
634 | |||
635 | if (dln2->mode != mode) { | ||
636 | ret = dln2_spi_set_mode(dln2, mode & 0x3); | ||
637 | if (ret < 0) | ||
638 | return ret; | ||
639 | |||
640 | dln2->mode = mode; | ||
641 | } | ||
642 | |||
643 | if (dln2->bpw != bpw) { | ||
644 | ret = dln2_spi_set_bpw(dln2, bpw); | ||
645 | if (ret < 0) | ||
646 | return ret; | ||
647 | |||
648 | dln2->bpw = bpw; | ||
649 | } | ||
650 | |||
651 | return dln2_spi_enable(dln2, true); | ||
652 | } | ||
653 | |||
654 | static int dln2_spi_transfer_one(struct spi_master *master, | ||
655 | struct spi_device *spi, | ||
656 | struct spi_transfer *xfer) | ||
657 | { | ||
658 | struct dln2_spi *dln2 = spi_master_get_devdata(master); | ||
659 | int status; | ||
660 | u8 attr = 0; | ||
661 | |||
662 | status = dln2_spi_transfer_setup(dln2, xfer->speed_hz, | ||
663 | xfer->bits_per_word, | ||
664 | spi->mode); | ||
665 | if (status < 0) { | ||
666 | dev_err(&dln2->pdev->dev, "Cannot setup transfer\n"); | ||
667 | return status; | ||
668 | } | ||
669 | |||
670 | if (!xfer->cs_change && !spi_transfer_is_last(master, xfer)) | ||
671 | attr = DLN2_SPI_ATTR_LEAVE_SS_LOW; | ||
672 | |||
673 | status = dln2_spi_rdwr(dln2, xfer->tx_buf, xfer->rx_buf, | ||
674 | xfer->len, attr); | ||
675 | if (status < 0) | ||
676 | dev_err(&dln2->pdev->dev, "write/read failed!\n"); | ||
677 | |||
678 | return status; | ||
679 | } | ||
680 | |||
681 | static int dln2_spi_probe(struct platform_device *pdev) | ||
682 | { | ||
683 | struct spi_master *master; | ||
684 | struct dln2_spi *dln2; | ||
685 | struct dln2_platform_data *pdata = dev_get_platdata(&pdev->dev); | ||
686 | int ret; | ||
687 | |||
688 | master = spi_alloc_master(&pdev->dev, sizeof(*dln2)); | ||
689 | if (!master) | ||
690 | return -ENOMEM; | ||
691 | |||
692 | platform_set_drvdata(pdev, master); | ||
693 | |||
694 | dln2 = spi_master_get_devdata(master); | ||
695 | |||
696 | dln2->buf = devm_kmalloc(&pdev->dev, DLN2_SPI_BUF_SIZE, GFP_KERNEL); | ||
697 | if (!dln2->buf) { | ||
698 | ret = -ENOMEM; | ||
699 | goto exit_free_master; | ||
700 | } | ||
701 | |||
702 | dln2->master = master; | ||
703 | dln2->pdev = pdev; | ||
704 | dln2->port = pdata->port; | ||
705 | /* cs/mode can never be 0xff, so the first transfer will set them */ | ||
706 | dln2->cs = 0xff; | ||
707 | dln2->mode = 0xff; | ||
708 | |||
709 | /* disable SPI module before continuing with the setup */ | ||
710 | ret = dln2_spi_enable(dln2, false); | ||
711 | if (ret < 0) { | ||
712 | dev_err(&pdev->dev, "Failed to disable SPI module\n"); | ||
713 | goto exit_free_master; | ||
714 | } | ||
715 | |||
716 | ret = dln2_spi_get_cs_num(dln2, &master->num_chipselect); | ||
717 | if (ret < 0) { | ||
718 | dev_err(&pdev->dev, "Failed to get number of CS pins\n"); | ||
719 | goto exit_free_master; | ||
720 | } | ||
721 | |||
722 | ret = dln2_spi_get_speed_range(dln2, | ||
723 | &master->min_speed_hz, | ||
724 | &master->max_speed_hz); | ||
725 | if (ret < 0) { | ||
726 | dev_err(&pdev->dev, "Failed to read bus min/max freqs\n"); | ||
727 | goto exit_free_master; | ||
728 | } | ||
729 | |||
730 | ret = dln2_spi_get_supported_frame_sizes(dln2, | ||
731 | &master->bits_per_word_mask); | ||
732 | if (ret < 0) { | ||
733 | dev_err(&pdev->dev, "Failed to read supported frame sizes\n"); | ||
734 | goto exit_free_master; | ||
735 | } | ||
736 | |||
737 | ret = dln2_spi_cs_enable_all(dln2, true); | ||
738 | if (ret < 0) { | ||
739 | dev_err(&pdev->dev, "Failed to enable CS pins\n"); | ||
740 | goto exit_free_master; | ||
741 | } | ||
742 | |||
743 | master->bus_num = -1; | ||
744 | master->mode_bits = SPI_CPOL | SPI_CPHA; | ||
745 | master->prepare_message = dln2_spi_prepare_message; | ||
746 | master->transfer_one = dln2_spi_transfer_one; | ||
747 | master->auto_runtime_pm = true; | ||
748 | |||
749 | /* enable SPI module, we're good to go */ | ||
750 | ret = dln2_spi_enable(dln2, true); | ||
751 | if (ret < 0) { | ||
752 | dev_err(&pdev->dev, "Failed to enable SPI module\n"); | ||
753 | goto exit_free_master; | ||
754 | } | ||
755 | |||
756 | pm_runtime_set_autosuspend_delay(&pdev->dev, | ||
757 | DLN2_RPM_AUTOSUSPEND_TIMEOUT); | ||
758 | pm_runtime_use_autosuspend(&pdev->dev); | ||
759 | pm_runtime_set_active(&pdev->dev); | ||
760 | pm_runtime_enable(&pdev->dev); | ||
761 | |||
762 | ret = devm_spi_register_master(&pdev->dev, master); | ||
763 | if (ret < 0) { | ||
764 | dev_err(&pdev->dev, "Failed to register master\n"); | ||
765 | goto exit_register; | ||
766 | } | ||
767 | |||
768 | return ret; | ||
769 | |||
770 | exit_register: | ||
771 | pm_runtime_disable(&pdev->dev); | ||
772 | pm_runtime_set_suspended(&pdev->dev); | ||
773 | |||
774 | if (dln2_spi_enable(dln2, false) < 0) | ||
775 | dev_err(&pdev->dev, "Failed to disable SPI module\n"); | ||
776 | exit_free_master: | ||
777 | spi_master_put(master); | ||
778 | |||
779 | return ret; | ||
780 | } | ||
781 | |||
782 | static int dln2_spi_remove(struct platform_device *pdev) | ||
783 | { | ||
784 | struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); | ||
785 | struct dln2_spi *dln2 = spi_master_get_devdata(master); | ||
786 | |||
787 | pm_runtime_disable(&pdev->dev); | ||
788 | |||
789 | if (dln2_spi_enable(dln2, false) < 0) | ||
790 | dev_err(&pdev->dev, "Failed to disable SPI module\n"); | ||
791 | |||
792 | return 0; | ||
793 | } | ||
794 | |||
795 | #ifdef CONFIG_PM_SLEEP | ||
796 | static int dln2_spi_suspend(struct device *dev) | ||
797 | { | ||
798 | int ret; | ||
799 | struct spi_master *master = dev_get_drvdata(dev); | ||
800 | struct dln2_spi *dln2 = spi_master_get_devdata(master); | ||
801 | |||
802 | ret = spi_master_suspend(master); | ||
803 | if (ret < 0) | ||
804 | return ret; | ||
805 | |||
806 | if (!pm_runtime_suspended(dev)) { | ||
807 | ret = dln2_spi_enable(dln2, false); | ||
808 | if (ret < 0) | ||
809 | return ret; | ||
810 | } | ||
811 | |||
812 | /* | ||
813 | * USB power may be cut off during sleep. Resetting the following | ||
814 | * parameters will force the board to be set up before first transfer. | ||
815 | */ | ||
816 | dln2->cs = 0xff; | ||
817 | dln2->speed = 0; | ||
818 | dln2->bpw = 0; | ||
819 | dln2->mode = 0xff; | ||
820 | |||
821 | return 0; | ||
822 | } | ||
823 | |||
824 | static int dln2_spi_resume(struct device *dev) | ||
825 | { | ||
826 | int ret; | ||
827 | struct spi_master *master = dev_get_drvdata(dev); | ||
828 | struct dln2_spi *dln2 = spi_master_get_devdata(master); | ||
829 | |||
830 | if (!pm_runtime_suspended(dev)) { | ||
831 | ret = dln2_spi_cs_enable_all(dln2, true); | ||
832 | if (ret < 0) | ||
833 | return ret; | ||
834 | |||
835 | ret = dln2_spi_enable(dln2, true); | ||
836 | if (ret < 0) | ||
837 | return ret; | ||
838 | } | ||
839 | |||
840 | return spi_master_resume(master); | ||
841 | } | ||
842 | #endif /* CONFIG_PM_SLEEP */ | ||
843 | |||
844 | #ifdef CONFIG_PM | ||
845 | static int dln2_spi_runtime_suspend(struct device *dev) | ||
846 | { | ||
847 | struct spi_master *master = dev_get_drvdata(dev); | ||
848 | struct dln2_spi *dln2 = spi_master_get_devdata(master); | ||
849 | |||
850 | return dln2_spi_enable(dln2, false); | ||
851 | } | ||
852 | |||
853 | static int dln2_spi_runtime_resume(struct device *dev) | ||
854 | { | ||
855 | struct spi_master *master = dev_get_drvdata(dev); | ||
856 | struct dln2_spi *dln2 = spi_master_get_devdata(master); | ||
857 | |||
858 | return dln2_spi_enable(dln2, true); | ||
859 | } | ||
860 | #endif /* CONFIG_PM */ | ||
861 | |||
/* Wire up system-sleep and runtime PM callbacks defined above. */
static const struct dev_pm_ops dln2_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(dln2_spi_suspend, dln2_spi_resume)
	SET_RUNTIME_PM_OPS(dln2_spi_runtime_suspend,
			   dln2_spi_runtime_resume, NULL)
};
867 | |||
/*
 * Platform driver matched by name "dln2-spi" (presumably instantiated as
 * a cell of the DLN2 MFD parent — see MODULE_ALIAS below; verify against
 * the mfd/dln2 driver).
 */
static struct platform_driver spi_dln2_driver = {
	.driver = {
		.name = "dln2-spi",
		.pm = &dln2_spi_pm,
	},
	.probe = dln2_spi_probe,
	.remove = dln2_spi_remove,
};
module_platform_driver(spi_dln2_driver);
877 | |||
878 | MODULE_DESCRIPTION("Driver for the Diolan DLN2 SPI master interface"); | ||
879 | MODULE_AUTHOR("Laurentiu Palcu <laurentiu.palcu@intel.com>"); | ||
880 | MODULE_LICENSE("GPL v2"); | ||
881 | MODULE_ALIAS("platform:dln2-spi"); | ||
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index 7281316a5ecb..a0197fd4e95c 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c | |||
@@ -247,9 +247,9 @@ static struct dw_spi_dma_ops mid_dma_ops = { | |||
247 | 247 | ||
248 | /* Some specific info for SPI0 controller on Intel MID */ | 248 | /* Some specific info for SPI0 controller on Intel MID */ |
249 | 249 | ||
250 | /* HW info for MRST CLk Control Unit, one 32b reg */ | 250 | /* HW info for MRST Clk Control Unit, 32b reg per controller */ |
251 | #define MRST_SPI_CLK_BASE 100000000 /* 100m */ | 251 | #define MRST_SPI_CLK_BASE 100000000 /* 100m */ |
252 | #define MRST_CLK_SPI0_REG 0xff11d86c | 252 | #define MRST_CLK_SPI_REG 0xff11d86c |
253 | #define CLK_SPI_BDIV_OFFSET 0 | 253 | #define CLK_SPI_BDIV_OFFSET 0 |
254 | #define CLK_SPI_BDIV_MASK 0x00000007 | 254 | #define CLK_SPI_BDIV_MASK 0x00000007 |
255 | #define CLK_SPI_CDIV_OFFSET 9 | 255 | #define CLK_SPI_CDIV_OFFSET 9 |
@@ -261,17 +261,17 @@ int dw_spi_mid_init(struct dw_spi *dws) | |||
261 | void __iomem *clk_reg; | 261 | void __iomem *clk_reg; |
262 | u32 clk_cdiv; | 262 | u32 clk_cdiv; |
263 | 263 | ||
264 | clk_reg = ioremap_nocache(MRST_CLK_SPI0_REG, 16); | 264 | clk_reg = ioremap_nocache(MRST_CLK_SPI_REG, 16); |
265 | if (!clk_reg) | 265 | if (!clk_reg) |
266 | return -ENOMEM; | 266 | return -ENOMEM; |
267 | 267 | ||
268 | /* get SPI controller operating freq info */ | 268 | /* Get SPI controller operating freq info */ |
269 | clk_cdiv = (readl(clk_reg) & CLK_SPI_CDIV_MASK) >> CLK_SPI_CDIV_OFFSET; | 269 | clk_cdiv = readl(clk_reg + dws->bus_num * sizeof(u32)); |
270 | clk_cdiv &= CLK_SPI_CDIV_MASK; | ||
271 | clk_cdiv >>= CLK_SPI_CDIV_OFFSET; | ||
270 | dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1); | 272 | dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1); |
271 | iounmap(clk_reg); | ||
272 | 273 | ||
273 | dws->num_cs = 16; | 274 | iounmap(clk_reg); |
274 | dws->fifo_len = 40; /* FIFO has 40 words buffer */ | ||
275 | 275 | ||
276 | #ifdef CONFIG_SPI_DW_MID_DMA | 276 | #ifdef CONFIG_SPI_DW_MID_DMA |
277 | dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL); | 277 | dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL); |
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c index ba68da12cdf0..5ba331047cbe 100644 --- a/drivers/spi/spi-dw-pci.c +++ b/drivers/spi/spi-dw-pci.c | |||
@@ -30,10 +30,20 @@ struct dw_spi_pci { | |||
30 | 30 | ||
31 | struct spi_pci_desc { | 31 | struct spi_pci_desc { |
32 | int (*setup)(struct dw_spi *); | 32 | int (*setup)(struct dw_spi *); |
33 | u16 num_cs; | ||
34 | u16 bus_num; | ||
33 | }; | 35 | }; |
34 | 36 | ||
35 | static struct spi_pci_desc spi_pci_mid_desc = { | 37 | static struct spi_pci_desc spi_pci_mid_desc_1 = { |
36 | .setup = dw_spi_mid_init, | 38 | .setup = dw_spi_mid_init, |
39 | .num_cs = 32, | ||
40 | .bus_num = 0, | ||
41 | }; | ||
42 | |||
43 | static struct spi_pci_desc spi_pci_mid_desc_2 = { | ||
44 | .setup = dw_spi_mid_init, | ||
45 | .num_cs = 4, | ||
46 | .bus_num = 1, | ||
37 | }; | 47 | }; |
38 | 48 | ||
39 | static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | 49 | static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
@@ -65,18 +75,23 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
65 | 75 | ||
66 | dws->regs = pcim_iomap_table(pdev)[pci_bar]; | 76 | dws->regs = pcim_iomap_table(pdev)[pci_bar]; |
67 | 77 | ||
68 | dws->bus_num = 0; | ||
69 | dws->num_cs = 4; | ||
70 | dws->irq = pdev->irq; | 78 | dws->irq = pdev->irq; |
71 | 79 | ||
72 | /* | 80 | /* |
73 | * Specific handling for paltforms, like dma setup, | 81 | * Specific handling for paltforms, like dma setup, |
74 | * clock rate, FIFO depth. | 82 | * clock rate, FIFO depth. |
75 | */ | 83 | */ |
76 | if (desc && desc->setup) { | 84 | if (desc) { |
77 | ret = desc->setup(dws); | 85 | dws->num_cs = desc->num_cs; |
78 | if (ret) | 86 | dws->bus_num = desc->bus_num; |
79 | return ret; | 87 | |
88 | if (desc->setup) { | ||
89 | ret = desc->setup(dws); | ||
90 | if (ret) | ||
91 | return ret; | ||
92 | } | ||
93 | } else { | ||
94 | return -ENODEV; | ||
80 | } | 95 | } |
81 | 96 | ||
82 | ret = dw_spi_add_host(&pdev->dev, dws); | 97 | ret = dw_spi_add_host(&pdev->dev, dws); |
@@ -121,7 +136,14 @@ static SIMPLE_DEV_PM_OPS(dw_spi_pm_ops, spi_suspend, spi_resume); | |||
121 | 136 | ||
122 | static const struct pci_device_id pci_ids[] = { | 137 | static const struct pci_device_id pci_ids[] = { |
123 | /* Intel MID platform SPI controller 0 */ | 138 | /* Intel MID platform SPI controller 0 */ |
124 | { PCI_VDEVICE(INTEL, 0x0800), (kernel_ulong_t)&spi_pci_mid_desc}, | 139 | /* |
140 | * The access to the device 8086:0801 is disabled by HW, since it's | ||
141 | * exclusively used by SCU to communicate with MSIC. | ||
142 | */ | ||
143 | /* Intel MID platform SPI controller 1 */ | ||
144 | { PCI_VDEVICE(INTEL, 0x0800), (kernel_ulong_t)&spi_pci_mid_desc_1}, | ||
145 | /* Intel MID platform SPI controller 2 */ | ||
146 | { PCI_VDEVICE(INTEL, 0x0812), (kernel_ulong_t)&spi_pci_mid_desc_2}, | ||
125 | {}, | 147 | {}, |
126 | }; | 148 | }; |
127 | 149 | ||
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index d0d5542efc06..5a97a62b298a 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c | |||
@@ -608,7 +608,7 @@ static void dw_spi_cleanup(struct spi_device *spi) | |||
608 | } | 608 | } |
609 | 609 | ||
610 | /* Restart the controller, disable all interrupts, clean rx fifo */ | 610 | /* Restart the controller, disable all interrupts, clean rx fifo */ |
611 | static void spi_hw_init(struct dw_spi *dws) | 611 | static void spi_hw_init(struct device *dev, struct dw_spi *dws) |
612 | { | 612 | { |
613 | spi_enable_chip(dws, 0); | 613 | spi_enable_chip(dws, 0); |
614 | spi_mask_intr(dws, 0xff); | 614 | spi_mask_intr(dws, 0xff); |
@@ -621,14 +621,15 @@ static void spi_hw_init(struct dw_spi *dws) | |||
621 | if (!dws->fifo_len) { | 621 | if (!dws->fifo_len) { |
622 | u32 fifo; | 622 | u32 fifo; |
623 | 623 | ||
624 | for (fifo = 2; fifo <= 257; fifo++) { | 624 | for (fifo = 2; fifo <= 256; fifo++) { |
625 | dw_writew(dws, DW_SPI_TXFLTR, fifo); | 625 | dw_writew(dws, DW_SPI_TXFLTR, fifo); |
626 | if (fifo != dw_readw(dws, DW_SPI_TXFLTR)) | 626 | if (fifo != dw_readw(dws, DW_SPI_TXFLTR)) |
627 | break; | 627 | break; |
628 | } | 628 | } |
629 | |||
630 | dws->fifo_len = (fifo == 257) ? 0 : fifo; | ||
631 | dw_writew(dws, DW_SPI_TXFLTR, 0); | 629 | dw_writew(dws, DW_SPI_TXFLTR, 0); |
630 | |||
631 | dws->fifo_len = (fifo == 2) ? 0 : fifo - 1; | ||
632 | dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len); | ||
632 | } | 633 | } |
633 | } | 634 | } |
634 | 635 | ||
@@ -668,12 +669,12 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) | |||
668 | master->dev.of_node = dev->of_node; | 669 | master->dev.of_node = dev->of_node; |
669 | 670 | ||
670 | /* Basic HW init */ | 671 | /* Basic HW init */ |
671 | spi_hw_init(dws); | 672 | spi_hw_init(dev, dws); |
672 | 673 | ||
673 | if (dws->dma_ops && dws->dma_ops->dma_init) { | 674 | if (dws->dma_ops && dws->dma_ops->dma_init) { |
674 | ret = dws->dma_ops->dma_init(dws); | 675 | ret = dws->dma_ops->dma_init(dws); |
675 | if (ret) { | 676 | if (ret) { |
676 | dev_warn(&master->dev, "DMA init failed\n"); | 677 | dev_warn(dev, "DMA init failed\n"); |
677 | dws->dma_inited = 0; | 678 | dws->dma_inited = 0; |
678 | } | 679 | } |
679 | } | 680 | } |
@@ -731,7 +732,7 @@ int dw_spi_resume_host(struct dw_spi *dws) | |||
731 | { | 732 | { |
732 | int ret; | 733 | int ret; |
733 | 734 | ||
734 | spi_hw_init(dws); | 735 | spi_hw_init(&dws->master->dev, dws); |
735 | ret = spi_master_resume(dws->master); | 736 | ret = spi_master_resume(dws->master); |
736 | if (ret) | 737 | if (ret) |
737 | dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret); | 738 | dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret); |
diff --git a/drivers/spi/spi-falcon.c b/drivers/spi/spi-falcon.c index 912b9037e9cf..286b2c81fc6b 100644 --- a/drivers/spi/spi-falcon.c +++ b/drivers/spi/spi-falcon.c | |||
@@ -353,16 +353,6 @@ static int falcon_sflash_setup(struct spi_device *spi) | |||
353 | return 0; | 353 | return 0; |
354 | } | 354 | } |
355 | 355 | ||
356 | static int falcon_sflash_prepare_xfer(struct spi_master *master) | ||
357 | { | ||
358 | return 0; | ||
359 | } | ||
360 | |||
361 | static int falcon_sflash_unprepare_xfer(struct spi_master *master) | ||
362 | { | ||
363 | return 0; | ||
364 | } | ||
365 | |||
366 | static int falcon_sflash_xfer_one(struct spi_master *master, | 356 | static int falcon_sflash_xfer_one(struct spi_master *master, |
367 | struct spi_message *m) | 357 | struct spi_message *m) |
368 | { | 358 | { |
@@ -420,9 +410,7 @@ static int falcon_sflash_probe(struct platform_device *pdev) | |||
420 | master->mode_bits = SPI_MODE_3; | 410 | master->mode_bits = SPI_MODE_3; |
421 | master->flags = SPI_MASTER_HALF_DUPLEX; | 411 | master->flags = SPI_MASTER_HALF_DUPLEX; |
422 | master->setup = falcon_sflash_setup; | 412 | master->setup = falcon_sflash_setup; |
423 | master->prepare_transfer_hardware = falcon_sflash_prepare_xfer; | ||
424 | master->transfer_one_message = falcon_sflash_xfer_one; | 413 | master->transfer_one_message = falcon_sflash_xfer_one; |
425 | master->unprepare_transfer_hardware = falcon_sflash_unprepare_xfer; | ||
426 | master->dev.of_node = pdev->dev.of_node; | 414 | master->dev.of_node = pdev->dev.of_node; |
427 | 415 | ||
428 | ret = devm_spi_register_master(&pdev->dev, master); | 416 | ret = devm_spi_register_master(&pdev->dev, master); |
diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c index e85ab1cb17a2..9c46a3058743 100644 --- a/drivers/spi/spi-fsl-cpm.c +++ b/drivers/spi/spi-fsl-cpm.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/dma-mapping.h> | 20 | #include <linux/dma-mapping.h> |
21 | #include <linux/fsl_devices.h> | 21 | #include <linux/fsl_devices.h> |
22 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
23 | #include <linux/module.h> | ||
23 | #include <linux/of_address.h> | 24 | #include <linux/of_address.h> |
24 | #include <linux/spi/spi.h> | 25 | #include <linux/spi/spi.h> |
25 | #include <linux/types.h> | 26 | #include <linux/types.h> |
@@ -68,6 +69,7 @@ void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi) | |||
68 | } | 69 | } |
69 | } | 70 | } |
70 | } | 71 | } |
72 | EXPORT_SYMBOL_GPL(fsl_spi_cpm_reinit_txrx); | ||
71 | 73 | ||
72 | static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi) | 74 | static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi) |
73 | { | 75 | { |
@@ -162,6 +164,7 @@ err_rx_dma: | |||
162 | dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE); | 164 | dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE); |
163 | return -ENOMEM; | 165 | return -ENOMEM; |
164 | } | 166 | } |
167 | EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs); | ||
165 | 168 | ||
166 | void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi) | 169 | void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi) |
167 | { | 170 | { |
@@ -174,6 +177,7 @@ void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi) | |||
174 | dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE); | 177 | dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE); |
175 | mspi->xfer_in_progress = NULL; | 178 | mspi->xfer_in_progress = NULL; |
176 | } | 179 | } |
180 | EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs_complete); | ||
177 | 181 | ||
178 | void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events) | 182 | void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events) |
179 | { | 183 | { |
@@ -198,6 +202,7 @@ void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events) | |||
198 | else | 202 | else |
199 | complete(&mspi->done); | 203 | complete(&mspi->done); |
200 | } | 204 | } |
205 | EXPORT_SYMBOL_GPL(fsl_spi_cpm_irq); | ||
201 | 206 | ||
202 | static void *fsl_spi_alloc_dummy_rx(void) | 207 | static void *fsl_spi_alloc_dummy_rx(void) |
203 | { | 208 | { |
@@ -375,6 +380,7 @@ err_pram: | |||
375 | fsl_spi_free_dummy_rx(); | 380 | fsl_spi_free_dummy_rx(); |
376 | return -ENOMEM; | 381 | return -ENOMEM; |
377 | } | 382 | } |
383 | EXPORT_SYMBOL_GPL(fsl_spi_cpm_init); | ||
378 | 384 | ||
379 | void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi) | 385 | void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi) |
380 | { | 386 | { |
@@ -389,3 +395,6 @@ void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi) | |||
389 | cpm_muram_free(cpm_muram_offset(mspi->pram)); | 395 | cpm_muram_free(cpm_muram_offset(mspi->pram)); |
390 | fsl_spi_free_dummy_rx(); | 396 | fsl_spi_free_dummy_rx(); |
391 | } | 397 | } |
398 | EXPORT_SYMBOL_GPL(fsl_spi_cpm_free); | ||
399 | |||
400 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 4cda994d3f40..d1a39249704a 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c | |||
@@ -106,7 +106,7 @@ struct chip_data { | |||
106 | }; | 106 | }; |
107 | 107 | ||
108 | struct fsl_dspi { | 108 | struct fsl_dspi { |
109 | struct spi_bitbang bitbang; | 109 | struct spi_master *master; |
110 | struct platform_device *pdev; | 110 | struct platform_device *pdev; |
111 | 111 | ||
112 | struct regmap *regmap; | 112 | struct regmap *regmap; |
@@ -114,6 +114,7 @@ struct fsl_dspi { | |||
114 | struct clk *clk; | 114 | struct clk *clk; |
115 | 115 | ||
116 | struct spi_transfer *cur_transfer; | 116 | struct spi_transfer *cur_transfer; |
117 | struct spi_message *cur_msg; | ||
117 | struct chip_data *cur_chip; | 118 | struct chip_data *cur_chip; |
118 | size_t len; | 119 | size_t len; |
119 | void *tx; | 120 | void *tx; |
@@ -123,6 +124,7 @@ struct fsl_dspi { | |||
123 | char dataflags; | 124 | char dataflags; |
124 | u8 cs; | 125 | u8 cs; |
125 | u16 void_write_data; | 126 | u16 void_write_data; |
127 | u32 cs_change; | ||
126 | 128 | ||
127 | wait_queue_head_t waitq; | 129 | wait_queue_head_t waitq; |
128 | u32 waitflags; | 130 | u32 waitflags; |
@@ -225,6 +227,8 @@ static int dspi_transfer_write(struct fsl_dspi *dspi) | |||
225 | if (dspi->len == 0 || tx_count == DSPI_FIFO_SIZE - 1) { | 227 | if (dspi->len == 0 || tx_count == DSPI_FIFO_SIZE - 1) { |
226 | /* last transfer in the transfer */ | 228 | /* last transfer in the transfer */ |
227 | dspi_pushr |= SPI_PUSHR_EOQ; | 229 | dspi_pushr |= SPI_PUSHR_EOQ; |
230 | if ((dspi->cs_change) && (!dspi->len)) | ||
231 | dspi_pushr &= ~SPI_PUSHR_CONT; | ||
228 | } else if (tx_word && (dspi->len == 1)) | 232 | } else if (tx_word && (dspi->len == 1)) |
229 | dspi_pushr |= SPI_PUSHR_EOQ; | 233 | dspi_pushr |= SPI_PUSHR_EOQ; |
230 | 234 | ||
@@ -246,6 +250,7 @@ static int dspi_transfer_read(struct fsl_dspi *dspi) | |||
246 | int rx_count = 0; | 250 | int rx_count = 0; |
247 | int rx_word = is_double_byte_mode(dspi); | 251 | int rx_word = is_double_byte_mode(dspi); |
248 | u16 d; | 252 | u16 d; |
253 | |||
249 | while ((dspi->rx < dspi->rx_end) | 254 | while ((dspi->rx < dspi->rx_end) |
250 | && (rx_count < DSPI_FIFO_SIZE)) { | 255 | && (rx_count < DSPI_FIFO_SIZE)) { |
251 | if (rx_word) { | 256 | if (rx_word) { |
@@ -276,86 +281,89 @@ static int dspi_transfer_read(struct fsl_dspi *dspi) | |||
276 | return rx_count; | 281 | return rx_count; |
277 | } | 282 | } |
278 | 283 | ||
279 | static int dspi_txrx_transfer(struct spi_device *spi, struct spi_transfer *t) | 284 | static int dspi_transfer_one_message(struct spi_master *master, |
285 | struct spi_message *message) | ||
280 | { | 286 | { |
281 | struct fsl_dspi *dspi = spi_master_get_devdata(spi->master); | 287 | struct fsl_dspi *dspi = spi_master_get_devdata(master); |
282 | dspi->cur_transfer = t; | 288 | struct spi_device *spi = message->spi; |
283 | dspi->cur_chip = spi_get_ctldata(spi); | 289 | struct spi_transfer *transfer; |
284 | dspi->cs = spi->chip_select; | 290 | int status = 0; |
285 | dspi->void_write_data = dspi->cur_chip->void_write_data; | 291 | message->actual_length = 0; |
286 | 292 | ||
287 | dspi->dataflags = 0; | 293 | list_for_each_entry(transfer, &message->transfers, transfer_list) { |
288 | dspi->tx = (void *)t->tx_buf; | 294 | dspi->cur_transfer = transfer; |
289 | dspi->tx_end = dspi->tx + t->len; | 295 | dspi->cur_msg = message; |
290 | dspi->rx = t->rx_buf; | 296 | dspi->cur_chip = spi_get_ctldata(spi); |
291 | dspi->rx_end = dspi->rx + t->len; | 297 | dspi->cs = spi->chip_select; |
292 | dspi->len = t->len; | 298 | if (dspi->cur_transfer->transfer_list.next |
293 | 299 | == &dspi->cur_msg->transfers) | |
294 | if (!dspi->rx) | 300 | transfer->cs_change = 1; |
295 | dspi->dataflags |= TRAN_STATE_RX_VOID; | 301 | dspi->cs_change = transfer->cs_change; |
296 | 302 | dspi->void_write_data = dspi->cur_chip->void_write_data; | |
297 | if (!dspi->tx) | 303 | |
298 | dspi->dataflags |= TRAN_STATE_TX_VOID; | 304 | dspi->dataflags = 0; |
299 | 305 | dspi->tx = (void *)transfer->tx_buf; | |
300 | regmap_write(dspi->regmap, SPI_MCR, dspi->cur_chip->mcr_val); | 306 | dspi->tx_end = dspi->tx + transfer->len; |
301 | regmap_write(dspi->regmap, SPI_CTAR(dspi->cs), dspi->cur_chip->ctar_val); | 307 | dspi->rx = transfer->rx_buf; |
302 | regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE); | 308 | dspi->rx_end = dspi->rx + transfer->len; |
303 | 309 | dspi->len = transfer->len; | |
304 | if (t->speed_hz) | 310 | |
311 | if (!dspi->rx) | ||
312 | dspi->dataflags |= TRAN_STATE_RX_VOID; | ||
313 | |||
314 | if (!dspi->tx) | ||
315 | dspi->dataflags |= TRAN_STATE_TX_VOID; | ||
316 | |||
317 | regmap_write(dspi->regmap, SPI_MCR, dspi->cur_chip->mcr_val); | ||
318 | regmap_update_bits(dspi->regmap, SPI_MCR, | ||
319 | SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF, | ||
320 | SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF); | ||
305 | regmap_write(dspi->regmap, SPI_CTAR(dspi->cs), | 321 | regmap_write(dspi->regmap, SPI_CTAR(dspi->cs), |
306 | dspi->cur_chip->ctar_val); | 322 | dspi->cur_chip->ctar_val); |
323 | if (transfer->speed_hz) | ||
324 | regmap_write(dspi->regmap, SPI_CTAR(dspi->cs), | ||
325 | dspi->cur_chip->ctar_val); | ||
307 | 326 | ||
308 | dspi_transfer_write(dspi); | 327 | regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE); |
328 | message->actual_length += dspi_transfer_write(dspi); | ||
309 | 329 | ||
310 | if (wait_event_interruptible(dspi->waitq, dspi->waitflags)) | 330 | if (wait_event_interruptible(dspi->waitq, dspi->waitflags)) |
311 | dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n"); | 331 | dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n"); |
312 | dspi->waitflags = 0; | 332 | dspi->waitflags = 0; |
313 | |||
314 | return t->len - dspi->len; | ||
315 | } | ||
316 | 333 | ||
317 | static void dspi_chipselect(struct spi_device *spi, int value) | 334 | if (transfer->delay_usecs) |
318 | { | 335 | udelay(transfer->delay_usecs); |
319 | struct fsl_dspi *dspi = spi_master_get_devdata(spi->master); | ||
320 | unsigned int pushr; | ||
321 | |||
322 | regmap_read(dspi->regmap, SPI_PUSHR, &pushr); | ||
323 | |||
324 | switch (value) { | ||
325 | case BITBANG_CS_ACTIVE: | ||
326 | pushr |= SPI_PUSHR_CONT; | ||
327 | break; | ||
328 | case BITBANG_CS_INACTIVE: | ||
329 | pushr &= ~SPI_PUSHR_CONT; | ||
330 | break; | ||
331 | } | 336 | } |
332 | 337 | ||
333 | regmap_write(dspi->regmap, SPI_PUSHR, pushr); | 338 | message->status = status; |
339 | spi_finalize_current_message(master); | ||
340 | |||
341 | return status; | ||
334 | } | 342 | } |
335 | 343 | ||
336 | static int dspi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) | 344 | static int dspi_setup(struct spi_device *spi) |
337 | { | 345 | { |
338 | struct chip_data *chip; | 346 | struct chip_data *chip; |
339 | struct fsl_dspi *dspi = spi_master_get_devdata(spi->master); | 347 | struct fsl_dspi *dspi = spi_master_get_devdata(spi->master); |
340 | unsigned char br = 0, pbr = 0, fmsz = 0; | 348 | unsigned char br = 0, pbr = 0, fmsz = 0; |
341 | 349 | ||
350 | if ((spi->bits_per_word >= 4) && (spi->bits_per_word <= 16)) { | ||
351 | fmsz = spi->bits_per_word - 1; | ||
352 | } else { | ||
353 | pr_err("Invalid wordsize\n"); | ||
354 | return -ENODEV; | ||
355 | } | ||
356 | |||
342 | /* Only alloc on first setup */ | 357 | /* Only alloc on first setup */ |
343 | chip = spi_get_ctldata(spi); | 358 | chip = spi_get_ctldata(spi); |
344 | if (chip == NULL) { | 359 | if (chip == NULL) { |
345 | chip = devm_kzalloc(&spi->dev, sizeof(struct chip_data), | 360 | chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); |
346 | GFP_KERNEL); | ||
347 | if (!chip) | 361 | if (!chip) |
348 | return -ENOMEM; | 362 | return -ENOMEM; |
349 | } | 363 | } |
350 | 364 | ||
351 | chip->mcr_val = SPI_MCR_MASTER | SPI_MCR_PCSIS | | 365 | chip->mcr_val = SPI_MCR_MASTER | SPI_MCR_PCSIS | |
352 | SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF; | 366 | SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF; |
353 | if ((spi->bits_per_word >= 4) && (spi->bits_per_word <= 16)) { | ||
354 | fmsz = spi->bits_per_word - 1; | ||
355 | } else { | ||
356 | pr_err("Invalid wordsize\n"); | ||
357 | return -ENODEV; | ||
358 | } | ||
359 | 367 | ||
360 | chip->void_write_data = 0; | 368 | chip->void_write_data = 0; |
361 | 369 | ||
@@ -374,34 +382,34 @@ static int dspi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) | |||
374 | return 0; | 382 | return 0; |
375 | } | 383 | } |
376 | 384 | ||
377 | static int dspi_setup(struct spi_device *spi) | 385 | static void dspi_cleanup(struct spi_device *spi) |
378 | { | 386 | { |
379 | if (!spi->max_speed_hz) | 387 | struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi); |
380 | return -EINVAL; | 388 | |
389 | dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n", | ||
390 | spi->master->bus_num, spi->chip_select); | ||
381 | 391 | ||
382 | return dspi_setup_transfer(spi, NULL); | 392 | kfree(chip); |
383 | } | 393 | } |
384 | 394 | ||
385 | static irqreturn_t dspi_interrupt(int irq, void *dev_id) | 395 | static irqreturn_t dspi_interrupt(int irq, void *dev_id) |
386 | { | 396 | { |
387 | struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id; | 397 | struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id; |
388 | 398 | ||
389 | regmap_write(dspi->regmap, SPI_SR, SPI_SR_EOQF); | 399 | struct spi_message *msg = dspi->cur_msg; |
390 | 400 | ||
401 | regmap_write(dspi->regmap, SPI_SR, SPI_SR_EOQF); | ||
391 | dspi_transfer_read(dspi); | 402 | dspi_transfer_read(dspi); |
392 | 403 | ||
393 | if (!dspi->len) { | 404 | if (!dspi->len) { |
394 | if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM) | 405 | if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM) |
395 | regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs), | 406 | regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs), |
396 | SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(16)); | 407 | SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(16)); |
397 | 408 | ||
398 | dspi->waitflags = 1; | 409 | dspi->waitflags = 1; |
399 | wake_up_interruptible(&dspi->waitq); | 410 | wake_up_interruptible(&dspi->waitq); |
400 | } else { | 411 | } else |
401 | dspi_transfer_write(dspi); | 412 | msg->actual_length += dspi_transfer_write(dspi); |
402 | |||
403 | return IRQ_HANDLED; | ||
404 | } | ||
405 | 413 | ||
406 | return IRQ_HANDLED; | 414 | return IRQ_HANDLED; |
407 | } | 415 | } |
@@ -460,13 +468,14 @@ static int dspi_probe(struct platform_device *pdev) | |||
460 | 468 | ||
461 | dspi = spi_master_get_devdata(master); | 469 | dspi = spi_master_get_devdata(master); |
462 | dspi->pdev = pdev; | 470 | dspi->pdev = pdev; |
463 | dspi->bitbang.master = master; | 471 | dspi->master = master; |
464 | dspi->bitbang.chipselect = dspi_chipselect; | 472 | |
465 | dspi->bitbang.setup_transfer = dspi_setup_transfer; | 473 | master->transfer = NULL; |
466 | dspi->bitbang.txrx_bufs = dspi_txrx_transfer; | 474 | master->setup = dspi_setup; |
467 | dspi->bitbang.master->setup = dspi_setup; | 475 | master->transfer_one_message = dspi_transfer_one_message; |
468 | dspi->bitbang.master->dev.of_node = pdev->dev.of_node; | 476 | master->dev.of_node = pdev->dev.of_node; |
469 | 477 | ||
478 | master->cleanup = dspi_cleanup; | ||
470 | master->mode_bits = SPI_CPOL | SPI_CPHA; | 479 | master->mode_bits = SPI_CPOL | SPI_CPHA; |
471 | master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) | | 480 | master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) | |
472 | SPI_BPW_MASK(16); | 481 | SPI_BPW_MASK(16); |
@@ -525,7 +534,7 @@ static int dspi_probe(struct platform_device *pdev) | |||
525 | init_waitqueue_head(&dspi->waitq); | 534 | init_waitqueue_head(&dspi->waitq); |
526 | platform_set_drvdata(pdev, master); | 535 | platform_set_drvdata(pdev, master); |
527 | 536 | ||
528 | ret = spi_bitbang_start(&dspi->bitbang); | 537 | ret = spi_register_master(master); |
529 | if (ret != 0) { | 538 | if (ret != 0) { |
530 | dev_err(&pdev->dev, "Problem registering DSPI master\n"); | 539 | dev_err(&pdev->dev, "Problem registering DSPI master\n"); |
531 | goto out_clk_put; | 540 | goto out_clk_put; |
@@ -547,9 +556,9 @@ static int dspi_remove(struct platform_device *pdev) | |||
547 | struct fsl_dspi *dspi = spi_master_get_devdata(master); | 556 | struct fsl_dspi *dspi = spi_master_get_devdata(master); |
548 | 557 | ||
549 | /* Disconnect from the SPI framework */ | 558 | /* Disconnect from the SPI framework */ |
550 | spi_bitbang_stop(&dspi->bitbang); | ||
551 | clk_disable_unprepare(dspi->clk); | 559 | clk_disable_unprepare(dspi->clk); |
552 | spi_master_put(dspi->bitbang.master); | 560 | spi_unregister_master(dspi->master); |
561 | spi_master_put(dspi->master); | ||
553 | 562 | ||
554 | return 0; | 563 | return 0; |
555 | } | 564 | } |
diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c index 446b737e1532..cb35d2f0d0e6 100644 --- a/drivers/spi/spi-fsl-lib.c +++ b/drivers/spi/spi-fsl-lib.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
22 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
23 | #include <linux/mm.h> | 23 | #include <linux/mm.h> |
24 | #include <linux/module.h> | ||
24 | #include <linux/of_platform.h> | 25 | #include <linux/of_platform.h> |
25 | #include <linux/spi/spi.h> | 26 | #include <linux/spi/spi.h> |
26 | #ifdef CONFIG_FSL_SOC | 27 | #ifdef CONFIG_FSL_SOC |
@@ -35,7 +36,8 @@ void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \ | |||
35 | type *rx = mpc8xxx_spi->rx; \ | 36 | type *rx = mpc8xxx_spi->rx; \ |
36 | *rx++ = (type)(data >> mpc8xxx_spi->rx_shift); \ | 37 | *rx++ = (type)(data >> mpc8xxx_spi->rx_shift); \ |
37 | mpc8xxx_spi->rx = rx; \ | 38 | mpc8xxx_spi->rx = rx; \ |
38 | } | 39 | } \ |
40 | EXPORT_SYMBOL_GPL(mpc8xxx_spi_rx_buf_##type); | ||
39 | 41 | ||
40 | #define MPC8XXX_SPI_TX_BUF(type) \ | 42 | #define MPC8XXX_SPI_TX_BUF(type) \ |
41 | u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \ | 43 | u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \ |
@@ -47,7 +49,8 @@ u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \ | |||
47 | data = *tx++ << mpc8xxx_spi->tx_shift; \ | 49 | data = *tx++ << mpc8xxx_spi->tx_shift; \ |
48 | mpc8xxx_spi->tx = tx; \ | 50 | mpc8xxx_spi->tx = tx; \ |
49 | return data; \ | 51 | return data; \ |
50 | } | 52 | } \ |
53 | EXPORT_SYMBOL_GPL(mpc8xxx_spi_tx_buf_##type); | ||
51 | 54 | ||
52 | MPC8XXX_SPI_RX_BUF(u8) | 55 | MPC8XXX_SPI_RX_BUF(u8) |
53 | MPC8XXX_SPI_RX_BUF(u16) | 56 | MPC8XXX_SPI_RX_BUF(u16) |
@@ -60,6 +63,7 @@ struct mpc8xxx_spi_probe_info *to_of_pinfo(struct fsl_spi_platform_data *pdata) | |||
60 | { | 63 | { |
61 | return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata); | 64 | return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata); |
62 | } | 65 | } |
66 | EXPORT_SYMBOL_GPL(to_of_pinfo); | ||
63 | 67 | ||
64 | const char *mpc8xxx_spi_strmode(unsigned int flags) | 68 | const char *mpc8xxx_spi_strmode(unsigned int flags) |
65 | { | 69 | { |
@@ -75,6 +79,7 @@ const char *mpc8xxx_spi_strmode(unsigned int flags) | |||
75 | } | 79 | } |
76 | return "CPU"; | 80 | return "CPU"; |
77 | } | 81 | } |
82 | EXPORT_SYMBOL_GPL(mpc8xxx_spi_strmode); | ||
78 | 83 | ||
79 | void mpc8xxx_spi_probe(struct device *dev, struct resource *mem, | 84 | void mpc8xxx_spi_probe(struct device *dev, struct resource *mem, |
80 | unsigned int irq) | 85 | unsigned int irq) |
@@ -102,13 +107,12 @@ void mpc8xxx_spi_probe(struct device *dev, struct resource *mem, | |||
102 | mpc8xxx_spi->rx_shift = 0; | 107 | mpc8xxx_spi->rx_shift = 0; |
103 | mpc8xxx_spi->tx_shift = 0; | 108 | mpc8xxx_spi->tx_shift = 0; |
104 | 109 | ||
105 | init_completion(&mpc8xxx_spi->done); | ||
106 | |||
107 | master->bus_num = pdata->bus_num; | 110 | master->bus_num = pdata->bus_num; |
108 | master->num_chipselect = pdata->max_chipselect; | 111 | master->num_chipselect = pdata->max_chipselect; |
109 | 112 | ||
110 | init_completion(&mpc8xxx_spi->done); | 113 | init_completion(&mpc8xxx_spi->done); |
111 | } | 114 | } |
115 | EXPORT_SYMBOL_GPL(mpc8xxx_spi_probe); | ||
112 | 116 | ||
113 | int mpc8xxx_spi_remove(struct device *dev) | 117 | int mpc8xxx_spi_remove(struct device *dev) |
114 | { | 118 | { |
@@ -127,6 +131,7 @@ int mpc8xxx_spi_remove(struct device *dev) | |||
127 | 131 | ||
128 | return 0; | 132 | return 0; |
129 | } | 133 | } |
134 | EXPORT_SYMBOL_GPL(mpc8xxx_spi_remove); | ||
130 | 135 | ||
131 | int of_mpc8xxx_spi_probe(struct platform_device *ofdev) | 136 | int of_mpc8xxx_spi_probe(struct platform_device *ofdev) |
132 | { | 137 | { |
@@ -173,3 +178,6 @@ int of_mpc8xxx_spi_probe(struct platform_device *ofdev) | |||
173 | 178 | ||
174 | return 0; | 179 | return 0; |
175 | } | 180 | } |
181 | EXPORT_SYMBOL_GPL(of_mpc8xxx_spi_probe); | ||
182 | |||
183 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/spi/spi-fsl-lib.h b/drivers/spi/spi-fsl-lib.h index b4ed04e8862f..1326a392adca 100644 --- a/drivers/spi/spi-fsl-lib.h +++ b/drivers/spi/spi-fsl-lib.h | |||
@@ -28,7 +28,7 @@ struct mpc8xxx_spi { | |||
28 | /* rx & tx bufs from the spi_transfer */ | 28 | /* rx & tx bufs from the spi_transfer */ |
29 | const void *tx; | 29 | const void *tx; |
30 | void *rx; | 30 | void *rx; |
31 | #ifdef CONFIG_SPI_FSL_ESPI | 31 | #if IS_ENABLED(CONFIG_SPI_FSL_ESPI) |
32 | int len; | 32 | int len; |
33 | #endif | 33 | #endif |
34 | 34 | ||
@@ -68,7 +68,7 @@ struct mpc8xxx_spi { | |||
68 | 68 | ||
69 | unsigned int flags; | 69 | unsigned int flags; |
70 | 70 | ||
71 | #ifdef CONFIG_SPI_FSL_SPI | 71 | #if IS_ENABLED(CONFIG_SPI_FSL_SPI) |
72 | int type; | 72 | int type; |
73 | int native_chipselects; | 73 | int native_chipselects; |
74 | u8 max_bits_per_word; | 74 | u8 max_bits_per_word; |
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c index aee4e7589568..1c34c9314c8a 100644 --- a/drivers/spi/spi-gpio.c +++ b/drivers/spi/spi-gpio.c | |||
@@ -12,10 +12,6 @@ | |||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
19 | */ | 15 | */ |
20 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
21 | #include <linux/module.h> | 17 | #include <linux/module.h> |
@@ -92,7 +88,7 @@ struct spi_gpio { | |||
92 | 88 | ||
93 | /*----------------------------------------------------------------------*/ | 89 | /*----------------------------------------------------------------------*/ |
94 | 90 | ||
95 | static inline struct spi_gpio * __pure | 91 | static inline struct spi_gpio *__pure |
96 | spi_to_spi_gpio(const struct spi_device *spi) | 92 | spi_to_spi_gpio(const struct spi_device *spi) |
97 | { | 93 | { |
98 | const struct spi_bitbang *bang; | 94 | const struct spi_bitbang *bang; |
@@ -103,7 +99,7 @@ spi_to_spi_gpio(const struct spi_device *spi) | |||
103 | return spi_gpio; | 99 | return spi_gpio; |
104 | } | 100 | } |
105 | 101 | ||
106 | static inline struct spi_gpio_platform_data * __pure | 102 | static inline struct spi_gpio_platform_data *__pure |
107 | spi_to_pdata(const struct spi_device *spi) | 103 | spi_to_pdata(const struct spi_device *spi) |
108 | { | 104 | { |
109 | return &spi_to_spi_gpio(spi)->pdata; | 105 | return &spi_to_spi_gpio(spi)->pdata; |
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c index aad6683db81b..c01567d53581 100644 --- a/drivers/spi/spi-img-spfi.c +++ b/drivers/spi/spi-img-spfi.c | |||
@@ -160,16 +160,16 @@ static unsigned int spfi_pio_write32(struct img_spfi *spfi, const u32 *buf, | |||
160 | unsigned int count = 0; | 160 | unsigned int count = 0; |
161 | u32 status; | 161 | u32 status; |
162 | 162 | ||
163 | while (count < max) { | 163 | while (count < max / 4) { |
164 | spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR); | 164 | spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR); |
165 | status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS); | 165 | status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS); |
166 | if (status & SPFI_INTERRUPT_SDFUL) | 166 | if (status & SPFI_INTERRUPT_SDFUL) |
167 | break; | 167 | break; |
168 | spfi_writel(spfi, buf[count / 4], SPFI_TX_32BIT_VALID_DATA); | 168 | spfi_writel(spfi, buf[count], SPFI_TX_32BIT_VALID_DATA); |
169 | count += 4; | 169 | count++; |
170 | } | 170 | } |
171 | 171 | ||
172 | return count; | 172 | return count * 4; |
173 | } | 173 | } |
174 | 174 | ||
175 | static unsigned int spfi_pio_write8(struct img_spfi *spfi, const u8 *buf, | 175 | static unsigned int spfi_pio_write8(struct img_spfi *spfi, const u8 *buf, |
@@ -196,17 +196,17 @@ static unsigned int spfi_pio_read32(struct img_spfi *spfi, u32 *buf, | |||
196 | unsigned int count = 0; | 196 | unsigned int count = 0; |
197 | u32 status; | 197 | u32 status; |
198 | 198 | ||
199 | while (count < max) { | 199 | while (count < max / 4) { |
200 | spfi_writel(spfi, SPFI_INTERRUPT_GDEX32BIT, | 200 | spfi_writel(spfi, SPFI_INTERRUPT_GDEX32BIT, |
201 | SPFI_INTERRUPT_CLEAR); | 201 | SPFI_INTERRUPT_CLEAR); |
202 | status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS); | 202 | status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS); |
203 | if (!(status & SPFI_INTERRUPT_GDEX32BIT)) | 203 | if (!(status & SPFI_INTERRUPT_GDEX32BIT)) |
204 | break; | 204 | break; |
205 | buf[count / 4] = spfi_readl(spfi, SPFI_RX_32BIT_VALID_DATA); | 205 | buf[count] = spfi_readl(spfi, SPFI_RX_32BIT_VALID_DATA); |
206 | count += 4; | 206 | count++; |
207 | } | 207 | } |
208 | 208 | ||
209 | return count; | 209 | return count * 4; |
210 | } | 210 | } |
211 | 211 | ||
212 | static unsigned int spfi_pio_read8(struct img_spfi *spfi, u8 *buf, | 212 | static unsigned int spfi_pio_read8(struct img_spfi *spfi, u8 *buf, |
@@ -251,17 +251,15 @@ static int img_spfi_start_pio(struct spi_master *master, | |||
251 | time_before(jiffies, timeout)) { | 251 | time_before(jiffies, timeout)) { |
252 | unsigned int tx_count, rx_count; | 252 | unsigned int tx_count, rx_count; |
253 | 253 | ||
254 | switch (xfer->bits_per_word) { | 254 | if (tx_bytes >= 4) |
255 | case 32: | ||
256 | tx_count = spfi_pio_write32(spfi, tx_buf, tx_bytes); | 255 | tx_count = spfi_pio_write32(spfi, tx_buf, tx_bytes); |
257 | rx_count = spfi_pio_read32(spfi, rx_buf, rx_bytes); | 256 | else |
258 | break; | ||
259 | case 8: | ||
260 | default: | ||
261 | tx_count = spfi_pio_write8(spfi, tx_buf, tx_bytes); | 257 | tx_count = spfi_pio_write8(spfi, tx_buf, tx_bytes); |
258 | |||
259 | if (rx_bytes >= 4) | ||
260 | rx_count = spfi_pio_read32(spfi, rx_buf, rx_bytes); | ||
261 | else | ||
262 | rx_count = spfi_pio_read8(spfi, rx_buf, rx_bytes); | 262 | rx_count = spfi_pio_read8(spfi, rx_buf, rx_bytes); |
263 | break; | ||
264 | } | ||
265 | 263 | ||
266 | tx_buf += tx_count; | 264 | tx_buf += tx_count; |
267 | rx_buf += rx_count; | 265 | rx_buf += rx_count; |
@@ -331,14 +329,11 @@ static int img_spfi_start_dma(struct spi_master *master, | |||
331 | 329 | ||
332 | if (xfer->rx_buf) { | 330 | if (xfer->rx_buf) { |
333 | rxconf.direction = DMA_DEV_TO_MEM; | 331 | rxconf.direction = DMA_DEV_TO_MEM; |
334 | switch (xfer->bits_per_word) { | 332 | if (xfer->len % 4 == 0) { |
335 | case 32: | ||
336 | rxconf.src_addr = spfi->phys + SPFI_RX_32BIT_VALID_DATA; | 333 | rxconf.src_addr = spfi->phys + SPFI_RX_32BIT_VALID_DATA; |
337 | rxconf.src_addr_width = 4; | 334 | rxconf.src_addr_width = 4; |
338 | rxconf.src_maxburst = 4; | 335 | rxconf.src_maxburst = 4; |
339 | break; | 336 | } else { |
340 | case 8: | ||
341 | default: | ||
342 | rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA; | 337 | rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA; |
343 | rxconf.src_addr_width = 1; | 338 | rxconf.src_addr_width = 1; |
344 | rxconf.src_maxburst = 4; | 339 | rxconf.src_maxburst = 4; |
@@ -358,18 +353,14 @@ static int img_spfi_start_dma(struct spi_master *master, | |||
358 | 353 | ||
359 | if (xfer->tx_buf) { | 354 | if (xfer->tx_buf) { |
360 | txconf.direction = DMA_MEM_TO_DEV; | 355 | txconf.direction = DMA_MEM_TO_DEV; |
361 | switch (xfer->bits_per_word) { | 356 | if (xfer->len % 4 == 0) { |
362 | case 32: | ||
363 | txconf.dst_addr = spfi->phys + SPFI_TX_32BIT_VALID_DATA; | 357 | txconf.dst_addr = spfi->phys + SPFI_TX_32BIT_VALID_DATA; |
364 | txconf.dst_addr_width = 4; | 358 | txconf.dst_addr_width = 4; |
365 | txconf.dst_maxburst = 4; | 359 | txconf.dst_maxburst = 4; |
366 | break; | 360 | } else { |
367 | case 8: | ||
368 | default: | ||
369 | txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA; | 361 | txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA; |
370 | txconf.dst_addr_width = 1; | 362 | txconf.dst_addr_width = 1; |
371 | txconf.dst_maxburst = 4; | 363 | txconf.dst_maxburst = 4; |
372 | break; | ||
373 | } | 364 | } |
374 | dmaengine_slave_config(spfi->tx_ch, &txconf); | 365 | dmaengine_slave_config(spfi->tx_ch, &txconf); |
375 | 366 | ||
@@ -508,9 +499,7 @@ static void img_spfi_set_cs(struct spi_device *spi, bool enable) | |||
508 | static bool img_spfi_can_dma(struct spi_master *master, struct spi_device *spi, | 499 | static bool img_spfi_can_dma(struct spi_master *master, struct spi_device *spi, |
509 | struct spi_transfer *xfer) | 500 | struct spi_transfer *xfer) |
510 | { | 501 | { |
511 | if (xfer->bits_per_word == 8 && xfer->len > SPFI_8BIT_FIFO_SIZE) | 502 | if (xfer->len > SPFI_32BIT_FIFO_SIZE) |
512 | return true; | ||
513 | if (xfer->bits_per_word == 32 && xfer->len > SPFI_32BIT_FIFO_SIZE) | ||
514 | return true; | 503 | return true; |
515 | return false; | 504 | return false; |
516 | } | 505 | } |
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index 961b97d43b43..6fea4af51c41 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c | |||
@@ -89,7 +89,6 @@ struct spi_imx_data { | |||
89 | 89 | ||
90 | struct completion xfer_done; | 90 | struct completion xfer_done; |
91 | void __iomem *base; | 91 | void __iomem *base; |
92 | int irq; | ||
93 | struct clk *clk_per; | 92 | struct clk *clk_per; |
94 | struct clk *clk_ipg; | 93 | struct clk *clk_ipg; |
95 | unsigned long spi_clk; | 94 | unsigned long spi_clk; |
@@ -823,6 +822,10 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx, | |||
823 | struct dma_slave_config slave_config = {}; | 822 | struct dma_slave_config slave_config = {}; |
824 | int ret; | 823 | int ret; |
825 | 824 | ||
825 | /* use pio mode for i.mx6dl chip TKT238285 */ | ||
826 | if (of_machine_is_compatible("fsl,imx6dl")) | ||
827 | return 0; | ||
828 | |||
826 | /* Prepare for TX DMA: */ | 829 | /* Prepare for TX DMA: */ |
827 | master->dma_tx = dma_request_slave_channel(dev, "tx"); | 830 | master->dma_tx = dma_request_slave_channel(dev, "tx"); |
828 | if (!master->dma_tx) { | 831 | if (!master->dma_tx) { |
@@ -892,6 +895,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx, | |||
892 | { | 895 | { |
893 | struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL; | 896 | struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL; |
894 | int ret; | 897 | int ret; |
898 | unsigned long timeout; | ||
895 | u32 dma; | 899 | u32 dma; |
896 | int left; | 900 | int left; |
897 | struct spi_master *master = spi_imx->bitbang.master; | 901 | struct spi_master *master = spi_imx->bitbang.master; |
@@ -939,17 +943,17 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx, | |||
939 | dma_async_issue_pending(master->dma_tx); | 943 | dma_async_issue_pending(master->dma_tx); |
940 | dma_async_issue_pending(master->dma_rx); | 944 | dma_async_issue_pending(master->dma_rx); |
941 | /* Wait SDMA to finish the data transfer.*/ | 945 | /* Wait SDMA to finish the data transfer.*/ |
942 | ret = wait_for_completion_timeout(&spi_imx->dma_tx_completion, | 946 | timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion, |
943 | IMX_DMA_TIMEOUT); | 947 | IMX_DMA_TIMEOUT); |
944 | if (!ret) { | 948 | if (!timeout) { |
945 | pr_warn("%s %s: I/O Error in DMA TX\n", | 949 | pr_warn("%s %s: I/O Error in DMA TX\n", |
946 | dev_driver_string(&master->dev), | 950 | dev_driver_string(&master->dev), |
947 | dev_name(&master->dev)); | 951 | dev_name(&master->dev)); |
948 | dmaengine_terminate_all(master->dma_tx); | 952 | dmaengine_terminate_all(master->dma_tx); |
949 | } else { | 953 | } else { |
950 | ret = wait_for_completion_timeout(&spi_imx->dma_rx_completion, | 954 | timeout = wait_for_completion_timeout( |
951 | IMX_DMA_TIMEOUT); | 955 | &spi_imx->dma_rx_completion, IMX_DMA_TIMEOUT); |
952 | if (!ret) { | 956 | if (!timeout) { |
953 | pr_warn("%s %s: I/O Error in DMA RX\n", | 957 | pr_warn("%s %s: I/O Error in DMA RX\n", |
954 | dev_driver_string(&master->dev), | 958 | dev_driver_string(&master->dev), |
955 | dev_name(&master->dev)); | 959 | dev_name(&master->dev)); |
@@ -964,9 +968,9 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx, | |||
964 | spi_imx->dma_finished = 1; | 968 | spi_imx->dma_finished = 1; |
965 | spi_imx->devtype_data->trigger(spi_imx); | 969 | spi_imx->devtype_data->trigger(spi_imx); |
966 | 970 | ||
967 | if (!ret) | 971 | if (!timeout) |
968 | ret = -ETIMEDOUT; | 972 | ret = -ETIMEDOUT; |
969 | else if (ret > 0) | 973 | else |
970 | ret = transfer->len; | 974 | ret = transfer->len; |
971 | 975 | ||
972 | return ret; | 976 | return ret; |
@@ -1076,7 +1080,7 @@ static int spi_imx_probe(struct platform_device *pdev) | |||
1076 | struct spi_master *master; | 1080 | struct spi_master *master; |
1077 | struct spi_imx_data *spi_imx; | 1081 | struct spi_imx_data *spi_imx; |
1078 | struct resource *res; | 1082 | struct resource *res; |
1079 | int i, ret, num_cs; | 1083 | int i, ret, num_cs, irq; |
1080 | 1084 | ||
1081 | if (!np && !mxc_platform_info) { | 1085 | if (!np && !mxc_platform_info) { |
1082 | dev_err(&pdev->dev, "can't get the platform data\n"); | 1086 | dev_err(&pdev->dev, "can't get the platform data\n"); |
@@ -1143,16 +1147,16 @@ static int spi_imx_probe(struct platform_device *pdev) | |||
1143 | goto out_master_put; | 1147 | goto out_master_put; |
1144 | } | 1148 | } |
1145 | 1149 | ||
1146 | spi_imx->irq = platform_get_irq(pdev, 0); | 1150 | irq = platform_get_irq(pdev, 0); |
1147 | if (spi_imx->irq < 0) { | 1151 | if (irq < 0) { |
1148 | ret = spi_imx->irq; | 1152 | ret = irq; |
1149 | goto out_master_put; | 1153 | goto out_master_put; |
1150 | } | 1154 | } |
1151 | 1155 | ||
1152 | ret = devm_request_irq(&pdev->dev, spi_imx->irq, spi_imx_isr, 0, | 1156 | ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0, |
1153 | dev_name(&pdev->dev), spi_imx); | 1157 | dev_name(&pdev->dev), spi_imx); |
1154 | if (ret) { | 1158 | if (ret) { |
1155 | dev_err(&pdev->dev, "can't get irq%d: %d\n", spi_imx->irq, ret); | 1159 | dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret); |
1156 | goto out_master_put; | 1160 | goto out_master_put; |
1157 | } | 1161 | } |
1158 | 1162 | ||
diff --git a/drivers/spi/spi-lm70llp.c b/drivers/spi/spi-lm70llp.c index 41c5765be746..ba72347cb99d 100644 --- a/drivers/spi/spi-lm70llp.c +++ b/drivers/spi/spi-lm70llp.c | |||
@@ -12,10 +12,6 @@ | |||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
19 | */ | 15 | */ |
20 | 16 | ||
21 | #include <linux/init.h> | 17 | #include <linux/init.h> |
diff --git a/drivers/spi/spi-meson-spifc.c b/drivers/spi/spi-meson-spifc.c index 1bbac0378bf7..5468fc70dbf8 100644 --- a/drivers/spi/spi-meson-spifc.c +++ b/drivers/spi/spi-meson-spifc.c | |||
@@ -85,7 +85,7 @@ struct meson_spifc { | |||
85 | struct device *dev; | 85 | struct device *dev; |
86 | }; | 86 | }; |
87 | 87 | ||
88 | static struct regmap_config spifc_regmap_config = { | 88 | static const struct regmap_config spifc_regmap_config = { |
89 | .reg_bits = 32, | 89 | .reg_bits = 32, |
90 | .val_bits = 32, | 90 | .val_bits = 32, |
91 | .reg_stride = 4, | 91 | .reg_stride = 4, |
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c index 4045a1e580e1..5b0e9a3e83f6 100644 --- a/drivers/spi/spi-mxs.c +++ b/drivers/spi/spi-mxs.c | |||
@@ -282,9 +282,8 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi, | |||
282 | dmaengine_submit(desc); | 282 | dmaengine_submit(desc); |
283 | dma_async_issue_pending(ssp->dmach); | 283 | dma_async_issue_pending(ssp->dmach); |
284 | 284 | ||
285 | ret = wait_for_completion_timeout(&spi->c, | 285 | if (!wait_for_completion_timeout(&spi->c, |
286 | msecs_to_jiffies(SSP_TIMEOUT)); | 286 | msecs_to_jiffies(SSP_TIMEOUT))) { |
287 | if (!ret) { | ||
288 | dev_err(ssp->dev, "DMA transfer timeout\n"); | 287 | dev_err(ssp->dev, "DMA transfer timeout\n"); |
289 | ret = -ETIMEDOUT; | 288 | ret = -ETIMEDOUT; |
290 | dmaengine_terminate_all(ssp->dmach); | 289 | dmaengine_terminate_all(ssp->dmach); |
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c index 79399ae9c84c..d890d309dff9 100644 --- a/drivers/spi/spi-omap-100k.c +++ b/drivers/spi/spi-omap-100k.c | |||
@@ -16,11 +16,6 @@ | |||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
18 | * GNU General Public License for more details. | 18 | * GNU General Public License for more details. |
19 | * | ||
20 | * You should have received a copy of the GNU General Public License | ||
21 | * along with this program; if not, write to the Free Software | ||
22 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
23 | * | ||
24 | */ | 19 | */ |
25 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
26 | #include <linux/init.h> | 21 | #include <linux/init.h> |
diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c index daf1ada5cd11..3c0844457c07 100644 --- a/drivers/spi/spi-omap-uwire.c +++ b/drivers/spi/spi-omap-uwire.c | |||
@@ -28,10 +28,6 @@ | |||
28 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 28 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | 29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
30 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 30 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
31 | * | ||
32 | * You should have received a copy of the GNU General Public License along | ||
33 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
34 | * 675 Mass Ave, Cambridge, MA 02139, USA. | ||
35 | */ | 31 | */ |
36 | #include <linux/kernel.h> | 32 | #include <linux/kernel.h> |
37 | #include <linux/init.h> | 33 | #include <linux/init.h> |
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index 3bc3cbabbbc0..4df8942058de 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c | |||
@@ -14,11 +14,6 @@ | |||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
16 | * GNU General Public License for more details. | 16 | * GNU General Public License for more details. |
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
21 | * | ||
22 | */ | 17 | */ |
23 | 18 | ||
24 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c index 3dec9e0b99b8..861664776672 100644 --- a/drivers/spi/spi-orion.c +++ b/drivers/spi/spi-orion.c | |||
@@ -28,7 +28,12 @@ | |||
28 | /* Runtime PM autosuspend timeout: PM is fairly light on this driver */ | 28 | /* Runtime PM autosuspend timeout: PM is fairly light on this driver */ |
29 | #define SPI_AUTOSUSPEND_TIMEOUT 200 | 29 | #define SPI_AUTOSUSPEND_TIMEOUT 200 |
30 | 30 | ||
31 | #define ORION_NUM_CHIPSELECTS 1 /* only one slave is supported*/ | 31 | /* Some SoCs using this driver support up to 8 chip selects. |
32 | * It is up to the implementer to only use the chip selects | ||
33 | * that are available. | ||
34 | */ | ||
35 | #define ORION_NUM_CHIPSELECTS 8 | ||
36 | |||
32 | #define ORION_SPI_WAIT_RDY_MAX_LOOP 2000 /* in usec */ | 37 | #define ORION_SPI_WAIT_RDY_MAX_LOOP 2000 /* in usec */ |
33 | 38 | ||
34 | #define ORION_SPI_IF_CTRL_REG 0x00 | 39 | #define ORION_SPI_IF_CTRL_REG 0x00 |
@@ -44,6 +49,10 @@ | |||
44 | #define ARMADA_SPI_CLK_PRESCALE_MASK 0xDF | 49 | #define ARMADA_SPI_CLK_PRESCALE_MASK 0xDF |
45 | #define ORION_SPI_MODE_MASK (ORION_SPI_MODE_CPOL | \ | 50 | #define ORION_SPI_MODE_MASK (ORION_SPI_MODE_CPOL | \ |
46 | ORION_SPI_MODE_CPHA) | 51 | ORION_SPI_MODE_CPHA) |
52 | #define ORION_SPI_CS_MASK 0x1C | ||
53 | #define ORION_SPI_CS_SHIFT 2 | ||
54 | #define ORION_SPI_CS(cs) ((cs << ORION_SPI_CS_SHIFT) & \ | ||
55 | ORION_SPI_CS_MASK) | ||
47 | 56 | ||
48 | enum orion_spi_type { | 57 | enum orion_spi_type { |
49 | ORION_SPI, | 58 | ORION_SPI, |
@@ -215,9 +224,18 @@ orion_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) | |||
215 | return 0; | 224 | return 0; |
216 | } | 225 | } |
217 | 226 | ||
218 | static void orion_spi_set_cs(struct orion_spi *orion_spi, int enable) | 227 | static void orion_spi_set_cs(struct spi_device *spi, bool enable) |
219 | { | 228 | { |
220 | if (enable) | 229 | struct orion_spi *orion_spi; |
230 | |||
231 | orion_spi = spi_master_get_devdata(spi->master); | ||
232 | |||
233 | orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, ORION_SPI_CS_MASK); | ||
234 | orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG, | ||
235 | ORION_SPI_CS(spi->chip_select)); | ||
236 | |||
237 | /* Chip select logic is inverted from spi_set_cs */ | ||
238 | if (!enable) | ||
221 | orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1); | 239 | orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1); |
222 | else | 240 | else |
223 | orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1); | 241 | orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1); |
@@ -332,64 +350,31 @@ out: | |||
332 | return xfer->len - count; | 350 | return xfer->len - count; |
333 | } | 351 | } |
334 | 352 | ||
335 | static int orion_spi_transfer_one_message(struct spi_master *master, | 353 | static int orion_spi_transfer_one(struct spi_master *master, |
336 | struct spi_message *m) | 354 | struct spi_device *spi, |
355 | struct spi_transfer *t) | ||
337 | { | 356 | { |
338 | struct orion_spi *orion_spi = spi_master_get_devdata(master); | ||
339 | struct spi_device *spi = m->spi; | ||
340 | struct spi_transfer *t = NULL; | ||
341 | int par_override = 0; | ||
342 | int status = 0; | 357 | int status = 0; |
343 | int cs_active = 0; | ||
344 | |||
345 | /* Load defaults */ | ||
346 | status = orion_spi_setup_transfer(spi, NULL); | ||
347 | 358 | ||
359 | status = orion_spi_setup_transfer(spi, t); | ||
348 | if (status < 0) | 360 | if (status < 0) |
349 | goto msg_done; | 361 | return status; |
350 | |||
351 | list_for_each_entry(t, &m->transfers, transfer_list) { | ||
352 | if (par_override || t->speed_hz || t->bits_per_word) { | ||
353 | par_override = 1; | ||
354 | status = orion_spi_setup_transfer(spi, t); | ||
355 | if (status < 0) | ||
356 | break; | ||
357 | if (!t->speed_hz && !t->bits_per_word) | ||
358 | par_override = 0; | ||
359 | } | ||
360 | |||
361 | if (!cs_active) { | ||
362 | orion_spi_set_cs(orion_spi, 1); | ||
363 | cs_active = 1; | ||
364 | } | ||
365 | 362 | ||
366 | if (t->len) | 363 | if (t->len) |
367 | m->actual_length += orion_spi_write_read(spi, t); | 364 | orion_spi_write_read(spi, t); |
368 | 365 | ||
369 | if (t->delay_usecs) | 366 | return status; |
370 | udelay(t->delay_usecs); | 367 | } |
371 | |||
372 | if (t->cs_change) { | ||
373 | orion_spi_set_cs(orion_spi, 0); | ||
374 | cs_active = 0; | ||
375 | } | ||
376 | } | ||
377 | |||
378 | msg_done: | ||
379 | if (cs_active) | ||
380 | orion_spi_set_cs(orion_spi, 0); | ||
381 | |||
382 | m->status = status; | ||
383 | spi_finalize_current_message(master); | ||
384 | 368 | ||
385 | return 0; | 369 | static int orion_spi_setup(struct spi_device *spi) |
370 | { | ||
371 | return orion_spi_setup_transfer(spi, NULL); | ||
386 | } | 372 | } |
387 | 373 | ||
388 | static int orion_spi_reset(struct orion_spi *orion_spi) | 374 | static int orion_spi_reset(struct orion_spi *orion_spi) |
389 | { | 375 | { |
390 | /* Verify that the CS is deasserted */ | 376 | /* Verify that the CS is deasserted */ |
391 | orion_spi_set_cs(orion_spi, 0); | 377 | orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1); |
392 | |||
393 | return 0; | 378 | return 0; |
394 | } | 379 | } |
395 | 380 | ||
@@ -442,9 +427,10 @@ static int orion_spi_probe(struct platform_device *pdev) | |||
442 | 427 | ||
443 | /* we support only mode 0, and no options */ | 428 | /* we support only mode 0, and no options */ |
444 | master->mode_bits = SPI_CPHA | SPI_CPOL; | 429 | master->mode_bits = SPI_CPHA | SPI_CPOL; |
445 | 430 | master->set_cs = orion_spi_set_cs; | |
446 | master->transfer_one_message = orion_spi_transfer_one_message; | 431 | master->transfer_one = orion_spi_transfer_one; |
447 | master->num_chipselect = ORION_NUM_CHIPSELECTS; | 432 | master->num_chipselect = ORION_NUM_CHIPSELECTS; |
433 | master->setup = orion_spi_setup; | ||
448 | master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); | 434 | master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); |
449 | master->auto_runtime_pm = true; | 435 | master->auto_runtime_pm = true; |
450 | 436 | ||
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c index 62a9297e96ac..66a173939be8 100644 --- a/drivers/spi/spi-pxa2xx-dma.c +++ b/drivers/spi/spi-pxa2xx-dma.c | |||
@@ -111,23 +111,24 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data, | |||
111 | * by using ->dma_running. | 111 | * by using ->dma_running. |
112 | */ | 112 | */ |
113 | if (atomic_dec_and_test(&drv_data->dma_running)) { | 113 | if (atomic_dec_and_test(&drv_data->dma_running)) { |
114 | void __iomem *reg = drv_data->ioaddr; | ||
115 | |||
116 | /* | 114 | /* |
117 | * If the other CPU is still handling the ROR interrupt we | 115 | * If the other CPU is still handling the ROR interrupt we |
118 | * might not know about the error yet. So we re-check the | 116 | * might not know about the error yet. So we re-check the |
119 | * ROR bit here before we clear the status register. | 117 | * ROR bit here before we clear the status register. |
120 | */ | 118 | */ |
121 | if (!error) { | 119 | if (!error) { |
122 | u32 status = read_SSSR(reg) & drv_data->mask_sr; | 120 | u32 status = pxa2xx_spi_read(drv_data, SSSR) |
121 | & drv_data->mask_sr; | ||
123 | error = status & SSSR_ROR; | 122 | error = status & SSSR_ROR; |
124 | } | 123 | } |
125 | 124 | ||
126 | /* Clear status & disable interrupts */ | 125 | /* Clear status & disable interrupts */ |
127 | write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); | 126 | pxa2xx_spi_write(drv_data, SSCR1, |
127 | pxa2xx_spi_read(drv_data, SSCR1) | ||
128 | & ~drv_data->dma_cr1); | ||
128 | write_SSSR_CS(drv_data, drv_data->clear_sr); | 129 | write_SSSR_CS(drv_data, drv_data->clear_sr); |
129 | if (!pxa25x_ssp_comp(drv_data)) | 130 | if (!pxa25x_ssp_comp(drv_data)) |
130 | write_SSTO(0, reg); | 131 | pxa2xx_spi_write(drv_data, SSTO, 0); |
131 | 132 | ||
132 | if (!error) { | 133 | if (!error) { |
133 | pxa2xx_spi_unmap_dma_buffers(drv_data); | 134 | pxa2xx_spi_unmap_dma_buffers(drv_data); |
@@ -139,7 +140,9 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data, | |||
139 | msg->state = pxa2xx_spi_next_transfer(drv_data); | 140 | msg->state = pxa2xx_spi_next_transfer(drv_data); |
140 | } else { | 141 | } else { |
141 | /* In case we got an error we disable the SSP now */ | 142 | /* In case we got an error we disable the SSP now */ |
142 | write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); | 143 | pxa2xx_spi_write(drv_data, SSCR0, |
144 | pxa2xx_spi_read(drv_data, SSCR0) | ||
145 | & ~SSCR0_SSE); | ||
143 | 146 | ||
144 | msg->state = ERROR_STATE; | 147 | msg->state = ERROR_STATE; |
145 | } | 148 | } |
@@ -247,7 +250,7 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data) | |||
247 | { | 250 | { |
248 | u32 status; | 251 | u32 status; |
249 | 252 | ||
250 | status = read_SSSR(drv_data->ioaddr) & drv_data->mask_sr; | 253 | status = pxa2xx_spi_read(drv_data, SSSR) & drv_data->mask_sr; |
251 | if (status & SSSR_ROR) { | 254 | if (status & SSSR_ROR) { |
252 | dev_err(&drv_data->pdev->dev, "FIFO overrun\n"); | 255 | dev_err(&drv_data->pdev->dev, "FIFO overrun\n"); |
253 | 256 | ||
diff --git a/drivers/spi/spi-pxa2xx-pxadma.c b/drivers/spi/spi-pxa2xx-pxadma.c index e8a26f25d5c0..2e0796a0003f 100644 --- a/drivers/spi/spi-pxa2xx-pxadma.c +++ b/drivers/spi/spi-pxa2xx-pxadma.c | |||
@@ -12,10 +12,6 @@ | |||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
19 | */ | 15 | */ |
20 | 16 | ||
21 | #include <linux/delay.h> | 17 | #include <linux/delay.h> |
@@ -25,6 +21,7 @@ | |||
25 | #include <linux/spi/spi.h> | 21 | #include <linux/spi/spi.h> |
26 | #include <linux/spi/pxa2xx_spi.h> | 22 | #include <linux/spi/pxa2xx_spi.h> |
27 | 23 | ||
24 | #include <mach/dma.h> | ||
28 | #include "spi-pxa2xx.h" | 25 | #include "spi-pxa2xx.h" |
29 | 26 | ||
30 | #define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR) | 27 | #define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR) |
@@ -118,11 +115,11 @@ static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data) | |||
118 | drv_data->dma_mapped = 0; | 115 | drv_data->dma_mapped = 0; |
119 | } | 116 | } |
120 | 117 | ||
121 | static int wait_ssp_rx_stall(void const __iomem *ioaddr) | 118 | static int wait_ssp_rx_stall(struct driver_data *drv_data) |
122 | { | 119 | { |
123 | unsigned long limit = loops_per_jiffy << 1; | 120 | unsigned long limit = loops_per_jiffy << 1; |
124 | 121 | ||
125 | while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit) | 122 | while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit) |
126 | cpu_relax(); | 123 | cpu_relax(); |
127 | 124 | ||
128 | return limit; | 125 | return limit; |
@@ -141,17 +138,18 @@ static int wait_dma_channel_stop(int channel) | |||
141 | static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data, | 138 | static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data, |
142 | const char *msg) | 139 | const char *msg) |
143 | { | 140 | { |
144 | void __iomem *reg = drv_data->ioaddr; | ||
145 | |||
146 | /* Stop and reset */ | 141 | /* Stop and reset */ |
147 | DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; | 142 | DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; |
148 | DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; | 143 | DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; |
149 | write_SSSR_CS(drv_data, drv_data->clear_sr); | 144 | write_SSSR_CS(drv_data, drv_data->clear_sr); |
150 | write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); | 145 | pxa2xx_spi_write(drv_data, SSCR1, |
146 | pxa2xx_spi_read(drv_data, SSCR1) | ||
147 | & ~drv_data->dma_cr1); | ||
151 | if (!pxa25x_ssp_comp(drv_data)) | 148 | if (!pxa25x_ssp_comp(drv_data)) |
152 | write_SSTO(0, reg); | 149 | pxa2xx_spi_write(drv_data, SSTO, 0); |
153 | pxa2xx_spi_flush(drv_data); | 150 | pxa2xx_spi_flush(drv_data); |
154 | write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); | 151 | pxa2xx_spi_write(drv_data, SSCR0, |
152 | pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE); | ||
155 | 153 | ||
156 | pxa2xx_spi_unmap_dma_buffers(drv_data); | 154 | pxa2xx_spi_unmap_dma_buffers(drv_data); |
157 | 155 | ||
@@ -163,11 +161,12 @@ static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data, | |||
163 | 161 | ||
164 | static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data) | 162 | static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data) |
165 | { | 163 | { |
166 | void __iomem *reg = drv_data->ioaddr; | ||
167 | struct spi_message *msg = drv_data->cur_msg; | 164 | struct spi_message *msg = drv_data->cur_msg; |
168 | 165 | ||
169 | /* Clear and disable interrupts on SSP and DMA channels*/ | 166 | /* Clear and disable interrupts on SSP and DMA channels*/ |
170 | write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); | 167 | pxa2xx_spi_write(drv_data, SSCR1, |
168 | pxa2xx_spi_read(drv_data, SSCR1) | ||
169 | & ~drv_data->dma_cr1); | ||
171 | write_SSSR_CS(drv_data, drv_data->clear_sr); | 170 | write_SSSR_CS(drv_data, drv_data->clear_sr); |
172 | DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; | 171 | DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; |
173 | DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; | 172 | DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; |
@@ -228,7 +227,7 @@ void pxa2xx_spi_dma_handler(int channel, void *data) | |||
228 | && (drv_data->ssp_type == PXA25x_SSP)) { | 227 | && (drv_data->ssp_type == PXA25x_SSP)) { |
229 | 228 | ||
230 | /* Wait for rx to stall */ | 229 | /* Wait for rx to stall */ |
231 | if (wait_ssp_rx_stall(drv_data->ioaddr) == 0) | 230 | if (wait_ssp_rx_stall(drv_data) == 0) |
232 | dev_err(&drv_data->pdev->dev, | 231 | dev_err(&drv_data->pdev->dev, |
233 | "dma_handler: ssp rx stall failed\n"); | 232 | "dma_handler: ssp rx stall failed\n"); |
234 | 233 | ||
@@ -240,9 +239,8 @@ void pxa2xx_spi_dma_handler(int channel, void *data) | |||
240 | irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data) | 239 | irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data) |
241 | { | 240 | { |
242 | u32 irq_status; | 241 | u32 irq_status; |
243 | void __iomem *reg = drv_data->ioaddr; | ||
244 | 242 | ||
245 | irq_status = read_SSSR(reg) & drv_data->mask_sr; | 243 | irq_status = pxa2xx_spi_read(drv_data, SSSR) & drv_data->mask_sr; |
246 | if (irq_status & SSSR_ROR) { | 244 | if (irq_status & SSSR_ROR) { |
247 | pxa2xx_spi_dma_error_stop(drv_data, | 245 | pxa2xx_spi_dma_error_stop(drv_data, |
248 | "dma_transfer: fifo overrun"); | 246 | "dma_transfer: fifo overrun"); |
@@ -252,7 +250,7 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data) | |||
252 | /* Check for false positive timeout */ | 250 | /* Check for false positive timeout */ |
253 | if ((irq_status & SSSR_TINT) | 251 | if ((irq_status & SSSR_TINT) |
254 | && (DCSR(drv_data->tx_channel) & DCSR_RUN)) { | 252 | && (DCSR(drv_data->tx_channel) & DCSR_RUN)) { |
255 | write_SSSR(SSSR_TINT, reg); | 253 | pxa2xx_spi_write(drv_data, SSSR, SSSR_TINT); |
256 | return IRQ_HANDLED; | 254 | return IRQ_HANDLED; |
257 | } | 255 | } |
258 | 256 | ||
@@ -261,7 +259,7 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data) | |||
261 | /* Clear and disable timeout interrupt, do the rest in | 259 | /* Clear and disable timeout interrupt, do the rest in |
262 | * dma_transfer_complete */ | 260 | * dma_transfer_complete */ |
263 | if (!pxa25x_ssp_comp(drv_data)) | 261 | if (!pxa25x_ssp_comp(drv_data)) |
264 | write_SSTO(0, reg); | 262 | pxa2xx_spi_write(drv_data, SSTO, 0); |
265 | 263 | ||
266 | /* finish this transfer, start the next */ | 264 | /* finish this transfer, start the next */ |
267 | pxa2xx_spi_dma_transfer_complete(drv_data); | 265 | pxa2xx_spi_dma_transfer_complete(drv_data); |
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 05c623cfb078..6f72ad01e041 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c | |||
@@ -11,10 +11,6 @@ | |||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
13 | * GNU General Public License for more details. | 13 | * GNU General Public License for more details. |
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
18 | */ | 14 | */ |
19 | 15 | ||
20 | #include <linux/init.h> | 16 | #include <linux/init.h> |
@@ -45,8 +41,6 @@ MODULE_DESCRIPTION("PXA2xx SSP SPI Controller"); | |||
45 | MODULE_LICENSE("GPL"); | 41 | MODULE_LICENSE("GPL"); |
46 | MODULE_ALIAS("platform:pxa2xx-spi"); | 42 | MODULE_ALIAS("platform:pxa2xx-spi"); |
47 | 43 | ||
48 | #define MAX_BUSES 3 | ||
49 | |||
50 | #define TIMOUT_DFLT 1000 | 44 | #define TIMOUT_DFLT 1000 |
51 | 45 | ||
52 | /* | 46 | /* |
@@ -162,7 +156,6 @@ pxa2xx_spi_get_rx_default_thre(const struct driver_data *drv_data) | |||
162 | 156 | ||
163 | static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data) | 157 | static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data) |
164 | { | 158 | { |
165 | void __iomem *reg = drv_data->ioaddr; | ||
166 | u32 mask; | 159 | u32 mask; |
167 | 160 | ||
168 | switch (drv_data->ssp_type) { | 161 | switch (drv_data->ssp_type) { |
@@ -174,7 +167,7 @@ static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data) | |||
174 | break; | 167 | break; |
175 | } | 168 | } |
176 | 169 | ||
177 | return (read_SSSR(reg) & mask) == mask; | 170 | return (pxa2xx_spi_read(drv_data, SSSR) & mask) == mask; |
178 | } | 171 | } |
179 | 172 | ||
180 | static void pxa2xx_spi_clear_rx_thre(const struct driver_data *drv_data, | 173 | static void pxa2xx_spi_clear_rx_thre(const struct driver_data *drv_data, |
@@ -253,9 +246,6 @@ static void lpss_ssp_setup(struct driver_data *drv_data) | |||
253 | unsigned offset = 0x400; | 246 | unsigned offset = 0x400; |
254 | u32 value, orig; | 247 | u32 value, orig; |
255 | 248 | ||
256 | if (!is_lpss_ssp(drv_data)) | ||
257 | return; | ||
258 | |||
259 | /* | 249 | /* |
260 | * Perform auto-detection of the LPSS SSP private registers. They | 250 | * Perform auto-detection of the LPSS SSP private registers. They |
261 | * can be either at 1k or 2k offset from the base address. | 251 | * can be either at 1k or 2k offset from the base address. |
@@ -304,9 +294,6 @@ static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable) | |||
304 | { | 294 | { |
305 | u32 value; | 295 | u32 value; |
306 | 296 | ||
307 | if (!is_lpss_ssp(drv_data)) | ||
308 | return; | ||
309 | |||
310 | value = __lpss_ssp_read_priv(drv_data, SPI_CS_CONTROL); | 297 | value = __lpss_ssp_read_priv(drv_data, SPI_CS_CONTROL); |
311 | if (enable) | 298 | if (enable) |
312 | value &= ~SPI_CS_CONTROL_CS_HIGH; | 299 | value &= ~SPI_CS_CONTROL_CS_HIGH; |
@@ -320,7 +307,7 @@ static void cs_assert(struct driver_data *drv_data) | |||
320 | struct chip_data *chip = drv_data->cur_chip; | 307 | struct chip_data *chip = drv_data->cur_chip; |
321 | 308 | ||
322 | if (drv_data->ssp_type == CE4100_SSP) { | 309 | if (drv_data->ssp_type == CE4100_SSP) { |
323 | write_SSSR(drv_data->cur_chip->frm, drv_data->ioaddr); | 310 | pxa2xx_spi_write(drv_data, SSSR, drv_data->cur_chip->frm); |
324 | return; | 311 | return; |
325 | } | 312 | } |
326 | 313 | ||
@@ -334,7 +321,8 @@ static void cs_assert(struct driver_data *drv_data) | |||
334 | return; | 321 | return; |
335 | } | 322 | } |
336 | 323 | ||
337 | lpss_ssp_cs_control(drv_data, true); | 324 | if (is_lpss_ssp(drv_data)) |
325 | lpss_ssp_cs_control(drv_data, true); | ||
338 | } | 326 | } |
339 | 327 | ||
340 | static void cs_deassert(struct driver_data *drv_data) | 328 | static void cs_deassert(struct driver_data *drv_data) |
@@ -354,20 +342,18 @@ static void cs_deassert(struct driver_data *drv_data) | |||
354 | return; | 342 | return; |
355 | } | 343 | } |
356 | 344 | ||
357 | lpss_ssp_cs_control(drv_data, false); | 345 | if (is_lpss_ssp(drv_data)) |
346 | lpss_ssp_cs_control(drv_data, false); | ||
358 | } | 347 | } |
359 | 348 | ||
360 | int pxa2xx_spi_flush(struct driver_data *drv_data) | 349 | int pxa2xx_spi_flush(struct driver_data *drv_data) |
361 | { | 350 | { |
362 | unsigned long limit = loops_per_jiffy << 1; | 351 | unsigned long limit = loops_per_jiffy << 1; |
363 | 352 | ||
364 | void __iomem *reg = drv_data->ioaddr; | ||
365 | |||
366 | do { | 353 | do { |
367 | while (read_SSSR(reg) & SSSR_RNE) { | 354 | while (pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE) |
368 | read_SSDR(reg); | 355 | pxa2xx_spi_read(drv_data, SSDR); |
369 | } | 356 | } while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit); |
370 | } while ((read_SSSR(reg) & SSSR_BSY) && --limit); | ||
371 | write_SSSR_CS(drv_data, SSSR_ROR); | 357 | write_SSSR_CS(drv_data, SSSR_ROR); |
372 | 358 | ||
373 | return limit; | 359 | return limit; |
@@ -375,14 +361,13 @@ int pxa2xx_spi_flush(struct driver_data *drv_data) | |||
375 | 361 | ||
376 | static int null_writer(struct driver_data *drv_data) | 362 | static int null_writer(struct driver_data *drv_data) |
377 | { | 363 | { |
378 | void __iomem *reg = drv_data->ioaddr; | ||
379 | u8 n_bytes = drv_data->n_bytes; | 364 | u8 n_bytes = drv_data->n_bytes; |
380 | 365 | ||
381 | if (pxa2xx_spi_txfifo_full(drv_data) | 366 | if (pxa2xx_spi_txfifo_full(drv_data) |
382 | || (drv_data->tx == drv_data->tx_end)) | 367 | || (drv_data->tx == drv_data->tx_end)) |
383 | return 0; | 368 | return 0; |
384 | 369 | ||
385 | write_SSDR(0, reg); | 370 | pxa2xx_spi_write(drv_data, SSDR, 0); |
386 | drv_data->tx += n_bytes; | 371 | drv_data->tx += n_bytes; |
387 | 372 | ||
388 | return 1; | 373 | return 1; |
@@ -390,12 +375,11 @@ static int null_writer(struct driver_data *drv_data) | |||
390 | 375 | ||
391 | static int null_reader(struct driver_data *drv_data) | 376 | static int null_reader(struct driver_data *drv_data) |
392 | { | 377 | { |
393 | void __iomem *reg = drv_data->ioaddr; | ||
394 | u8 n_bytes = drv_data->n_bytes; | 378 | u8 n_bytes = drv_data->n_bytes; |
395 | 379 | ||
396 | while ((read_SSSR(reg) & SSSR_RNE) | 380 | while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE) |
397 | && (drv_data->rx < drv_data->rx_end)) { | 381 | && (drv_data->rx < drv_data->rx_end)) { |
398 | read_SSDR(reg); | 382 | pxa2xx_spi_read(drv_data, SSDR); |
399 | drv_data->rx += n_bytes; | 383 | drv_data->rx += n_bytes; |
400 | } | 384 | } |
401 | 385 | ||
@@ -404,13 +388,11 @@ static int null_reader(struct driver_data *drv_data) | |||
404 | 388 | ||
405 | static int u8_writer(struct driver_data *drv_data) | 389 | static int u8_writer(struct driver_data *drv_data) |
406 | { | 390 | { |
407 | void __iomem *reg = drv_data->ioaddr; | ||
408 | |||
409 | if (pxa2xx_spi_txfifo_full(drv_data) | 391 | if (pxa2xx_spi_txfifo_full(drv_data) |
410 | || (drv_data->tx == drv_data->tx_end)) | 392 | || (drv_data->tx == drv_data->tx_end)) |
411 | return 0; | 393 | return 0; |
412 | 394 | ||
413 | write_SSDR(*(u8 *)(drv_data->tx), reg); | 395 | pxa2xx_spi_write(drv_data, SSDR, *(u8 *)(drv_data->tx)); |
414 | ++drv_data->tx; | 396 | ++drv_data->tx; |
415 | 397 | ||
416 | return 1; | 398 | return 1; |
@@ -418,11 +400,9 @@ static int u8_writer(struct driver_data *drv_data) | |||
418 | 400 | ||
419 | static int u8_reader(struct driver_data *drv_data) | 401 | static int u8_reader(struct driver_data *drv_data) |
420 | { | 402 | { |
421 | void __iomem *reg = drv_data->ioaddr; | 403 | while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE) |
422 | 404 | && (drv_data->rx < drv_data->rx_end)) { | |
423 | while ((read_SSSR(reg) & SSSR_RNE) | 405 | *(u8 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR); |
424 | && (drv_data->rx < drv_data->rx_end)) { | ||
425 | *(u8 *)(drv_data->rx) = read_SSDR(reg); | ||
426 | ++drv_data->rx; | 406 | ++drv_data->rx; |
427 | } | 407 | } |
428 | 408 | ||
@@ -431,13 +411,11 @@ static int u8_reader(struct driver_data *drv_data) | |||
431 | 411 | ||
432 | static int u16_writer(struct driver_data *drv_data) | 412 | static int u16_writer(struct driver_data *drv_data) |
433 | { | 413 | { |
434 | void __iomem *reg = drv_data->ioaddr; | ||
435 | |||
436 | if (pxa2xx_spi_txfifo_full(drv_data) | 414 | if (pxa2xx_spi_txfifo_full(drv_data) |
437 | || (drv_data->tx == drv_data->tx_end)) | 415 | || (drv_data->tx == drv_data->tx_end)) |
438 | return 0; | 416 | return 0; |
439 | 417 | ||
440 | write_SSDR(*(u16 *)(drv_data->tx), reg); | 418 | pxa2xx_spi_write(drv_data, SSDR, *(u16 *)(drv_data->tx)); |
441 | drv_data->tx += 2; | 419 | drv_data->tx += 2; |
442 | 420 | ||
443 | return 1; | 421 | return 1; |
@@ -445,11 +423,9 @@ static int u16_writer(struct driver_data *drv_data) | |||
445 | 423 | ||
446 | static int u16_reader(struct driver_data *drv_data) | 424 | static int u16_reader(struct driver_data *drv_data) |
447 | { | 425 | { |
448 | void __iomem *reg = drv_data->ioaddr; | 426 | while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE) |
449 | 427 | && (drv_data->rx < drv_data->rx_end)) { | |
450 | while ((read_SSSR(reg) & SSSR_RNE) | 428 | *(u16 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR); |
451 | && (drv_data->rx < drv_data->rx_end)) { | ||
452 | *(u16 *)(drv_data->rx) = read_SSDR(reg); | ||
453 | drv_data->rx += 2; | 429 | drv_data->rx += 2; |
454 | } | 430 | } |
455 | 431 | ||
@@ -458,13 +434,11 @@ static int u16_reader(struct driver_data *drv_data) | |||
458 | 434 | ||
459 | static int u32_writer(struct driver_data *drv_data) | 435 | static int u32_writer(struct driver_data *drv_data) |
460 | { | 436 | { |
461 | void __iomem *reg = drv_data->ioaddr; | ||
462 | |||
463 | if (pxa2xx_spi_txfifo_full(drv_data) | 437 | if (pxa2xx_spi_txfifo_full(drv_data) |
464 | || (drv_data->tx == drv_data->tx_end)) | 438 | || (drv_data->tx == drv_data->tx_end)) |
465 | return 0; | 439 | return 0; |
466 | 440 | ||
467 | write_SSDR(*(u32 *)(drv_data->tx), reg); | 441 | pxa2xx_spi_write(drv_data, SSDR, *(u32 *)(drv_data->tx)); |
468 | drv_data->tx += 4; | 442 | drv_data->tx += 4; |
469 | 443 | ||
470 | return 1; | 444 | return 1; |
@@ -472,11 +446,9 @@ static int u32_writer(struct driver_data *drv_data) | |||
472 | 446 | ||
473 | static int u32_reader(struct driver_data *drv_data) | 447 | static int u32_reader(struct driver_data *drv_data) |
474 | { | 448 | { |
475 | void __iomem *reg = drv_data->ioaddr; | 449 | while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE) |
476 | 450 | && (drv_data->rx < drv_data->rx_end)) { | |
477 | while ((read_SSSR(reg) & SSSR_RNE) | 451 | *(u32 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR); |
478 | && (drv_data->rx < drv_data->rx_end)) { | ||
479 | *(u32 *)(drv_data->rx) = read_SSDR(reg); | ||
480 | drv_data->rx += 4; | 452 | drv_data->rx += 4; |
481 | } | 453 | } |
482 | 454 | ||
@@ -546,33 +518,31 @@ static void giveback(struct driver_data *drv_data) | |||
546 | cs_deassert(drv_data); | 518 | cs_deassert(drv_data); |
547 | } | 519 | } |
548 | 520 | ||
549 | spi_finalize_current_message(drv_data->master); | ||
550 | drv_data->cur_chip = NULL; | 521 | drv_data->cur_chip = NULL; |
522 | spi_finalize_current_message(drv_data->master); | ||
551 | } | 523 | } |
552 | 524 | ||
553 | static void reset_sccr1(struct driver_data *drv_data) | 525 | static void reset_sccr1(struct driver_data *drv_data) |
554 | { | 526 | { |
555 | void __iomem *reg = drv_data->ioaddr; | ||
556 | struct chip_data *chip = drv_data->cur_chip; | 527 | struct chip_data *chip = drv_data->cur_chip; |
557 | u32 sccr1_reg; | 528 | u32 sccr1_reg; |
558 | 529 | ||
559 | sccr1_reg = read_SSCR1(reg) & ~drv_data->int_cr1; | 530 | sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1; |
560 | sccr1_reg &= ~SSCR1_RFT; | 531 | sccr1_reg &= ~SSCR1_RFT; |
561 | sccr1_reg |= chip->threshold; | 532 | sccr1_reg |= chip->threshold; |
562 | write_SSCR1(sccr1_reg, reg); | 533 | pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg); |
563 | } | 534 | } |
564 | 535 | ||
565 | static void int_error_stop(struct driver_data *drv_data, const char* msg) | 536 | static void int_error_stop(struct driver_data *drv_data, const char* msg) |
566 | { | 537 | { |
567 | void __iomem *reg = drv_data->ioaddr; | ||
568 | |||
569 | /* Stop and reset SSP */ | 538 | /* Stop and reset SSP */ |
570 | write_SSSR_CS(drv_data, drv_data->clear_sr); | 539 | write_SSSR_CS(drv_data, drv_data->clear_sr); |
571 | reset_sccr1(drv_data); | 540 | reset_sccr1(drv_data); |
572 | if (!pxa25x_ssp_comp(drv_data)) | 541 | if (!pxa25x_ssp_comp(drv_data)) |
573 | write_SSTO(0, reg); | 542 | pxa2xx_spi_write(drv_data, SSTO, 0); |
574 | pxa2xx_spi_flush(drv_data); | 543 | pxa2xx_spi_flush(drv_data); |
575 | write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); | 544 | pxa2xx_spi_write(drv_data, SSCR0, |
545 | pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE); | ||
576 | 546 | ||
577 | dev_err(&drv_data->pdev->dev, "%s\n", msg); | 547 | dev_err(&drv_data->pdev->dev, "%s\n", msg); |
578 | 548 | ||
@@ -582,13 +552,11 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg) | |||
582 | 552 | ||
583 | static void int_transfer_complete(struct driver_data *drv_data) | 553 | static void int_transfer_complete(struct driver_data *drv_data) |
584 | { | 554 | { |
585 | void __iomem *reg = drv_data->ioaddr; | ||
586 | |||
587 | /* Stop SSP */ | 555 | /* Stop SSP */ |
588 | write_SSSR_CS(drv_data, drv_data->clear_sr); | 556 | write_SSSR_CS(drv_data, drv_data->clear_sr); |
589 | reset_sccr1(drv_data); | 557 | reset_sccr1(drv_data); |
590 | if (!pxa25x_ssp_comp(drv_data)) | 558 | if (!pxa25x_ssp_comp(drv_data)) |
591 | write_SSTO(0, reg); | 559 | pxa2xx_spi_write(drv_data, SSTO, 0); |
592 | 560 | ||
593 | /* Update total byte transferred return count actual bytes read */ | 561 | /* Update total byte transferred return count actual bytes read */ |
594 | drv_data->cur_msg->actual_length += drv_data->len - | 562 | drv_data->cur_msg->actual_length += drv_data->len - |
@@ -607,12 +575,10 @@ static void int_transfer_complete(struct driver_data *drv_data) | |||
607 | 575 | ||
608 | static irqreturn_t interrupt_transfer(struct driver_data *drv_data) | 576 | static irqreturn_t interrupt_transfer(struct driver_data *drv_data) |
609 | { | 577 | { |
610 | void __iomem *reg = drv_data->ioaddr; | 578 | u32 irq_mask = (pxa2xx_spi_read(drv_data, SSCR1) & SSCR1_TIE) ? |
579 | drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS; | ||
611 | 580 | ||
612 | u32 irq_mask = (read_SSCR1(reg) & SSCR1_TIE) ? | 581 | u32 irq_status = pxa2xx_spi_read(drv_data, SSSR) & irq_mask; |
613 | drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS; | ||
614 | |||
615 | u32 irq_status = read_SSSR(reg) & irq_mask; | ||
616 | 582 | ||
617 | if (irq_status & SSSR_ROR) { | 583 | if (irq_status & SSSR_ROR) { |
618 | int_error_stop(drv_data, "interrupt_transfer: fifo overrun"); | 584 | int_error_stop(drv_data, "interrupt_transfer: fifo overrun"); |
@@ -620,7 +586,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data) | |||
620 | } | 586 | } |
621 | 587 | ||
622 | if (irq_status & SSSR_TINT) { | 588 | if (irq_status & SSSR_TINT) { |
623 | write_SSSR(SSSR_TINT, reg); | 589 | pxa2xx_spi_write(drv_data, SSSR, SSSR_TINT); |
624 | if (drv_data->read(drv_data)) { | 590 | if (drv_data->read(drv_data)) { |
625 | int_transfer_complete(drv_data); | 591 | int_transfer_complete(drv_data); |
626 | return IRQ_HANDLED; | 592 | return IRQ_HANDLED; |
@@ -644,7 +610,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data) | |||
644 | u32 bytes_left; | 610 | u32 bytes_left; |
645 | u32 sccr1_reg; | 611 | u32 sccr1_reg; |
646 | 612 | ||
647 | sccr1_reg = read_SSCR1(reg); | 613 | sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1); |
648 | sccr1_reg &= ~SSCR1_TIE; | 614 | sccr1_reg &= ~SSCR1_TIE; |
649 | 615 | ||
650 | /* | 616 | /* |
@@ -670,7 +636,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data) | |||
670 | 636 | ||
671 | pxa2xx_spi_set_rx_thre(drv_data, &sccr1_reg, rx_thre); | 637 | pxa2xx_spi_set_rx_thre(drv_data, &sccr1_reg, rx_thre); |
672 | } | 638 | } |
673 | write_SSCR1(sccr1_reg, reg); | 639 | pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg); |
674 | } | 640 | } |
675 | 641 | ||
676 | /* We did something */ | 642 | /* We did something */ |
@@ -680,7 +646,6 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data) | |||
680 | static irqreturn_t ssp_int(int irq, void *dev_id) | 646 | static irqreturn_t ssp_int(int irq, void *dev_id) |
681 | { | 647 | { |
682 | struct driver_data *drv_data = dev_id; | 648 | struct driver_data *drv_data = dev_id; |
683 | void __iomem *reg = drv_data->ioaddr; | ||
684 | u32 sccr1_reg; | 649 | u32 sccr1_reg; |
685 | u32 mask = drv_data->mask_sr; | 650 | u32 mask = drv_data->mask_sr; |
686 | u32 status; | 651 | u32 status; |
@@ -700,11 +665,11 @@ static irqreturn_t ssp_int(int irq, void *dev_id) | |||
700 | * are all set to one. That means that the device is already | 665 | * are all set to one. That means that the device is already |
701 | * powered off. | 666 | * powered off. |
702 | */ | 667 | */ |
703 | status = read_SSSR(reg); | 668 | status = pxa2xx_spi_read(drv_data, SSSR); |
704 | if (status == ~0) | 669 | if (status == ~0) |
705 | return IRQ_NONE; | 670 | return IRQ_NONE; |
706 | 671 | ||
707 | sccr1_reg = read_SSCR1(reg); | 672 | sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1); |
708 | 673 | ||
709 | /* Ignore possible writes if we don't need to write */ | 674 | /* Ignore possible writes if we don't need to write */ |
710 | if (!(sccr1_reg & SSCR1_TIE)) | 675 | if (!(sccr1_reg & SSCR1_TIE)) |
@@ -715,10 +680,14 @@ static irqreturn_t ssp_int(int irq, void *dev_id) | |||
715 | 680 | ||
716 | if (!drv_data->cur_msg) { | 681 | if (!drv_data->cur_msg) { |
717 | 682 | ||
718 | write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); | 683 | pxa2xx_spi_write(drv_data, SSCR0, |
719 | write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg); | 684 | pxa2xx_spi_read(drv_data, SSCR0) |
685 | & ~SSCR0_SSE); | ||
686 | pxa2xx_spi_write(drv_data, SSCR1, | ||
687 | pxa2xx_spi_read(drv_data, SSCR1) | ||
688 | & ~drv_data->int_cr1); | ||
720 | if (!pxa25x_ssp_comp(drv_data)) | 689 | if (!pxa25x_ssp_comp(drv_data)) |
721 | write_SSTO(0, reg); | 690 | pxa2xx_spi_write(drv_data, SSTO, 0); |
722 | write_SSSR_CS(drv_data, drv_data->clear_sr); | 691 | write_SSSR_CS(drv_data, drv_data->clear_sr); |
723 | 692 | ||
724 | dev_err(&drv_data->pdev->dev, | 693 | dev_err(&drv_data->pdev->dev, |
@@ -787,7 +756,6 @@ static void pump_transfers(unsigned long data) | |||
787 | struct spi_transfer *transfer = NULL; | 756 | struct spi_transfer *transfer = NULL; |
788 | struct spi_transfer *previous = NULL; | 757 | struct spi_transfer *previous = NULL; |
789 | struct chip_data *chip = NULL; | 758 | struct chip_data *chip = NULL; |
790 | void __iomem *reg = drv_data->ioaddr; | ||
791 | u32 clk_div = 0; | 759 | u32 clk_div = 0; |
792 | u8 bits = 0; | 760 | u8 bits = 0; |
793 | u32 speed = 0; | 761 | u32 speed = 0; |
@@ -931,7 +899,7 @@ static void pump_transfers(unsigned long data) | |||
931 | 899 | ||
932 | /* Clear status and start DMA engine */ | 900 | /* Clear status and start DMA engine */ |
933 | cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1; | 901 | cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1; |
934 | write_SSSR(drv_data->clear_sr, reg); | 902 | pxa2xx_spi_write(drv_data, SSSR, drv_data->clear_sr); |
935 | 903 | ||
936 | pxa2xx_spi_dma_start(drv_data); | 904 | pxa2xx_spi_dma_start(drv_data); |
937 | } else { | 905 | } else { |
@@ -944,39 +912,43 @@ static void pump_transfers(unsigned long data) | |||
944 | } | 912 | } |
945 | 913 | ||
946 | if (is_lpss_ssp(drv_data)) { | 914 | if (is_lpss_ssp(drv_data)) { |
947 | if ((read_SSIRF(reg) & 0xff) != chip->lpss_rx_threshold) | 915 | if ((pxa2xx_spi_read(drv_data, SSIRF) & 0xff) |
948 | write_SSIRF(chip->lpss_rx_threshold, reg); | 916 | != chip->lpss_rx_threshold) |
949 | if ((read_SSITF(reg) & 0xffff) != chip->lpss_tx_threshold) | 917 | pxa2xx_spi_write(drv_data, SSIRF, |
950 | write_SSITF(chip->lpss_tx_threshold, reg); | 918 | chip->lpss_rx_threshold); |
919 | if ((pxa2xx_spi_read(drv_data, SSITF) & 0xffff) | ||
920 | != chip->lpss_tx_threshold) | ||
921 | pxa2xx_spi_write(drv_data, SSITF, | ||
922 | chip->lpss_tx_threshold); | ||
951 | } | 923 | } |
952 | 924 | ||
953 | if (is_quark_x1000_ssp(drv_data) && | 925 | if (is_quark_x1000_ssp(drv_data) && |
954 | (read_DDS_RATE(reg) != chip->dds_rate)) | 926 | (pxa2xx_spi_read(drv_data, DDS_RATE) != chip->dds_rate)) |
955 | write_DDS_RATE(chip->dds_rate, reg); | 927 | pxa2xx_spi_write(drv_data, DDS_RATE, chip->dds_rate); |
956 | 928 | ||
957 | /* see if we need to reload the config registers */ | 929 | /* see if we need to reload the config registers */ |
958 | if ((read_SSCR0(reg) != cr0) || | 930 | if ((pxa2xx_spi_read(drv_data, SSCR0) != cr0) |
959 | (read_SSCR1(reg) & change_mask) != (cr1 & change_mask)) { | 931 | || (pxa2xx_spi_read(drv_data, SSCR1) & change_mask) |
960 | 932 | != (cr1 & change_mask)) { | |
961 | /* stop the SSP, and update the other bits */ | 933 | /* stop the SSP, and update the other bits */ |
962 | write_SSCR0(cr0 & ~SSCR0_SSE, reg); | 934 | pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE); |
963 | if (!pxa25x_ssp_comp(drv_data)) | 935 | if (!pxa25x_ssp_comp(drv_data)) |
964 | write_SSTO(chip->timeout, reg); | 936 | pxa2xx_spi_write(drv_data, SSTO, chip->timeout); |
965 | /* first set CR1 without interrupt and service enables */ | 937 | /* first set CR1 without interrupt and service enables */ |
966 | write_SSCR1(cr1 & change_mask, reg); | 938 | pxa2xx_spi_write(drv_data, SSCR1, cr1 & change_mask); |
967 | /* restart the SSP */ | 939 | /* restart the SSP */ |
968 | write_SSCR0(cr0, reg); | 940 | pxa2xx_spi_write(drv_data, SSCR0, cr0); |
969 | 941 | ||
970 | } else { | 942 | } else { |
971 | if (!pxa25x_ssp_comp(drv_data)) | 943 | if (!pxa25x_ssp_comp(drv_data)) |
972 | write_SSTO(chip->timeout, reg); | 944 | pxa2xx_spi_write(drv_data, SSTO, chip->timeout); |
973 | } | 945 | } |
974 | 946 | ||
975 | cs_assert(drv_data); | 947 | cs_assert(drv_data); |
976 | 948 | ||
977 | /* after chip select, release the data by enabling service | 949 | /* after chip select, release the data by enabling service |
978 | * requests and interrupts, without changing any mode bits */ | 950 | * requests and interrupts, without changing any mode bits */ |
979 | write_SSCR1(cr1, reg); | 951 | pxa2xx_spi_write(drv_data, SSCR1, cr1); |
980 | } | 952 | } |
981 | 953 | ||
982 | static int pxa2xx_spi_transfer_one_message(struct spi_master *master, | 954 | static int pxa2xx_spi_transfer_one_message(struct spi_master *master, |
@@ -1005,8 +977,8 @@ static int pxa2xx_spi_unprepare_transfer(struct spi_master *master) | |||
1005 | struct driver_data *drv_data = spi_master_get_devdata(master); | 977 | struct driver_data *drv_data = spi_master_get_devdata(master); |
1006 | 978 | ||
1007 | /* Disable the SSP now */ | 979 | /* Disable the SSP now */ |
1008 | write_SSCR0(read_SSCR0(drv_data->ioaddr) & ~SSCR0_SSE, | 980 | pxa2xx_spi_write(drv_data, SSCR0, |
1009 | drv_data->ioaddr); | 981 | pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE); |
1010 | 982 | ||
1011 | return 0; | 983 | return 0; |
1012 | } | 984 | } |
@@ -1289,6 +1261,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) | |||
1289 | struct driver_data *drv_data; | 1261 | struct driver_data *drv_data; |
1290 | struct ssp_device *ssp; | 1262 | struct ssp_device *ssp; |
1291 | int status; | 1263 | int status; |
1264 | u32 tmp; | ||
1292 | 1265 | ||
1293 | platform_info = dev_get_platdata(dev); | 1266 | platform_info = dev_get_platdata(dev); |
1294 | if (!platform_info) { | 1267 | if (!platform_info) { |
@@ -1386,38 +1359,35 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) | |||
1386 | drv_data->max_clk_rate = clk_get_rate(ssp->clk); | 1359 | drv_data->max_clk_rate = clk_get_rate(ssp->clk); |
1387 | 1360 | ||
1388 | /* Load default SSP configuration */ | 1361 | /* Load default SSP configuration */ |
1389 | write_SSCR0(0, drv_data->ioaddr); | 1362 | pxa2xx_spi_write(drv_data, SSCR0, 0); |
1390 | switch (drv_data->ssp_type) { | 1363 | switch (drv_data->ssp_type) { |
1391 | case QUARK_X1000_SSP: | 1364 | case QUARK_X1000_SSP: |
1392 | write_SSCR1(QUARK_X1000_SSCR1_RxTresh( | 1365 | tmp = QUARK_X1000_SSCR1_RxTresh(RX_THRESH_QUARK_X1000_DFLT) |
1393 | RX_THRESH_QUARK_X1000_DFLT) | | 1366 | | QUARK_X1000_SSCR1_TxTresh(TX_THRESH_QUARK_X1000_DFLT); |
1394 | QUARK_X1000_SSCR1_TxTresh( | 1367 | pxa2xx_spi_write(drv_data, SSCR1, tmp); |
1395 | TX_THRESH_QUARK_X1000_DFLT), | ||
1396 | drv_data->ioaddr); | ||
1397 | 1368 | ||
1398 | /* using the Motorola SPI protocol and use 8 bit frame */ | 1369 | /* using the Motorola SPI protocol and use 8 bit frame */ |
1399 | write_SSCR0(QUARK_X1000_SSCR0_Motorola | 1370 | pxa2xx_spi_write(drv_data, SSCR0, |
1400 | | QUARK_X1000_SSCR0_DataSize(8), | 1371 | QUARK_X1000_SSCR0_Motorola |
1401 | drv_data->ioaddr); | 1372 | | QUARK_X1000_SSCR0_DataSize(8)); |
1402 | break; | 1373 | break; |
1403 | default: | 1374 | default: |
1404 | write_SSCR1(SSCR1_RxTresh(RX_THRESH_DFLT) | | 1375 | tmp = SSCR1_RxTresh(RX_THRESH_DFLT) | |
1405 | SSCR1_TxTresh(TX_THRESH_DFLT), | 1376 | SSCR1_TxTresh(TX_THRESH_DFLT); |
1406 | drv_data->ioaddr); | 1377 | pxa2xx_spi_write(drv_data, SSCR1, tmp); |
1407 | write_SSCR0(SSCR0_SCR(2) | 1378 | tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8); |
1408 | | SSCR0_Motorola | 1379 | pxa2xx_spi_write(drv_data, SSCR0, tmp); |
1409 | | SSCR0_DataSize(8), | ||
1410 | drv_data->ioaddr); | ||
1411 | break; | 1380 | break; |
1412 | } | 1381 | } |
1413 | 1382 | ||
1414 | if (!pxa25x_ssp_comp(drv_data)) | 1383 | if (!pxa25x_ssp_comp(drv_data)) |
1415 | write_SSTO(0, drv_data->ioaddr); | 1384 | pxa2xx_spi_write(drv_data, SSTO, 0); |
1416 | 1385 | ||
1417 | if (!is_quark_x1000_ssp(drv_data)) | 1386 | if (!is_quark_x1000_ssp(drv_data)) |
1418 | write_SSPSP(0, drv_data->ioaddr); | 1387 | pxa2xx_spi_write(drv_data, SSPSP, 0); |
1419 | 1388 | ||
1420 | lpss_ssp_setup(drv_data); | 1389 | if (is_lpss_ssp(drv_data)) |
1390 | lpss_ssp_setup(drv_data); | ||
1421 | 1391 | ||
1422 | tasklet_init(&drv_data->pump_transfers, pump_transfers, | 1392 | tasklet_init(&drv_data->pump_transfers, pump_transfers, |
1423 | (unsigned long)drv_data); | 1393 | (unsigned long)drv_data); |
@@ -1460,7 +1430,7 @@ static int pxa2xx_spi_remove(struct platform_device *pdev) | |||
1460 | pm_runtime_get_sync(&pdev->dev); | 1430 | pm_runtime_get_sync(&pdev->dev); |
1461 | 1431 | ||
1462 | /* Disable the SSP at the peripheral and SOC level */ | 1432 | /* Disable the SSP at the peripheral and SOC level */ |
1463 | write_SSCR0(0, drv_data->ioaddr); | 1433 | pxa2xx_spi_write(drv_data, SSCR0, 0); |
1464 | clk_disable_unprepare(ssp->clk); | 1434 | clk_disable_unprepare(ssp->clk); |
1465 | 1435 | ||
1466 | /* Release DMA */ | 1436 | /* Release DMA */ |
@@ -1497,7 +1467,7 @@ static int pxa2xx_spi_suspend(struct device *dev) | |||
1497 | status = spi_master_suspend(drv_data->master); | 1467 | status = spi_master_suspend(drv_data->master); |
1498 | if (status != 0) | 1468 | if (status != 0) |
1499 | return status; | 1469 | return status; |
1500 | write_SSCR0(0, drv_data->ioaddr); | 1470 | pxa2xx_spi_write(drv_data, SSCR0, 0); |
1501 | 1471 | ||
1502 | if (!pm_runtime_suspended(dev)) | 1472 | if (!pm_runtime_suspended(dev)) |
1503 | clk_disable_unprepare(ssp->clk); | 1473 | clk_disable_unprepare(ssp->clk); |
@@ -1518,7 +1488,8 @@ static int pxa2xx_spi_resume(struct device *dev) | |||
1518 | clk_prepare_enable(ssp->clk); | 1488 | clk_prepare_enable(ssp->clk); |
1519 | 1489 | ||
1520 | /* Restore LPSS private register bits */ | 1490 | /* Restore LPSS private register bits */ |
1521 | lpss_ssp_setup(drv_data); | 1491 | if (is_lpss_ssp(drv_data)) |
1492 | lpss_ssp_setup(drv_data); | ||
1522 | 1493 | ||
1523 | /* Start the queue running */ | 1494 | /* Start the queue running */ |
1524 | status = spi_master_resume(drv_data->master); | 1495 | status = spi_master_resume(drv_data->master); |
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h index 6bec59c90cd4..85a58c906869 100644 --- a/drivers/spi/spi-pxa2xx.h +++ b/drivers/spi/spi-pxa2xx.h | |||
@@ -115,23 +115,17 @@ struct chip_data { | |||
115 | void (*cs_control)(u32 command); | 115 | void (*cs_control)(u32 command); |
116 | }; | 116 | }; |
117 | 117 | ||
118 | #define DEFINE_SSP_REG(reg, off) \ | 118 | static inline u32 pxa2xx_spi_read(const struct driver_data *drv_data, |
119 | static inline u32 read_##reg(void const __iomem *p) \ | 119 | unsigned reg) |
120 | { return __raw_readl(p + (off)); } \ | 120 | { |
121 | \ | 121 | return __raw_readl(drv_data->ioaddr + reg); |
122 | static inline void write_##reg(u32 v, void __iomem *p) \ | 122 | } |
123 | { __raw_writel(v, p + (off)); } | 123 | |
124 | 124 | static inline void pxa2xx_spi_write(const struct driver_data *drv_data, | |
125 | DEFINE_SSP_REG(SSCR0, 0x00) | 125 | unsigned reg, u32 val) |
126 | DEFINE_SSP_REG(SSCR1, 0x04) | 126 | { |
127 | DEFINE_SSP_REG(SSSR, 0x08) | 127 | __raw_writel(val, drv_data->ioaddr + reg); |
128 | DEFINE_SSP_REG(SSITR, 0x0c) | 128 | } |
129 | DEFINE_SSP_REG(SSDR, 0x10) | ||
130 | DEFINE_SSP_REG(DDS_RATE, 0x28) /* DDS Clock Rate */ | ||
131 | DEFINE_SSP_REG(SSTO, 0x28) | ||
132 | DEFINE_SSP_REG(SSPSP, 0x2c) | ||
133 | DEFINE_SSP_REG(SSITF, SSITF) | ||
134 | DEFINE_SSP_REG(SSIRF, SSIRF) | ||
135 | 129 | ||
136 | #define START_STATE ((void *)0) | 130 | #define START_STATE ((void *)0) |
137 | #define RUNNING_STATE ((void *)1) | 131 | #define RUNNING_STATE ((void *)1) |
@@ -155,13 +149,11 @@ static inline int pxa25x_ssp_comp(struct driver_data *drv_data) | |||
155 | 149 | ||
156 | static inline void write_SSSR_CS(struct driver_data *drv_data, u32 val) | 150 | static inline void write_SSSR_CS(struct driver_data *drv_data, u32 val) |
157 | { | 151 | { |
158 | void __iomem *reg = drv_data->ioaddr; | ||
159 | |||
160 | if (drv_data->ssp_type == CE4100_SSP || | 152 | if (drv_data->ssp_type == CE4100_SSP || |
161 | drv_data->ssp_type == QUARK_X1000_SSP) | 153 | drv_data->ssp_type == QUARK_X1000_SSP) |
162 | val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK; | 154 | val |= pxa2xx_spi_read(drv_data, SSSR) & SSSR_ALT_FRM_MASK; |
163 | 155 | ||
164 | write_SSSR(val, reg); | 156 | pxa2xx_spi_write(drv_data, SSSR, val); |
165 | } | 157 | } |
166 | 158 | ||
167 | extern int pxa2xx_spi_flush(struct driver_data *drv_data); | 159 | extern int pxa2xx_spi_flush(struct driver_data *drv_data); |
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c index e7fb5a0d2e8d..ff9cdbdb6672 100644 --- a/drivers/spi/spi-qup.c +++ b/drivers/spi/spi-qup.c | |||
@@ -337,7 +337,7 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id) | |||
337 | static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer) | 337 | static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer) |
338 | { | 338 | { |
339 | struct spi_qup *controller = spi_master_get_devdata(spi->master); | 339 | struct spi_qup *controller = spi_master_get_devdata(spi->master); |
340 | u32 config, iomode, mode; | 340 | u32 config, iomode, mode, control; |
341 | int ret, n_words, w_size; | 341 | int ret, n_words, w_size; |
342 | 342 | ||
343 | if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) { | 343 | if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) { |
@@ -392,6 +392,15 @@ static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer) | |||
392 | 392 | ||
393 | writel_relaxed(iomode, controller->base + QUP_IO_M_MODES); | 393 | writel_relaxed(iomode, controller->base + QUP_IO_M_MODES); |
394 | 394 | ||
395 | control = readl_relaxed(controller->base + SPI_IO_CONTROL); | ||
396 | |||
397 | if (spi->mode & SPI_CPOL) | ||
398 | control |= SPI_IO_C_CLK_IDLE_HIGH; | ||
399 | else | ||
400 | control &= ~SPI_IO_C_CLK_IDLE_HIGH; | ||
401 | |||
402 | writel_relaxed(control, controller->base + SPI_IO_CONTROL); | ||
403 | |||
395 | config = readl_relaxed(controller->base + SPI_CONFIG); | 404 | config = readl_relaxed(controller->base + SPI_CONFIG); |
396 | 405 | ||
397 | if (spi->mode & SPI_LOOP) | 406 | if (spi->mode & SPI_LOOP) |
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c index daabbabd26b0..1a777dc261d6 100644 --- a/drivers/spi/spi-rockchip.c +++ b/drivers/spi/spi-rockchip.c | |||
@@ -437,6 +437,7 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs) | |||
437 | rs->state &= ~TXBUSY; | 437 | rs->state &= ~TXBUSY; |
438 | spin_unlock_irqrestore(&rs->lock, flags); | 438 | spin_unlock_irqrestore(&rs->lock, flags); |
439 | 439 | ||
440 | rxdesc = NULL; | ||
440 | if (rs->rx) { | 441 | if (rs->rx) { |
441 | rxconf.direction = rs->dma_rx.direction; | 442 | rxconf.direction = rs->dma_rx.direction; |
442 | rxconf.src_addr = rs->dma_rx.addr; | 443 | rxconf.src_addr = rs->dma_rx.addr; |
@@ -453,6 +454,7 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs) | |||
453 | rxdesc->callback_param = rs; | 454 | rxdesc->callback_param = rs; |
454 | } | 455 | } |
455 | 456 | ||
457 | txdesc = NULL; | ||
456 | if (rs->tx) { | 458 | if (rs->tx) { |
457 | txconf.direction = rs->dma_tx.direction; | 459 | txconf.direction = rs->dma_tx.direction; |
458 | txconf.dst_addr = rs->dma_tx.addr; | 460 | txconf.dst_addr = rs->dma_tx.addr; |
@@ -470,7 +472,7 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs) | |||
470 | } | 472 | } |
471 | 473 | ||
472 | /* rx must be started before tx due to spi instinct */ | 474 | /* rx must be started before tx due to spi instinct */ |
473 | if (rs->rx) { | 475 | if (rxdesc) { |
474 | spin_lock_irqsave(&rs->lock, flags); | 476 | spin_lock_irqsave(&rs->lock, flags); |
475 | rs->state |= RXBUSY; | 477 | rs->state |= RXBUSY; |
476 | spin_unlock_irqrestore(&rs->lock, flags); | 478 | spin_unlock_irqrestore(&rs->lock, flags); |
@@ -478,7 +480,7 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs) | |||
478 | dma_async_issue_pending(rs->dma_rx.ch); | 480 | dma_async_issue_pending(rs->dma_rx.ch); |
479 | } | 481 | } |
480 | 482 | ||
481 | if (rs->tx) { | 483 | if (txdesc) { |
482 | spin_lock_irqsave(&rs->lock, flags); | 484 | spin_lock_irqsave(&rs->lock, flags); |
483 | rs->state |= TXBUSY; | 485 | rs->state |= TXBUSY; |
484 | spin_unlock_irqrestore(&rs->lock, flags); | 486 | spin_unlock_irqrestore(&rs->lock, flags); |
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index 2071f788c6fb..46ce47076e63 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c | |||
@@ -15,11 +15,6 @@ | |||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
17 | * GNU General Public License for more details. | 17 | * GNU General Public License for more details. |
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
22 | * | ||
23 | */ | 18 | */ |
24 | 19 | ||
25 | #include <linux/module.h> | 20 | #include <linux/module.h> |
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index 37b19836f5cb..9231c34b5a5c 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c | |||
@@ -11,10 +11,6 @@ | |||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
13 | * GNU General Public License for more details. | 13 | * GNU General Public License for more details. |
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
18 | */ | 14 | */ |
19 | 15 | ||
20 | #include <linux/init.h> | 16 | #include <linux/init.h> |
diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c index 237f2e7a7179..5a56acf8a43e 100644 --- a/drivers/spi/spi-sc18is602.c +++ b/drivers/spi/spi-sc18is602.c | |||
@@ -12,10 +12,6 @@ | |||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | */ | 15 | */ |
20 | 16 | ||
21 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c index fc29233d0650..20e800e70442 100644 --- a/drivers/spi/spi-sh-hspi.c +++ b/drivers/spi/spi-sh-hspi.c | |||
@@ -16,11 +16,6 @@ | |||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
18 | * GNU General Public License for more details. | 18 | * GNU General Public License for more details. |
19 | * | ||
20 | * You should have received a copy of the GNU General Public License | ||
21 | * along with this program; if not, write to the Free Software | ||
22 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
23 | * | ||
24 | */ | 19 | */ |
25 | 20 | ||
26 | #include <linux/clk.h> | 21 | #include <linux/clk.h> |
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 96a5fc0878d8..e57eec0b2f46 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c | |||
@@ -82,7 +82,9 @@ struct sh_msiof_spi_priv { | |||
82 | #define MDR1_SYNCMD_LR 0x30000000 /* L/R mode */ | 82 | #define MDR1_SYNCMD_LR 0x30000000 /* L/R mode */ |
83 | #define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */ | 83 | #define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */ |
84 | #define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */ | 84 | #define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */ |
85 | #define MDR1_FLD_MASK 0x000000c0 /* Frame Sync Signal Interval (0-3) */ | 85 | #define MDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */ |
86 | #define MDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */ | ||
87 | #define MDR1_FLD_MASK 0x0000000c /* Frame Sync Signal Interval (0-3) */ | ||
86 | #define MDR1_FLD_SHIFT 2 | 88 | #define MDR1_FLD_SHIFT 2 |
87 | #define MDR1_XXSTP 0x00000001 /* Transmission/Reception Stop on FIFO */ | 89 | #define MDR1_XXSTP 0x00000001 /* Transmission/Reception Stop on FIFO */ |
88 | /* TMDR1 */ | 90 | /* TMDR1 */ |
@@ -241,42 +243,80 @@ static irqreturn_t sh_msiof_spi_irq(int irq, void *data) | |||
241 | 243 | ||
242 | static struct { | 244 | static struct { |
243 | unsigned short div; | 245 | unsigned short div; |
244 | unsigned short scr; | 246 | unsigned short brdv; |
245 | } const sh_msiof_spi_clk_table[] = { | 247 | } const sh_msiof_spi_div_table[] = { |
246 | { 1, SCR_BRPS( 1) | SCR_BRDV_DIV_1 }, | 248 | { 1, SCR_BRDV_DIV_1 }, |
247 | { 2, SCR_BRPS( 1) | SCR_BRDV_DIV_2 }, | 249 | { 2, SCR_BRDV_DIV_2 }, |
248 | { 4, SCR_BRPS( 1) | SCR_BRDV_DIV_4 }, | 250 | { 4, SCR_BRDV_DIV_4 }, |
249 | { 8, SCR_BRPS( 1) | SCR_BRDV_DIV_8 }, | 251 | { 8, SCR_BRDV_DIV_8 }, |
250 | { 16, SCR_BRPS( 1) | SCR_BRDV_DIV_16 }, | 252 | { 16, SCR_BRDV_DIV_16 }, |
251 | { 32, SCR_BRPS( 1) | SCR_BRDV_DIV_32 }, | 253 | { 32, SCR_BRDV_DIV_32 }, |
252 | { 64, SCR_BRPS(32) | SCR_BRDV_DIV_2 }, | ||
253 | { 128, SCR_BRPS(32) | SCR_BRDV_DIV_4 }, | ||
254 | { 256, SCR_BRPS(32) | SCR_BRDV_DIV_8 }, | ||
255 | { 512, SCR_BRPS(32) | SCR_BRDV_DIV_16 }, | ||
256 | { 1024, SCR_BRPS(32) | SCR_BRDV_DIV_32 }, | ||
257 | }; | 254 | }; |
258 | 255 | ||
259 | static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, | 256 | static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, |
260 | unsigned long parent_rate, u32 spi_hz) | 257 | unsigned long parent_rate, u32 spi_hz) |
261 | { | 258 | { |
262 | unsigned long div = 1024; | 259 | unsigned long div = 1024; |
260 | u32 brps, scr; | ||
263 | size_t k; | 261 | size_t k; |
264 | 262 | ||
265 | if (!WARN_ON(!spi_hz || !parent_rate)) | 263 | if (!WARN_ON(!spi_hz || !parent_rate)) |
266 | div = DIV_ROUND_UP(parent_rate, spi_hz); | 264 | div = DIV_ROUND_UP(parent_rate, spi_hz); |
267 | 265 | ||
268 | /* TODO: make more fine grained */ | 266 | for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_div_table); k++) { |
269 | 267 | brps = DIV_ROUND_UP(div, sh_msiof_spi_div_table[k].div); | |
270 | for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_clk_table); k++) { | 268 | if (brps <= 32) /* max of brdv is 32 */ |
271 | if (sh_msiof_spi_clk_table[k].div >= div) | ||
272 | break; | 269 | break; |
273 | } | 270 | } |
274 | 271 | ||
275 | k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_clk_table) - 1); | 272 | k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_div_table) - 1); |
276 | 273 | ||
277 | sh_msiof_write(p, TSCR, sh_msiof_spi_clk_table[k].scr); | 274 | scr = sh_msiof_spi_div_table[k].brdv | SCR_BRPS(brps); |
275 | sh_msiof_write(p, TSCR, scr); | ||
278 | if (!(p->chipdata->master_flags & SPI_MASTER_MUST_TX)) | 276 | if (!(p->chipdata->master_flags & SPI_MASTER_MUST_TX)) |
279 | sh_msiof_write(p, RSCR, sh_msiof_spi_clk_table[k].scr); | 277 | sh_msiof_write(p, RSCR, scr); |
278 | } | ||
279 | |||
280 | static u32 sh_msiof_get_delay_bit(u32 dtdl_or_syncdl) | ||
281 | { | ||
282 | /* | ||
283 | * DTDL/SYNCDL bit : p->info->dtdl or p->info->syncdl | ||
284 | * b'000 : 0 | ||
285 | * b'001 : 100 | ||
286 | * b'010 : 200 | ||
287 | * b'011 (SYNCDL only) : 300 | ||
288 | * b'101 : 50 | ||
289 | * b'110 : 150 | ||
290 | */ | ||
291 | if (dtdl_or_syncdl % 100) | ||
292 | return dtdl_or_syncdl / 100 + 5; | ||
293 | else | ||
294 | return dtdl_or_syncdl / 100; | ||
295 | } | ||
296 | |||
297 | static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p) | ||
298 | { | ||
299 | u32 val; | ||
300 | |||
301 | if (!p->info) | ||
302 | return 0; | ||
303 | |||
304 | /* check if DTDL and SYNCDL is allowed value */ | ||
305 | if (p->info->dtdl > 200 || p->info->syncdl > 300) { | ||
306 | dev_warn(&p->pdev->dev, "DTDL or SYNCDL is too large\n"); | ||
307 | return 0; | ||
308 | } | ||
309 | |||
310 | /* check if the sum of DTDL and SYNCDL becomes an integer value */ | ||
311 | if ((p->info->dtdl + p->info->syncdl) % 100) { | ||
312 | dev_warn(&p->pdev->dev, "the sum of DTDL/SYNCDL is not good\n"); | ||
313 | return 0; | ||
314 | } | ||
315 | |||
316 | val = sh_msiof_get_delay_bit(p->info->dtdl) << MDR1_DTDL_SHIFT; | ||
317 | val |= sh_msiof_get_delay_bit(p->info->syncdl) << MDR1_SYNCDL_SHIFT; | ||
318 | |||
319 | return val; | ||
280 | } | 320 | } |
281 | 321 | ||
282 | static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, | 322 | static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, |
@@ -296,6 +336,7 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, | |||
296 | tmp = MDR1_SYNCMD_SPI | 1 << MDR1_FLD_SHIFT | MDR1_XXSTP; | 336 | tmp = MDR1_SYNCMD_SPI | 1 << MDR1_FLD_SHIFT | MDR1_XXSTP; |
297 | tmp |= !cs_high << MDR1_SYNCAC_SHIFT; | 337 | tmp |= !cs_high << MDR1_SYNCAC_SHIFT; |
298 | tmp |= lsb_first << MDR1_BITLSB_SHIFT; | 338 | tmp |= lsb_first << MDR1_BITLSB_SHIFT; |
339 | tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p); | ||
299 | sh_msiof_write(p, TMDR1, tmp | MDR1_TRMD | TMDR1_PCON); | 340 | sh_msiof_write(p, TMDR1, tmp | MDR1_TRMD | TMDR1_PCON); |
300 | if (p->chipdata->master_flags & SPI_MASTER_MUST_TX) { | 341 | if (p->chipdata->master_flags & SPI_MASTER_MUST_TX) { |
301 | /* These bits are reserved if RX needs TX */ | 342 | /* These bits are reserved if RX needs TX */ |
@@ -501,7 +542,7 @@ static int sh_msiof_spi_setup(struct spi_device *spi) | |||
501 | gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH)); | 542 | gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH)); |
502 | 543 | ||
503 | 544 | ||
504 | pm_runtime_put_sync(&p->pdev->dev); | 545 | pm_runtime_put(&p->pdev->dev); |
505 | 546 | ||
506 | return 0; | 547 | return 0; |
507 | } | 548 | } |
@@ -595,8 +636,7 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p, | |||
595 | } | 636 | } |
596 | 637 | ||
597 | /* wait for tx fifo to be emptied / rx fifo to be filled */ | 638 | /* wait for tx fifo to be emptied / rx fifo to be filled */ |
598 | ret = wait_for_completion_timeout(&p->done, HZ); | 639 | if (!wait_for_completion_timeout(&p->done, HZ)) { |
599 | if (!ret) { | ||
600 | dev_err(&p->pdev->dev, "PIO timeout\n"); | 640 | dev_err(&p->pdev->dev, "PIO timeout\n"); |
601 | ret = -ETIMEDOUT; | 641 | ret = -ETIMEDOUT; |
602 | goto stop_reset; | 642 | goto stop_reset; |
@@ -706,8 +746,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, | |||
706 | } | 746 | } |
707 | 747 | ||
708 | /* wait for tx fifo to be emptied / rx fifo to be filled */ | 748 | /* wait for tx fifo to be emptied / rx fifo to be filled */ |
709 | ret = wait_for_completion_timeout(&p->done, HZ); | 749 | if (!wait_for_completion_timeout(&p->done, HZ)) { |
710 | if (!ret) { | ||
711 | dev_err(&p->pdev->dev, "DMA timeout\n"); | 750 | dev_err(&p->pdev->dev, "DMA timeout\n"); |
712 | ret = -ETIMEDOUT; | 751 | ret = -ETIMEDOUT; |
713 | goto stop_reset; | 752 | goto stop_reset; |
@@ -957,6 +996,8 @@ static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev) | |||
957 | &info->tx_fifo_override); | 996 | &info->tx_fifo_override); |
958 | of_property_read_u32(np, "renesas,rx-fifo-size", | 997 | of_property_read_u32(np, "renesas,rx-fifo-size", |
959 | &info->rx_fifo_override); | 998 | &info->rx_fifo_override); |
999 | of_property_read_u32(np, "renesas,dtdl", &info->dtdl); | ||
1000 | of_property_read_u32(np, "renesas,syncdl", &info->syncdl); | ||
960 | 1001 | ||
961 | info->num_chipselect = num_cs; | 1002 | info->num_chipselect = num_cs; |
962 | 1003 | ||
diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c index 1cfc906dd174..502501187c9e 100644 --- a/drivers/spi/spi-sh.c +++ b/drivers/spi/spi-sh.c | |||
@@ -14,11 +14,6 @@ | |||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
16 | * GNU General Public License for more details. | 16 | * GNU General Public License for more details. |
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
21 | * | ||
22 | */ | 17 | */ |
23 | 18 | ||
24 | #include <linux/module.h> | 19 | #include <linux/module.h> |
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c index d075191476f0..f5715c9f68b0 100644 --- a/drivers/spi/spi-sirf.c +++ b/drivers/spi/spi-sirf.c | |||
@@ -818,7 +818,6 @@ static SIMPLE_DEV_PM_OPS(spi_sirfsoc_pm_ops, spi_sirfsoc_suspend, | |||
818 | 818 | ||
819 | static const struct of_device_id spi_sirfsoc_of_match[] = { | 819 | static const struct of_device_id spi_sirfsoc_of_match[] = { |
820 | { .compatible = "sirf,prima2-spi", }, | 820 | { .compatible = "sirf,prima2-spi", }, |
821 | { .compatible = "sirf,marco-spi", }, | ||
822 | {} | 821 | {} |
823 | }; | 822 | }; |
824 | MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match); | 823 | MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match); |
diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c new file mode 100644 index 000000000000..2faeaa7b57a8 --- /dev/null +++ b/drivers/spi/spi-st-ssc4.c | |||
@@ -0,0 +1,504 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2008-2014 STMicroelectronics Limited | ||
3 | * | ||
4 | * Author: Angus Clark <Angus.Clark@st.com> | ||
5 | * Patrice Chotard <patrice.chotard@st.com> | ||
6 | * Lee Jones <lee.jones@linaro.org> | ||
7 | * | ||
8 | * SPI master mode controller driver, used in STMicroelectronics devices. | ||
9 | * | ||
10 | * May be copied or modified under the terms of the GNU General Public | ||
11 | * License Version 2.0 only. See linux/COPYING for more information. | ||
12 | */ | ||
13 | |||
14 | #include <linux/clk.h> | ||
15 | #include <linux/delay.h> | ||
16 | #include <linux/interrupt.h> | ||
17 | #include <linux/io.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/pinctrl/consumer.h> | ||
20 | #include <linux/platform_device.h> | ||
21 | #include <linux/of.h> | ||
22 | #include <linux/of_gpio.h> | ||
23 | #include <linux/of_irq.h> | ||
24 | #include <linux/pm_runtime.h> | ||
25 | #include <linux/spi/spi.h> | ||
26 | #include <linux/spi/spi_bitbang.h> | ||
27 | |||
28 | /* SSC registers */ | ||
29 | #define SSC_BRG 0x000 | ||
30 | #define SSC_TBUF 0x004 | ||
31 | #define SSC_RBUF 0x008 | ||
32 | #define SSC_CTL 0x00C | ||
33 | #define SSC_IEN 0x010 | ||
34 | #define SSC_I2C 0x018 | ||
35 | |||
36 | /* SSC Control */ | ||
37 | #define SSC_CTL_DATA_WIDTH_9 0x8 | ||
38 | #define SSC_CTL_DATA_WIDTH_MSK 0xf | ||
39 | #define SSC_CTL_BM 0xf | ||
40 | #define SSC_CTL_HB BIT(4) | ||
41 | #define SSC_CTL_PH BIT(5) | ||
42 | #define SSC_CTL_PO BIT(6) | ||
43 | #define SSC_CTL_SR BIT(7) | ||
44 | #define SSC_CTL_MS BIT(8) | ||
45 | #define SSC_CTL_EN BIT(9) | ||
46 | #define SSC_CTL_LPB BIT(10) | ||
47 | #define SSC_CTL_EN_TX_FIFO BIT(11) | ||
48 | #define SSC_CTL_EN_RX_FIFO BIT(12) | ||
49 | #define SSC_CTL_EN_CLST_RX BIT(13) | ||
50 | |||
51 | /* SSC Interrupt Enable */ | ||
52 | #define SSC_IEN_TEEN BIT(2) | ||
53 | |||
54 | #define FIFO_SIZE 8 | ||
55 | |||
56 | struct spi_st { | ||
57 | /* SSC SPI Controller */ | ||
58 | void __iomem *base; | ||
59 | struct clk *clk; | ||
60 | struct device *dev; | ||
61 | |||
62 | /* SSC SPI current transaction */ | ||
63 | const u8 *tx_ptr; | ||
64 | u8 *rx_ptr; | ||
65 | u16 bytes_per_word; | ||
66 | unsigned int words_remaining; | ||
67 | unsigned int baud; | ||
68 | struct completion done; | ||
69 | }; | ||
70 | |||
71 | static int spi_st_clk_enable(struct spi_st *spi_st) | ||
72 | { | ||
73 | /* | ||
74 | * Current platforms use one of the core clocks for SPI and I2C. | ||
75 | * If we attempt to disable the clock, the system will hang. | ||
76 | * | ||
77 | * TODO: Remove this when platform supports power domains. | ||
78 | */ | ||
79 | return 0; | ||
80 | |||
81 | return clk_prepare_enable(spi_st->clk); | ||
82 | } | ||
83 | |||
84 | static void spi_st_clk_disable(struct spi_st *spi_st) | ||
85 | { | ||
86 | /* | ||
87 | * Current platforms use one of the core clocks for SPI and I2C. | ||
88 | * If we attempt to disable the clock, the system will hang. | ||
89 | * | ||
90 | * TODO: Remove this when platform supports power domains. | ||
91 | */ | ||
92 | return; | ||
93 | |||
94 | clk_disable_unprepare(spi_st->clk); | ||
95 | } | ||
96 | |||
97 | /* Load the TX FIFO */ | ||
98 | static void ssc_write_tx_fifo(struct spi_st *spi_st) | ||
99 | { | ||
100 | unsigned int count, i; | ||
101 | uint32_t word = 0; | ||
102 | |||
103 | if (spi_st->words_remaining > FIFO_SIZE) | ||
104 | count = FIFO_SIZE; | ||
105 | else | ||
106 | count = spi_st->words_remaining; | ||
107 | |||
108 | for (i = 0; i < count; i++) { | ||
109 | if (spi_st->tx_ptr) { | ||
110 | if (spi_st->bytes_per_word == 1) { | ||
111 | word = *spi_st->tx_ptr++; | ||
112 | } else { | ||
113 | word = *spi_st->tx_ptr++; | ||
114 | word = *spi_st->tx_ptr++ | (word << 8); | ||
115 | } | ||
116 | } | ||
117 | writel_relaxed(word, spi_st->base + SSC_TBUF); | ||
118 | } | ||
119 | } | ||
120 | |||
121 | /* Read the RX FIFO */ | ||
122 | static void ssc_read_rx_fifo(struct spi_st *spi_st) | ||
123 | { | ||
124 | unsigned int count, i; | ||
125 | uint32_t word = 0; | ||
126 | |||
127 | if (spi_st->words_remaining > FIFO_SIZE) | ||
128 | count = FIFO_SIZE; | ||
129 | else | ||
130 | count = spi_st->words_remaining; | ||
131 | |||
132 | for (i = 0; i < count; i++) { | ||
133 | word = readl_relaxed(spi_st->base + SSC_RBUF); | ||
134 | |||
135 | if (spi_st->rx_ptr) { | ||
136 | if (spi_st->bytes_per_word == 1) { | ||
137 | *spi_st->rx_ptr++ = (uint8_t)word; | ||
138 | } else { | ||
139 | *spi_st->rx_ptr++ = (word >> 8); | ||
140 | *spi_st->rx_ptr++ = word & 0xff; | ||
141 | } | ||
142 | } | ||
143 | } | ||
144 | spi_st->words_remaining -= count; | ||
145 | } | ||
146 | |||
147 | static int spi_st_transfer_one(struct spi_master *master, | ||
148 | struct spi_device *spi, struct spi_transfer *t) | ||
149 | { | ||
150 | struct spi_st *spi_st = spi_master_get_devdata(master); | ||
151 | uint32_t ctl = 0; | ||
152 | |||
153 | /* Setup transfer */ | ||
154 | spi_st->tx_ptr = t->tx_buf; | ||
155 | spi_st->rx_ptr = t->rx_buf; | ||
156 | |||
157 | if (spi->bits_per_word > 8) { | ||
158 | /* | ||
159 | * Anything greater than 8 bits-per-word requires 2 | ||
160 | * bytes-per-word in the RX/TX buffers | ||
161 | */ | ||
162 | spi_st->bytes_per_word = 2; | ||
163 | spi_st->words_remaining = t->len / 2; | ||
164 | |||
165 | } else if (spi->bits_per_word == 8 && !(t->len & 0x1)) { | ||
166 | /* | ||
167 | * If transfer is even-length, and 8 bits-per-word, then | ||
168 | * implement as half-length 16 bits-per-word transfer | ||
169 | */ | ||
170 | spi_st->bytes_per_word = 2; | ||
171 | spi_st->words_remaining = t->len / 2; | ||
172 | |||
173 | /* Set SSC_CTL to 16 bits-per-word */ | ||
174 | ctl = readl_relaxed(spi_st->base + SSC_CTL); | ||
175 | writel_relaxed((ctl | 0xf), spi_st->base + SSC_CTL); | ||
176 | |||
177 | readl_relaxed(spi_st->base + SSC_RBUF); | ||
178 | |||
179 | } else { | ||
180 | spi_st->bytes_per_word = 1; | ||
181 | spi_st->words_remaining = t->len; | ||
182 | } | ||
183 | |||
184 | reinit_completion(&spi_st->done); | ||
185 | |||
186 | /* Start transfer by writing to the TX FIFO */ | ||
187 | ssc_write_tx_fifo(spi_st); | ||
188 | writel_relaxed(SSC_IEN_TEEN, spi_st->base + SSC_IEN); | ||
189 | |||
190 | /* Wait for transfer to complete */ | ||
191 | wait_for_completion(&spi_st->done); | ||
192 | |||
193 | /* Restore SSC_CTL if necessary */ | ||
194 | if (ctl) | ||
195 | writel_relaxed(ctl, spi_st->base + SSC_CTL); | ||
196 | |||
197 | spi_finalize_current_transfer(spi->master); | ||
198 | |||
199 | return t->len; | ||
200 | } | ||
201 | |||
202 | static void spi_st_cleanup(struct spi_device *spi) | ||
203 | { | ||
204 | int cs = spi->cs_gpio; | ||
205 | |||
206 | if (gpio_is_valid(cs)) | ||
207 | devm_gpio_free(&spi->dev, cs); | ||
208 | } | ||
209 | |||
210 | /* the spi->mode bits understood by this driver: */ | ||
211 | #define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_HIGH) | ||
212 | static int spi_st_setup(struct spi_device *spi) | ||
213 | { | ||
214 | struct spi_st *spi_st = spi_master_get_devdata(spi->master); | ||
215 | u32 spi_st_clk, sscbrg, var; | ||
216 | u32 hz = spi->max_speed_hz; | ||
217 | int cs = spi->cs_gpio; | ||
218 | int ret; | ||
219 | |||
220 | if (!hz) { | ||
221 | dev_err(&spi->dev, "max_speed_hz unspecified\n"); | ||
222 | return -EINVAL; | ||
223 | } | ||
224 | |||
225 | if (!gpio_is_valid(cs)) { | ||
226 | dev_err(&spi->dev, "%d is not a valid gpio\n", cs); | ||
227 | return -EINVAL; | ||
228 | } | ||
229 | |||
230 | if (devm_gpio_request(&spi->dev, cs, dev_name(&spi->dev))) { | ||
231 | dev_err(&spi->dev, "could not request gpio:%d\n", cs); | ||
232 | return -EINVAL; | ||
233 | } | ||
234 | |||
235 | ret = gpio_direction_output(cs, spi->mode & SPI_CS_HIGH); | ||
236 | if (ret) | ||
237 | return ret; | ||
238 | |||
239 | spi_st_clk = clk_get_rate(spi_st->clk); | ||
240 | |||
241 | /* Set SSC_BRF */ | ||
242 | sscbrg = spi_st_clk / (2 * hz); | ||
243 | if (sscbrg < 0x07 || sscbrg > BIT(16)) { | ||
244 | dev_err(&spi->dev, | ||
245 | "baudrate %d outside valid range %d\n", sscbrg, hz); | ||
246 | return -EINVAL; | ||
247 | } | ||
248 | |||
249 | spi_st->baud = spi_st_clk / (2 * sscbrg); | ||
250 | if (sscbrg == BIT(16)) /* 16-bit counter wraps */ | ||
251 | sscbrg = 0x0; | ||
252 | |||
253 | writel_relaxed(sscbrg, spi_st->base + SSC_BRG); | ||
254 | |||
255 | dev_dbg(&spi->dev, | ||
256 | "setting baudrate:target= %u hz, actual= %u hz, sscbrg= %u\n", | ||
257 | hz, spi_st->baud, sscbrg); | ||
258 | |||
259 | /* Set SSC_CTL and enable SSC */ | ||
260 | var = readl_relaxed(spi_st->base + SSC_CTL); | ||
261 | var |= SSC_CTL_MS; | ||
262 | |||
263 | if (spi->mode & SPI_CPOL) | ||
264 | var |= SSC_CTL_PO; | ||
265 | else | ||
266 | var &= ~SSC_CTL_PO; | ||
267 | |||
268 | if (spi->mode & SPI_CPHA) | ||
269 | var |= SSC_CTL_PH; | ||
270 | else | ||
271 | var &= ~SSC_CTL_PH; | ||
272 | |||
273 | if ((spi->mode & SPI_LSB_FIRST) == 0) | ||
274 | var |= SSC_CTL_HB; | ||
275 | else | ||
276 | var &= ~SSC_CTL_HB; | ||
277 | |||
278 | if (spi->mode & SPI_LOOP) | ||
279 | var |= SSC_CTL_LPB; | ||
280 | else | ||
281 | var &= ~SSC_CTL_LPB; | ||
282 | |||
283 | var &= ~SSC_CTL_DATA_WIDTH_MSK; | ||
284 | var |= (spi->bits_per_word - 1); | ||
285 | |||
286 | var |= SSC_CTL_EN_TX_FIFO | SSC_CTL_EN_RX_FIFO; | ||
287 | var |= SSC_CTL_EN; | ||
288 | |||
289 | writel_relaxed(var, spi_st->base + SSC_CTL); | ||
290 | |||
291 | /* Clear the status register */ | ||
292 | readl_relaxed(spi_st->base + SSC_RBUF); | ||
293 | |||
294 | return 0; | ||
295 | } | ||
296 | |||
297 | /* Interrupt fired when TX shift register becomes empty */ | ||
298 | static irqreturn_t spi_st_irq(int irq, void *dev_id) | ||
299 | { | ||
300 | struct spi_st *spi_st = (struct spi_st *)dev_id; | ||
301 | |||
302 | /* Read RX FIFO */ | ||
303 | ssc_read_rx_fifo(spi_st); | ||
304 | |||
305 | /* Fill TX FIFO */ | ||
306 | if (spi_st->words_remaining) { | ||
307 | ssc_write_tx_fifo(spi_st); | ||
308 | } else { | ||
309 | /* TX/RX complete */ | ||
310 | writel_relaxed(0x0, spi_st->base + SSC_IEN); | ||
311 | /* | ||
312 | * read SSC_IEN to ensure that this bit is set | ||
313 | * before re-enabling interrupt | ||
314 | */ | ||
315 | readl(spi_st->base + SSC_IEN); | ||
316 | complete(&spi_st->done); | ||
317 | } | ||
318 | |||
319 | return IRQ_HANDLED; | ||
320 | } | ||
321 | |||
322 | static int spi_st_probe(struct platform_device *pdev) | ||
323 | { | ||
324 | struct device_node *np = pdev->dev.of_node; | ||
325 | struct spi_master *master; | ||
326 | struct resource *res; | ||
327 | struct spi_st *spi_st; | ||
328 | int irq, ret = 0; | ||
329 | u32 var; | ||
330 | |||
331 | master = spi_alloc_master(&pdev->dev, sizeof(*spi_st)); | ||
332 | if (!master) | ||
333 | return -ENOMEM; | ||
334 | |||
335 | master->dev.of_node = np; | ||
336 | master->mode_bits = MODEBITS; | ||
337 | master->setup = spi_st_setup; | ||
338 | master->cleanup = spi_st_cleanup; | ||
339 | master->transfer_one = spi_st_transfer_one; | ||
340 | master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); | ||
341 | master->auto_runtime_pm = true; | ||
342 | master->bus_num = pdev->id; | ||
343 | spi_st = spi_master_get_devdata(master); | ||
344 | |||
345 | spi_st->clk = devm_clk_get(&pdev->dev, "ssc"); | ||
346 | if (IS_ERR(spi_st->clk)) { | ||
347 | dev_err(&pdev->dev, "Unable to request clock\n"); | ||
348 | return PTR_ERR(spi_st->clk); | ||
349 | } | ||
350 | |||
351 | ret = spi_st_clk_enable(spi_st); | ||
352 | if (ret) | ||
353 | return ret; | ||
354 | |||
355 | init_completion(&spi_st->done); | ||
356 | |||
357 | /* Get resources */ | ||
358 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
359 | spi_st->base = devm_ioremap_resource(&pdev->dev, res); | ||
360 | if (IS_ERR(spi_st->base)) { | ||
361 | ret = PTR_ERR(spi_st->base); | ||
362 | goto clk_disable; | ||
363 | } | ||
364 | |||
365 | /* Disable I2C and Reset SSC */ | ||
366 | writel_relaxed(0x0, spi_st->base + SSC_I2C); | ||
367 | var = readw_relaxed(spi_st->base + SSC_CTL); | ||
368 | var |= SSC_CTL_SR; | ||
369 | writel_relaxed(var, spi_st->base + SSC_CTL); | ||
370 | |||
371 | udelay(1); | ||
372 | var = readl_relaxed(spi_st->base + SSC_CTL); | ||
373 | var &= ~SSC_CTL_SR; | ||
374 | writel_relaxed(var, spi_st->base + SSC_CTL); | ||
375 | |||
376 | /* Set SSC into slave mode before reconfiguring PIO pins */ | ||
377 | var = readl_relaxed(spi_st->base + SSC_CTL); | ||
378 | var &= ~SSC_CTL_MS; | ||
379 | writel_relaxed(var, spi_st->base + SSC_CTL); | ||
380 | |||
381 | irq = irq_of_parse_and_map(np, 0); | ||
382 | if (!irq) { | ||
383 | dev_err(&pdev->dev, "IRQ missing or invalid\n"); | ||
384 | ret = -EINVAL; | ||
385 | goto clk_disable; | ||
386 | } | ||
387 | |||
388 | ret = devm_request_irq(&pdev->dev, irq, spi_st_irq, 0, | ||
389 | pdev->name, spi_st); | ||
390 | if (ret) { | ||
391 | dev_err(&pdev->dev, "Failed to request irq %d\n", irq); | ||
392 | goto clk_disable; | ||
393 | } | ||
394 | |||
395 | /* by default the device is on */ | ||
396 | pm_runtime_set_active(&pdev->dev); | ||
397 | pm_runtime_enable(&pdev->dev); | ||
398 | |||
399 | platform_set_drvdata(pdev, master); | ||
400 | |||
401 | ret = devm_spi_register_master(&pdev->dev, master); | ||
402 | if (ret) { | ||
403 | dev_err(&pdev->dev, "Failed to register master\n"); | ||
404 | goto clk_disable; | ||
405 | } | ||
406 | |||
407 | return 0; | ||
408 | |||
409 | clk_disable: | ||
410 | spi_st_clk_disable(spi_st); | ||
411 | |||
412 | return ret; | ||
413 | } | ||
414 | |||
415 | static int spi_st_remove(struct platform_device *pdev) | ||
416 | { | ||
417 | struct spi_master *master = platform_get_drvdata(pdev); | ||
418 | struct spi_st *spi_st = spi_master_get_devdata(master); | ||
419 | |||
420 | spi_st_clk_disable(spi_st); | ||
421 | |||
422 | pinctrl_pm_select_sleep_state(&pdev->dev); | ||
423 | |||
424 | return 0; | ||
425 | } | ||
426 | |||
427 | #ifdef CONFIG_PM | ||
428 | static int spi_st_runtime_suspend(struct device *dev) | ||
429 | { | ||
430 | struct spi_master *master = dev_get_drvdata(dev); | ||
431 | struct spi_st *spi_st = spi_master_get_devdata(master); | ||
432 | |||
433 | writel_relaxed(0, spi_st->base + SSC_IEN); | ||
434 | pinctrl_pm_select_sleep_state(dev); | ||
435 | |||
436 | spi_st_clk_disable(spi_st); | ||
437 | |||
438 | return 0; | ||
439 | } | ||
440 | |||
441 | static int spi_st_runtime_resume(struct device *dev) | ||
442 | { | ||
443 | struct spi_master *master = dev_get_drvdata(dev); | ||
444 | struct spi_st *spi_st = spi_master_get_devdata(master); | ||
445 | int ret; | ||
446 | |||
447 | ret = spi_st_clk_enable(spi_st); | ||
448 | pinctrl_pm_select_default_state(dev); | ||
449 | |||
450 | return ret; | ||
451 | } | ||
452 | #endif | ||
453 | |||
454 | #ifdef CONFIG_PM_SLEEP | ||
455 | static int spi_st_suspend(struct device *dev) | ||
456 | { | ||
457 | struct spi_master *master = dev_get_drvdata(dev); | ||
458 | int ret; | ||
459 | |||
460 | ret = spi_master_suspend(master); | ||
461 | if (ret) | ||
462 | return ret; | ||
463 | |||
464 | return pm_runtime_force_suspend(dev); | ||
465 | } | ||
466 | |||
467 | static int spi_st_resume(struct device *dev) | ||
468 | { | ||
469 | struct spi_master *master = dev_get_drvdata(dev); | ||
470 | int ret; | ||
471 | |||
472 | ret = spi_master_resume(master); | ||
473 | if (ret) | ||
474 | return ret; | ||
475 | |||
476 | return pm_runtime_force_resume(dev); | ||
477 | } | ||
478 | #endif | ||
479 | |||
480 | static const struct dev_pm_ops spi_st_pm = { | ||
481 | SET_SYSTEM_SLEEP_PM_OPS(spi_st_suspend, spi_st_resume) | ||
482 | SET_RUNTIME_PM_OPS(spi_st_runtime_suspend, spi_st_runtime_resume, NULL) | ||
483 | }; | ||
484 | |||
485 | static struct of_device_id stm_spi_match[] = { | ||
486 | { .compatible = "st,comms-ssc4-spi", }, | ||
487 | {}, | ||
488 | }; | ||
489 | MODULE_DEVICE_TABLE(of, stm_spi_match); | ||
490 | |||
491 | static struct platform_driver spi_st_driver = { | ||
492 | .driver = { | ||
493 | .name = "spi-st", | ||
494 | .pm = &spi_st_pm, | ||
495 | .of_match_table = of_match_ptr(stm_spi_match), | ||
496 | }, | ||
497 | .probe = spi_st_probe, | ||
498 | .remove = spi_st_remove, | ||
499 | }; | ||
500 | module_platform_driver(spi_st_driver); | ||
501 | |||
502 | MODULE_AUTHOR("Patrice Chotard <patrice.chotard@st.com>"); | ||
503 | MODULE_DESCRIPTION("STM SSC SPI driver"); | ||
504 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c index 6146c4cd6583..884a716e50cb 100644 --- a/drivers/spi/spi-ti-qspi.c +++ b/drivers/spi/spi-ti-qspi.c | |||
@@ -201,7 +201,7 @@ static void ti_qspi_restore_ctx(struct ti_qspi *qspi) | |||
201 | 201 | ||
202 | static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) | 202 | static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) |
203 | { | 203 | { |
204 | int wlen, count, ret; | 204 | int wlen, count; |
205 | unsigned int cmd; | 205 | unsigned int cmd; |
206 | const u8 *txbuf; | 206 | const u8 *txbuf; |
207 | 207 | ||
@@ -230,9 +230,8 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) | |||
230 | } | 230 | } |
231 | 231 | ||
232 | ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); | 232 | ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); |
233 | ret = wait_for_completion_timeout(&qspi->transfer_complete, | 233 | if (!wait_for_completion_timeout(&qspi->transfer_complete, |
234 | QSPI_COMPLETION_TIMEOUT); | 234 | QSPI_COMPLETION_TIMEOUT)) { |
235 | if (ret == 0) { | ||
236 | dev_err(qspi->dev, "write timed out\n"); | 235 | dev_err(qspi->dev, "write timed out\n"); |
237 | return -ETIMEDOUT; | 236 | return -ETIMEDOUT; |
238 | } | 237 | } |
@@ -245,7 +244,7 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) | |||
245 | 244 | ||
246 | static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t) | 245 | static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t) |
247 | { | 246 | { |
248 | int wlen, count, ret; | 247 | int wlen, count; |
249 | unsigned int cmd; | 248 | unsigned int cmd; |
250 | u8 *rxbuf; | 249 | u8 *rxbuf; |
251 | 250 | ||
@@ -268,9 +267,8 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t) | |||
268 | while (count) { | 267 | while (count) { |
269 | dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc); | 268 | dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc); |
270 | ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); | 269 | ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); |
271 | ret = wait_for_completion_timeout(&qspi->transfer_complete, | 270 | if (!wait_for_completion_timeout(&qspi->transfer_complete, |
272 | QSPI_COMPLETION_TIMEOUT); | 271 | QSPI_COMPLETION_TIMEOUT)) { |
273 | if (ret == 0) { | ||
274 | dev_err(qspi->dev, "read timed out\n"); | 272 | dev_err(qspi->dev, "read timed out\n"); |
275 | return -ETIMEDOUT; | 273 | return -ETIMEDOUT; |
276 | } | 274 | } |
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c index be692ad50442..93dfcee0f987 100644 --- a/drivers/spi/spi-topcliff-pch.c +++ b/drivers/spi/spi-topcliff-pch.c | |||
@@ -11,10 +11,6 @@ | |||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
13 | * GNU General Public License for more details. | 13 | * GNU General Public License for more details. |
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. | ||
18 | */ | 14 | */ |
19 | 15 | ||
20 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c index 79bd84f43430..133f53a9c1d4 100644 --- a/drivers/spi/spi-xilinx.c +++ b/drivers/spi/spi-xilinx.c | |||
@@ -22,6 +22,8 @@ | |||
22 | #include <linux/spi/xilinx_spi.h> | 22 | #include <linux/spi/xilinx_spi.h> |
23 | #include <linux/io.h> | 23 | #include <linux/io.h> |
24 | 24 | ||
25 | #define XILINX_SPI_MAX_CS 32 | ||
26 | |||
25 | #define XILINX_SPI_NAME "xilinx_spi" | 27 | #define XILINX_SPI_NAME "xilinx_spi" |
26 | 28 | ||
27 | /* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e) | 29 | /* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e) |
@@ -34,7 +36,8 @@ | |||
34 | #define XSPI_CR_MASTER_MODE 0x04 | 36 | #define XSPI_CR_MASTER_MODE 0x04 |
35 | #define XSPI_CR_CPOL 0x08 | 37 | #define XSPI_CR_CPOL 0x08 |
36 | #define XSPI_CR_CPHA 0x10 | 38 | #define XSPI_CR_CPHA 0x10 |
37 | #define XSPI_CR_MODE_MASK (XSPI_CR_CPHA | XSPI_CR_CPOL) | 39 | #define XSPI_CR_MODE_MASK (XSPI_CR_CPHA | XSPI_CR_CPOL | \ |
40 | XSPI_CR_LSB_FIRST | XSPI_CR_LOOP) | ||
38 | #define XSPI_CR_TXFIFO_RESET 0x20 | 41 | #define XSPI_CR_TXFIFO_RESET 0x20 |
39 | #define XSPI_CR_RXFIFO_RESET 0x40 | 42 | #define XSPI_CR_RXFIFO_RESET 0x40 |
40 | #define XSPI_CR_MANUAL_SSELECT 0x80 | 43 | #define XSPI_CR_MANUAL_SSELECT 0x80 |
@@ -85,12 +88,11 @@ struct xilinx_spi { | |||
85 | 88 | ||
86 | u8 *rx_ptr; /* pointer in the Tx buffer */ | 89 | u8 *rx_ptr; /* pointer in the Tx buffer */ |
87 | const u8 *tx_ptr; /* pointer in the Rx buffer */ | 90 | const u8 *tx_ptr; /* pointer in the Rx buffer */ |
88 | int remaining_bytes; /* the number of bytes left to transfer */ | 91 | u8 bytes_per_word; |
89 | u8 bits_per_word; | 92 | int buffer_size; /* buffer size in words */ |
93 | u32 cs_inactive; /* Level of the CS pins when inactive*/ | ||
90 | unsigned int (*read_fn)(void __iomem *); | 94 | unsigned int (*read_fn)(void __iomem *); |
91 | void (*write_fn)(u32, void __iomem *); | 95 | void (*write_fn)(u32, void __iomem *); |
92 | void (*tx_fn)(struct xilinx_spi *); | ||
93 | void (*rx_fn)(struct xilinx_spi *); | ||
94 | }; | 96 | }; |
95 | 97 | ||
96 | static void xspi_write32(u32 val, void __iomem *addr) | 98 | static void xspi_write32(u32 val, void __iomem *addr) |
@@ -113,49 +115,51 @@ static unsigned int xspi_read32_be(void __iomem *addr) | |||
113 | return ioread32be(addr); | 115 | return ioread32be(addr); |
114 | } | 116 | } |
115 | 117 | ||
116 | static void xspi_tx8(struct xilinx_spi *xspi) | 118 | static void xilinx_spi_tx(struct xilinx_spi *xspi) |
117 | { | 119 | { |
118 | xspi->write_fn(*xspi->tx_ptr, xspi->regs + XSPI_TXD_OFFSET); | 120 | u32 data = 0; |
119 | xspi->tx_ptr++; | ||
120 | } | ||
121 | |||
122 | static void xspi_tx16(struct xilinx_spi *xspi) | ||
123 | { | ||
124 | xspi->write_fn(*(u16 *)(xspi->tx_ptr), xspi->regs + XSPI_TXD_OFFSET); | ||
125 | xspi->tx_ptr += 2; | ||
126 | } | ||
127 | 121 | ||
128 | static void xspi_tx32(struct xilinx_spi *xspi) | 122 | if (!xspi->tx_ptr) { |
129 | { | 123 | xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET); |
130 | xspi->write_fn(*(u32 *)(xspi->tx_ptr), xspi->regs + XSPI_TXD_OFFSET); | 124 | return; |
131 | xspi->tx_ptr += 4; | ||
132 | } | ||
133 | |||
134 | static void xspi_rx8(struct xilinx_spi *xspi) | ||
135 | { | ||
136 | u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET); | ||
137 | if (xspi->rx_ptr) { | ||
138 | *xspi->rx_ptr = data & 0xff; | ||
139 | xspi->rx_ptr++; | ||
140 | } | 125 | } |
141 | } | ||
142 | 126 | ||
143 | static void xspi_rx16(struct xilinx_spi *xspi) | 127 | switch (xspi->bytes_per_word) { |
144 | { | 128 | case 1: |
145 | u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET); | 129 | data = *(u8 *)(xspi->tx_ptr); |
146 | if (xspi->rx_ptr) { | 130 | break; |
147 | *(u16 *)(xspi->rx_ptr) = data & 0xffff; | 131 | case 2: |
148 | xspi->rx_ptr += 2; | 132 | data = *(u16 *)(xspi->tx_ptr); |
133 | break; | ||
134 | case 4: | ||
135 | data = *(u32 *)(xspi->tx_ptr); | ||
136 | break; | ||
149 | } | 137 | } |
138 | |||
139 | xspi->write_fn(data, xspi->regs + XSPI_TXD_OFFSET); | ||
140 | xspi->tx_ptr += xspi->bytes_per_word; | ||
150 | } | 141 | } |
151 | 142 | ||
152 | static void xspi_rx32(struct xilinx_spi *xspi) | 143 | static void xilinx_spi_rx(struct xilinx_spi *xspi) |
153 | { | 144 | { |
154 | u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET); | 145 | u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET); |
155 | if (xspi->rx_ptr) { | 146 | |
147 | if (!xspi->rx_ptr) | ||
148 | return; | ||
149 | |||
150 | switch (xspi->bytes_per_word) { | ||
151 | case 1: | ||
152 | *(u8 *)(xspi->rx_ptr) = data; | ||
153 | break; | ||
154 | case 2: | ||
155 | *(u16 *)(xspi->rx_ptr) = data; | ||
156 | break; | ||
157 | case 4: | ||
156 | *(u32 *)(xspi->rx_ptr) = data; | 158 | *(u32 *)(xspi->rx_ptr) = data; |
157 | xspi->rx_ptr += 4; | 159 | break; |
158 | } | 160 | } |
161 | |||
162 | xspi->rx_ptr += xspi->bytes_per_word; | ||
159 | } | 163 | } |
160 | 164 | ||
161 | static void xspi_init_hw(struct xilinx_spi *xspi) | 165 | static void xspi_init_hw(struct xilinx_spi *xspi) |
@@ -165,46 +169,56 @@ static void xspi_init_hw(struct xilinx_spi *xspi) | |||
165 | /* Reset the SPI device */ | 169 | /* Reset the SPI device */ |
166 | xspi->write_fn(XIPIF_V123B_RESET_MASK, | 170 | xspi->write_fn(XIPIF_V123B_RESET_MASK, |
167 | regs_base + XIPIF_V123B_RESETR_OFFSET); | 171 | regs_base + XIPIF_V123B_RESETR_OFFSET); |
168 | /* Disable all the interrupts just in case */ | 172 | /* Enable the transmit empty interrupt, which we use to determine |
169 | xspi->write_fn(0, regs_base + XIPIF_V123B_IIER_OFFSET); | 173 | * progress on the transmission. |
170 | /* Enable the global IPIF interrupt */ | 174 | */ |
171 | xspi->write_fn(XIPIF_V123B_GINTR_ENABLE, | 175 | xspi->write_fn(XSPI_INTR_TX_EMPTY, |
172 | regs_base + XIPIF_V123B_DGIER_OFFSET); | 176 | regs_base + XIPIF_V123B_IIER_OFFSET); |
177 | /* Disable the global IPIF interrupt */ | ||
178 | xspi->write_fn(0, regs_base + XIPIF_V123B_DGIER_OFFSET); | ||
173 | /* Deselect the slave on the SPI bus */ | 179 | /* Deselect the slave on the SPI bus */ |
174 | xspi->write_fn(0xffff, regs_base + XSPI_SSR_OFFSET); | 180 | xspi->write_fn(0xffff, regs_base + XSPI_SSR_OFFSET); |
175 | /* Disable the transmitter, enable Manual Slave Select Assertion, | 181 | /* Disable the transmitter, enable Manual Slave Select Assertion, |
176 | * put SPI controller into master mode, and enable it */ | 182 | * put SPI controller into master mode, and enable it */ |
177 | xspi->write_fn(XSPI_CR_TRANS_INHIBIT | XSPI_CR_MANUAL_SSELECT | | 183 | xspi->write_fn(XSPI_CR_MANUAL_SSELECT | XSPI_CR_MASTER_MODE | |
178 | XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE | XSPI_CR_TXFIFO_RESET | | 184 | XSPI_CR_ENABLE | XSPI_CR_TXFIFO_RESET | XSPI_CR_RXFIFO_RESET, |
179 | XSPI_CR_RXFIFO_RESET, regs_base + XSPI_CR_OFFSET); | 185 | regs_base + XSPI_CR_OFFSET); |
180 | } | 186 | } |
181 | 187 | ||
182 | static void xilinx_spi_chipselect(struct spi_device *spi, int is_on) | 188 | static void xilinx_spi_chipselect(struct spi_device *spi, int is_on) |
183 | { | 189 | { |
184 | struct xilinx_spi *xspi = spi_master_get_devdata(spi->master); | 190 | struct xilinx_spi *xspi = spi_master_get_devdata(spi->master); |
191 | u16 cr; | ||
192 | u32 cs; | ||
185 | 193 | ||
186 | if (is_on == BITBANG_CS_INACTIVE) { | 194 | if (is_on == BITBANG_CS_INACTIVE) { |
187 | /* Deselect the slave on the SPI bus */ | 195 | /* Deselect the slave on the SPI bus */ |
188 | xspi->write_fn(0xffff, xspi->regs + XSPI_SSR_OFFSET); | 196 | xspi->write_fn(xspi->cs_inactive, xspi->regs + XSPI_SSR_OFFSET); |
189 | } else if (is_on == BITBANG_CS_ACTIVE) { | 197 | return; |
190 | /* Set the SPI clock phase and polarity */ | ||
191 | u16 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) | ||
192 | & ~XSPI_CR_MODE_MASK; | ||
193 | if (spi->mode & SPI_CPHA) | ||
194 | cr |= XSPI_CR_CPHA; | ||
195 | if (spi->mode & SPI_CPOL) | ||
196 | cr |= XSPI_CR_CPOL; | ||
197 | xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET); | ||
198 | |||
199 | /* We do not check spi->max_speed_hz here as the SPI clock | ||
200 | * frequency is not software programmable (the IP block design | ||
201 | * parameter) | ||
202 | */ | ||
203 | |||
204 | /* Activate the chip select */ | ||
205 | xspi->write_fn(~(0x0001 << spi->chip_select), | ||
206 | xspi->regs + XSPI_SSR_OFFSET); | ||
207 | } | 198 | } |
199 | |||
200 | /* Set the SPI clock phase and polarity */ | ||
201 | cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) & ~XSPI_CR_MODE_MASK; | ||
202 | if (spi->mode & SPI_CPHA) | ||
203 | cr |= XSPI_CR_CPHA; | ||
204 | if (spi->mode & SPI_CPOL) | ||
205 | cr |= XSPI_CR_CPOL; | ||
206 | if (spi->mode & SPI_LSB_FIRST) | ||
207 | cr |= XSPI_CR_LSB_FIRST; | ||
208 | if (spi->mode & SPI_LOOP) | ||
209 | cr |= XSPI_CR_LOOP; | ||
210 | xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET); | ||
211 | |||
212 | /* We do not check spi->max_speed_hz here as the SPI clock | ||
213 | * frequency is not software programmable (the IP block design | ||
214 | * parameter) | ||
215 | */ | ||
216 | |||
217 | cs = xspi->cs_inactive; | ||
218 | cs ^= BIT(spi->chip_select); | ||
219 | |||
220 | /* Activate the chip select */ | ||
221 | xspi->write_fn(cs, xspi->regs + XSPI_SSR_OFFSET); | ||
208 | } | 222 | } |
209 | 223 | ||
210 | /* spi_bitbang requires custom setup_transfer() to be defined if there is a | 224 | /* spi_bitbang requires custom setup_transfer() to be defined if there is a |
@@ -213,85 +227,85 @@ static void xilinx_spi_chipselect(struct spi_device *spi, int is_on) | |||
213 | static int xilinx_spi_setup_transfer(struct spi_device *spi, | 227 | static int xilinx_spi_setup_transfer(struct spi_device *spi, |
214 | struct spi_transfer *t) | 228 | struct spi_transfer *t) |
215 | { | 229 | { |
216 | return 0; | 230 | struct xilinx_spi *xspi = spi_master_get_devdata(spi->master); |
217 | } | ||
218 | 231 | ||
219 | static void xilinx_spi_fill_tx_fifo(struct xilinx_spi *xspi) | 232 | if (spi->mode & SPI_CS_HIGH) |
220 | { | 233 | xspi->cs_inactive &= ~BIT(spi->chip_select); |
221 | u8 sr; | 234 | else |
235 | xspi->cs_inactive |= BIT(spi->chip_select); | ||
222 | 236 | ||
223 | /* Fill the Tx FIFO with as many bytes as possible */ | 237 | return 0; |
224 | sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); | ||
225 | while ((sr & XSPI_SR_TX_FULL_MASK) == 0 && xspi->remaining_bytes > 0) { | ||
226 | if (xspi->tx_ptr) | ||
227 | xspi->tx_fn(xspi); | ||
228 | else | ||
229 | xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET); | ||
230 | xspi->remaining_bytes -= xspi->bits_per_word / 8; | ||
231 | sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); | ||
232 | } | ||
233 | } | 238 | } |
234 | 239 | ||
235 | static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) | 240 | static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) |
236 | { | 241 | { |
237 | struct xilinx_spi *xspi = spi_master_get_devdata(spi->master); | 242 | struct xilinx_spi *xspi = spi_master_get_devdata(spi->master); |
238 | u32 ipif_ier; | 243 | int remaining_words; /* the number of words left to transfer */ |
244 | bool use_irq = false; | ||
245 | u16 cr = 0; | ||
239 | 246 | ||
240 | /* We get here with transmitter inhibited */ | 247 | /* We get here with transmitter inhibited */ |
241 | 248 | ||
242 | xspi->tx_ptr = t->tx_buf; | 249 | xspi->tx_ptr = t->tx_buf; |
243 | xspi->rx_ptr = t->rx_buf; | 250 | xspi->rx_ptr = t->rx_buf; |
244 | xspi->remaining_bytes = t->len; | 251 | remaining_words = t->len / xspi->bytes_per_word; |
245 | reinit_completion(&xspi->done); | 252 | reinit_completion(&xspi->done); |
246 | 253 | ||
254 | if (xspi->irq >= 0 && remaining_words > xspi->buffer_size) { | ||
255 | use_irq = true; | ||
256 | xspi->write_fn(XSPI_INTR_TX_EMPTY, | ||
257 | xspi->regs + XIPIF_V123B_IISR_OFFSET); | ||
258 | /* Enable the global IPIF interrupt */ | ||
259 | xspi->write_fn(XIPIF_V123B_GINTR_ENABLE, | ||
260 | xspi->regs + XIPIF_V123B_DGIER_OFFSET); | ||
261 | /* Inhibit irq to avoid spurious irqs on tx_empty*/ | ||
262 | cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET); | ||
263 | xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT, | ||
264 | xspi->regs + XSPI_CR_OFFSET); | ||
265 | } | ||
247 | 266 | ||
248 | /* Enable the transmit empty interrupt, which we use to determine | 267 | while (remaining_words) { |
249 | * progress on the transmission. | 268 | int n_words, tx_words, rx_words; |
250 | */ | ||
251 | ipif_ier = xspi->read_fn(xspi->regs + XIPIF_V123B_IIER_OFFSET); | ||
252 | xspi->write_fn(ipif_ier | XSPI_INTR_TX_EMPTY, | ||
253 | xspi->regs + XIPIF_V123B_IIER_OFFSET); | ||
254 | 269 | ||
255 | for (;;) { | 270 | n_words = min(remaining_words, xspi->buffer_size); |
256 | u16 cr; | ||
257 | u8 sr; | ||
258 | 271 | ||
259 | xilinx_spi_fill_tx_fifo(xspi); | 272 | tx_words = n_words; |
273 | while (tx_words--) | ||
274 | xilinx_spi_tx(xspi); | ||
260 | 275 | ||
261 | /* Start the transfer by not inhibiting the transmitter any | 276 | /* Start the transfer by not inhibiting the transmitter any |
262 | * longer | 277 | * longer |
263 | */ | 278 | */ |
264 | cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) & | ||
265 | ~XSPI_CR_TRANS_INHIBIT; | ||
266 | xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET); | ||
267 | 279 | ||
268 | wait_for_completion(&xspi->done); | 280 | if (use_irq) { |
281 | xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET); | ||
282 | wait_for_completion(&xspi->done); | ||
283 | } else | ||
284 | while (!(xspi->read_fn(xspi->regs + XSPI_SR_OFFSET) & | ||
285 | XSPI_SR_TX_EMPTY_MASK)) | ||
286 | ; | ||
269 | 287 | ||
270 | /* A transmit has just completed. Process received data and | 288 | /* A transmit has just completed. Process received data and |
271 | * check for more data to transmit. Always inhibit the | 289 | * check for more data to transmit. Always inhibit the |
272 | * transmitter while the Isr refills the transmit register/FIFO, | 290 | * transmitter while the Isr refills the transmit register/FIFO, |
273 | * or make sure it is stopped if we're done. | 291 | * or make sure it is stopped if we're done. |
274 | */ | 292 | */ |
275 | cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET); | 293 | if (use_irq) |
276 | xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT, | 294 | xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT, |
277 | xspi->regs + XSPI_CR_OFFSET); | 295 | xspi->regs + XSPI_CR_OFFSET); |
278 | 296 | ||
279 | /* Read out all the data from the Rx FIFO */ | 297 | /* Read out all the data from the Rx FIFO */ |
280 | sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); | 298 | rx_words = n_words; |
281 | while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) { | 299 | while (rx_words--) |
282 | xspi->rx_fn(xspi); | 300 | xilinx_spi_rx(xspi); |
283 | sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); | 301 | |
284 | } | 302 | remaining_words -= n_words; |
285 | |||
286 | /* See if there is more data to send */ | ||
287 | if (xspi->remaining_bytes <= 0) | ||
288 | break; | ||
289 | } | 303 | } |
290 | 304 | ||
291 | /* Disable the transmit empty interrupt */ | 305 | if (use_irq) |
292 | xspi->write_fn(ipif_ier, xspi->regs + XIPIF_V123B_IIER_OFFSET); | 306 | xspi->write_fn(0, xspi->regs + XIPIF_V123B_DGIER_OFFSET); |
293 | 307 | ||
294 | return t->len - xspi->remaining_bytes; | 308 | return t->len; |
295 | } | 309 | } |
296 | 310 | ||
297 | 311 | ||
@@ -316,6 +330,28 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id) | |||
316 | return IRQ_HANDLED; | 330 | return IRQ_HANDLED; |
317 | } | 331 | } |
318 | 332 | ||
333 | static int xilinx_spi_find_buffer_size(struct xilinx_spi *xspi) | ||
334 | { | ||
335 | u8 sr; | ||
336 | int n_words = 0; | ||
337 | |||
338 | /* | ||
339 | * Before the buffer_size detection we reset the core | ||
340 | * to make sure we start with a clean state. | ||
341 | */ | ||
342 | xspi->write_fn(XIPIF_V123B_RESET_MASK, | ||
343 | xspi->regs + XIPIF_V123B_RESETR_OFFSET); | ||
344 | |||
345 | /* Fill the Tx FIFO with as many words as possible */ | ||
346 | do { | ||
347 | xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET); | ||
348 | sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET); | ||
349 | n_words++; | ||
350 | } while (!(sr & XSPI_SR_TX_FULL_MASK)); | ||
351 | |||
352 | return n_words; | ||
353 | } | ||
354 | |||
319 | static const struct of_device_id xilinx_spi_of_match[] = { | 355 | static const struct of_device_id xilinx_spi_of_match[] = { |
320 | { .compatible = "xlnx,xps-spi-2.00.a", }, | 356 | { .compatible = "xlnx,xps-spi-2.00.a", }, |
321 | { .compatible = "xlnx,xps-spi-2.00.b", }, | 357 | { .compatible = "xlnx,xps-spi-2.00.b", }, |
@@ -348,14 +384,21 @@ static int xilinx_spi_probe(struct platform_device *pdev) | |||
348 | return -EINVAL; | 384 | return -EINVAL; |
349 | } | 385 | } |
350 | 386 | ||
387 | if (num_cs > XILINX_SPI_MAX_CS) { | ||
388 | dev_err(&pdev->dev, "Invalid number of spi slaves\n"); | ||
389 | return -EINVAL; | ||
390 | } | ||
391 | |||
351 | master = spi_alloc_master(&pdev->dev, sizeof(struct xilinx_spi)); | 392 | master = spi_alloc_master(&pdev->dev, sizeof(struct xilinx_spi)); |
352 | if (!master) | 393 | if (!master) |
353 | return -ENODEV; | 394 | return -ENODEV; |
354 | 395 | ||
355 | /* the spi->mode bits understood by this driver: */ | 396 | /* the spi->mode bits understood by this driver: */ |
356 | master->mode_bits = SPI_CPOL | SPI_CPHA; | 397 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP | |
398 | SPI_CS_HIGH; | ||
357 | 399 | ||
358 | xspi = spi_master_get_devdata(master); | 400 | xspi = spi_master_get_devdata(master); |
401 | xspi->cs_inactive = 0xffffffff; | ||
359 | xspi->bitbang.master = master; | 402 | xspi->bitbang.master = master; |
360 | xspi->bitbang.chipselect = xilinx_spi_chipselect; | 403 | xspi->bitbang.chipselect = xilinx_spi_chipselect; |
361 | xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer; | 404 | xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer; |
@@ -392,35 +435,20 @@ static int xilinx_spi_probe(struct platform_device *pdev) | |||
392 | } | 435 | } |
393 | 436 | ||
394 | master->bits_per_word_mask = SPI_BPW_MASK(bits_per_word); | 437 | master->bits_per_word_mask = SPI_BPW_MASK(bits_per_word); |
395 | xspi->bits_per_word = bits_per_word; | 438 | xspi->bytes_per_word = bits_per_word / 8; |
396 | if (xspi->bits_per_word == 8) { | 439 | xspi->buffer_size = xilinx_spi_find_buffer_size(xspi); |
397 | xspi->tx_fn = xspi_tx8; | ||
398 | xspi->rx_fn = xspi_rx8; | ||
399 | } else if (xspi->bits_per_word == 16) { | ||
400 | xspi->tx_fn = xspi_tx16; | ||
401 | xspi->rx_fn = xspi_rx16; | ||
402 | } else if (xspi->bits_per_word == 32) { | ||
403 | xspi->tx_fn = xspi_tx32; | ||
404 | xspi->rx_fn = xspi_rx32; | ||
405 | } else { | ||
406 | ret = -EINVAL; | ||
407 | goto put_master; | ||
408 | } | ||
409 | |||
410 | /* SPI controller initializations */ | ||
411 | xspi_init_hw(xspi); | ||
412 | 440 | ||
413 | xspi->irq = platform_get_irq(pdev, 0); | 441 | xspi->irq = platform_get_irq(pdev, 0); |
414 | if (xspi->irq < 0) { | 442 | if (xspi->irq >= 0) { |
415 | ret = xspi->irq; | 443 | /* Register for SPI Interrupt */ |
416 | goto put_master; | 444 | ret = devm_request_irq(&pdev->dev, xspi->irq, xilinx_spi_irq, 0, |
445 | dev_name(&pdev->dev), xspi); | ||
446 | if (ret) | ||
447 | goto put_master; | ||
417 | } | 448 | } |
418 | 449 | ||
419 | /* Register for SPI Interrupt */ | 450 | /* SPI controller initializations */ |
420 | ret = devm_request_irq(&pdev->dev, xspi->irq, xilinx_spi_irq, 0, | 451 | xspi_init_hw(xspi); |
421 | dev_name(&pdev->dev), xspi); | ||
422 | if (ret) | ||
423 | goto put_master; | ||
424 | 452 | ||
425 | ret = spi_bitbang_start(&xspi->bitbang); | 453 | ret = spi_bitbang_start(&xspi->bitbang); |
426 | if (ret) { | 454 | if (ret) { |
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 66a70e9bc743..c64a3e59fce3 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
@@ -13,10 +13,6 @@ | |||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
15 | * GNU General Public License for more details. | 15 | * GNU General Public License for more details. |
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
20 | */ | 16 | */ |
21 | 17 | ||
22 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
@@ -788,7 +784,7 @@ static int spi_transfer_one_message(struct spi_master *master, | |||
788 | struct spi_transfer *xfer; | 784 | struct spi_transfer *xfer; |
789 | bool keep_cs = false; | 785 | bool keep_cs = false; |
790 | int ret = 0; | 786 | int ret = 0; |
791 | int ms = 1; | 787 | unsigned long ms = 1; |
792 | 788 | ||
793 | spi_set_cs(msg->spi, true); | 789 | spi_set_cs(msg->spi, true); |
794 | 790 | ||
@@ -875,31 +871,59 @@ void spi_finalize_current_transfer(struct spi_master *master) | |||
875 | EXPORT_SYMBOL_GPL(spi_finalize_current_transfer); | 871 | EXPORT_SYMBOL_GPL(spi_finalize_current_transfer); |
876 | 872 | ||
877 | /** | 873 | /** |
878 | * spi_pump_messages - kthread work function which processes spi message queue | 874 | * __spi_pump_messages - function which processes spi message queue |
879 | * @work: pointer to kthread work struct contained in the master struct | 875 | * @master: master to process queue for |
876 | * @in_kthread: true if we are in the context of the message pump thread | ||
880 | * | 877 | * |
881 | * This function checks if there is any spi message in the queue that | 878 | * This function checks if there is any spi message in the queue that |
882 | * needs processing and if so call out to the driver to initialize hardware | 879 | * needs processing and if so call out to the driver to initialize hardware |
883 | * and transfer each message. | 880 | * and transfer each message. |
884 | * | 881 | * |
882 | * Note that it is called both from the kthread itself and also from | ||
883 | * inside spi_sync(); the queue extraction handling at the top of the | ||
884 | * function should deal with this safely. | ||
885 | */ | 885 | */ |
886 | static void spi_pump_messages(struct kthread_work *work) | 886 | static void __spi_pump_messages(struct spi_master *master, bool in_kthread) |
887 | { | 887 | { |
888 | struct spi_master *master = | ||
889 | container_of(work, struct spi_master, pump_messages); | ||
890 | unsigned long flags; | 888 | unsigned long flags; |
891 | bool was_busy = false; | 889 | bool was_busy = false; |
892 | int ret; | 890 | int ret; |
893 | 891 | ||
894 | /* Lock queue and check for queue work */ | 892 | /* Lock queue */ |
895 | spin_lock_irqsave(&master->queue_lock, flags); | 893 | spin_lock_irqsave(&master->queue_lock, flags); |
894 | |||
895 | /* Make sure we are not already running a message */ | ||
896 | if (master->cur_msg) { | ||
897 | spin_unlock_irqrestore(&master->queue_lock, flags); | ||
898 | return; | ||
899 | } | ||
900 | |||
901 | /* If another context is idling the device then defer */ | ||
902 | if (master->idling) { | ||
903 | queue_kthread_work(&master->kworker, &master->pump_messages); | ||
904 | spin_unlock_irqrestore(&master->queue_lock, flags); | ||
905 | return; | ||
906 | } | ||
907 | |||
908 | /* Check if the queue is idle */ | ||
896 | if (list_empty(&master->queue) || !master->running) { | 909 | if (list_empty(&master->queue) || !master->running) { |
897 | if (!master->busy) { | 910 | if (!master->busy) { |
898 | spin_unlock_irqrestore(&master->queue_lock, flags); | 911 | spin_unlock_irqrestore(&master->queue_lock, flags); |
899 | return; | 912 | return; |
900 | } | 913 | } |
914 | |||
915 | /* Only do teardown in the thread */ | ||
916 | if (!in_kthread) { | ||
917 | queue_kthread_work(&master->kworker, | ||
918 | &master->pump_messages); | ||
919 | spin_unlock_irqrestore(&master->queue_lock, flags); | ||
920 | return; | ||
921 | } | ||
922 | |||
901 | master->busy = false; | 923 | master->busy = false; |
924 | master->idling = true; | ||
902 | spin_unlock_irqrestore(&master->queue_lock, flags); | 925 | spin_unlock_irqrestore(&master->queue_lock, flags); |
926 | |||
903 | kfree(master->dummy_rx); | 927 | kfree(master->dummy_rx); |
904 | master->dummy_rx = NULL; | 928 | master->dummy_rx = NULL; |
905 | kfree(master->dummy_tx); | 929 | kfree(master->dummy_tx); |
@@ -913,14 +937,13 @@ static void spi_pump_messages(struct kthread_work *work) | |||
913 | pm_runtime_put_autosuspend(master->dev.parent); | 937 | pm_runtime_put_autosuspend(master->dev.parent); |
914 | } | 938 | } |
915 | trace_spi_master_idle(master); | 939 | trace_spi_master_idle(master); |
916 | return; | ||
917 | } | ||
918 | 940 | ||
919 | /* Make sure we are not already running a message */ | 941 | spin_lock_irqsave(&master->queue_lock, flags); |
920 | if (master->cur_msg) { | 942 | master->idling = false; |
921 | spin_unlock_irqrestore(&master->queue_lock, flags); | 943 | spin_unlock_irqrestore(&master->queue_lock, flags); |
922 | return; | 944 | return; |
923 | } | 945 | } |
946 | |||
924 | /* Extract head of queue */ | 947 | /* Extract head of queue */ |
925 | master->cur_msg = | 948 | master->cur_msg = |
926 | list_first_entry(&master->queue, struct spi_message, queue); | 949 | list_first_entry(&master->queue, struct spi_message, queue); |
@@ -985,13 +1008,22 @@ static void spi_pump_messages(struct kthread_work *work) | |||
985 | } | 1008 | } |
986 | } | 1009 | } |
987 | 1010 | ||
1011 | /** | ||
1012 | * spi_pump_messages - kthread work function which processes spi message queue | ||
1013 | * @work: pointer to kthread work struct contained in the master struct | ||
1014 | */ | ||
1015 | static void spi_pump_messages(struct kthread_work *work) | ||
1016 | { | ||
1017 | struct spi_master *master = | ||
1018 | container_of(work, struct spi_master, pump_messages); | ||
1019 | |||
1020 | __spi_pump_messages(master, true); | ||
1021 | } | ||
1022 | |||
988 | static int spi_init_queue(struct spi_master *master) | 1023 | static int spi_init_queue(struct spi_master *master) |
989 | { | 1024 | { |
990 | struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; | 1025 | struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; |
991 | 1026 | ||
992 | INIT_LIST_HEAD(&master->queue); | ||
993 | spin_lock_init(&master->queue_lock); | ||
994 | |||
995 | master->running = false; | 1027 | master->running = false; |
996 | master->busy = false; | 1028 | master->busy = false; |
997 | 1029 | ||
@@ -1161,12 +1193,9 @@ static int spi_destroy_queue(struct spi_master *master) | |||
1161 | return 0; | 1193 | return 0; |
1162 | } | 1194 | } |
1163 | 1195 | ||
1164 | /** | 1196 | static int __spi_queued_transfer(struct spi_device *spi, |
1165 | * spi_queued_transfer - transfer function for queued transfers | 1197 | struct spi_message *msg, |
1166 | * @spi: spi device which is requesting transfer | 1198 | bool need_pump) |
1167 | * @msg: spi message which is to handled is queued to driver queue | ||
1168 | */ | ||
1169 | static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) | ||
1170 | { | 1199 | { |
1171 | struct spi_master *master = spi->master; | 1200 | struct spi_master *master = spi->master; |
1172 | unsigned long flags; | 1201 | unsigned long flags; |
@@ -1181,13 +1210,23 @@ static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) | |||
1181 | msg->status = -EINPROGRESS; | 1210 | msg->status = -EINPROGRESS; |
1182 | 1211 | ||
1183 | list_add_tail(&msg->queue, &master->queue); | 1212 | list_add_tail(&msg->queue, &master->queue); |
1184 | if (!master->busy) | 1213 | if (!master->busy && need_pump) |
1185 | queue_kthread_work(&master->kworker, &master->pump_messages); | 1214 | queue_kthread_work(&master->kworker, &master->pump_messages); |
1186 | 1215 | ||
1187 | spin_unlock_irqrestore(&master->queue_lock, flags); | 1216 | spin_unlock_irqrestore(&master->queue_lock, flags); |
1188 | return 0; | 1217 | return 0; |
1189 | } | 1218 | } |
1190 | 1219 | ||
1220 | /** | ||
1221 | * spi_queued_transfer - transfer function for queued transfers | ||
1222 | * @spi: spi device which is requesting transfer | ||
1223 | * @msg: spi message which is to handled is queued to driver queue | ||
1224 | */ | ||
1225 | static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) | ||
1226 | { | ||
1227 | return __spi_queued_transfer(spi, msg, true); | ||
1228 | } | ||
1229 | |||
1191 | static int spi_master_initialize_queue(struct spi_master *master) | 1230 | static int spi_master_initialize_queue(struct spi_master *master) |
1192 | { | 1231 | { |
1193 | int ret; | 1232 | int ret; |
@@ -1609,6 +1648,8 @@ int spi_register_master(struct spi_master *master) | |||
1609 | dynamic = 1; | 1648 | dynamic = 1; |
1610 | } | 1649 | } |
1611 | 1650 | ||
1651 | INIT_LIST_HEAD(&master->queue); | ||
1652 | spin_lock_init(&master->queue_lock); | ||
1612 | spin_lock_init(&master->bus_lock_spinlock); | 1653 | spin_lock_init(&master->bus_lock_spinlock); |
1613 | mutex_init(&master->bus_lock_mutex); | 1654 | mutex_init(&master->bus_lock_mutex); |
1614 | master->bus_lock_flag = 0; | 1655 | master->bus_lock_flag = 0; |
@@ -2114,19 +2155,46 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message, | |||
2114 | DECLARE_COMPLETION_ONSTACK(done); | 2155 | DECLARE_COMPLETION_ONSTACK(done); |
2115 | int status; | 2156 | int status; |
2116 | struct spi_master *master = spi->master; | 2157 | struct spi_master *master = spi->master; |
2158 | unsigned long flags; | ||
2159 | |||
2160 | status = __spi_validate(spi, message); | ||
2161 | if (status != 0) | ||
2162 | return status; | ||
2117 | 2163 | ||
2118 | message->complete = spi_complete; | 2164 | message->complete = spi_complete; |
2119 | message->context = &done; | 2165 | message->context = &done; |
2166 | message->spi = spi; | ||
2120 | 2167 | ||
2121 | if (!bus_locked) | 2168 | if (!bus_locked) |
2122 | mutex_lock(&master->bus_lock_mutex); | 2169 | mutex_lock(&master->bus_lock_mutex); |
2123 | 2170 | ||
2124 | status = spi_async_locked(spi, message); | 2171 | /* If we're not using the legacy transfer method then we will |
2172 | * try to transfer in the calling context so special case. | ||
2173 | * This code would be less tricky if we could remove the | ||
2174 | * support for driver implemented message queues. | ||
2175 | */ | ||
2176 | if (master->transfer == spi_queued_transfer) { | ||
2177 | spin_lock_irqsave(&master->bus_lock_spinlock, flags); | ||
2178 | |||
2179 | trace_spi_message_submit(message); | ||
2180 | |||
2181 | status = __spi_queued_transfer(spi, message, false); | ||
2182 | |||
2183 | spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); | ||
2184 | } else { | ||
2185 | status = spi_async_locked(spi, message); | ||
2186 | } | ||
2125 | 2187 | ||
2126 | if (!bus_locked) | 2188 | if (!bus_locked) |
2127 | mutex_unlock(&master->bus_lock_mutex); | 2189 | mutex_unlock(&master->bus_lock_mutex); |
2128 | 2190 | ||
2129 | if (status == 0) { | 2191 | if (status == 0) { |
2192 | /* Push out the messages in the calling context if we | ||
2193 | * can. | ||
2194 | */ | ||
2195 | if (master->transfer == spi_queued_transfer) | ||
2196 | __spi_pump_messages(master, false); | ||
2197 | |||
2130 | wait_for_completion(&done); | 2198 | wait_for_completion(&done); |
2131 | status = message->status; | 2199 | status = message->status; |
2132 | } | 2200 | } |
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 6941e04afb8c..4eb7a980e670 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c | |||
@@ -14,10 +14,6 @@ | |||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
16 | * GNU General Public License for more details. | 16 | * GNU General Public License for more details. |
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
21 | */ | 17 | */ |
22 | 18 | ||
23 | #include <linux/init.h> | 19 | #include <linux/init.h> |
@@ -317,6 +313,37 @@ done: | |||
317 | return status; | 313 | return status; |
318 | } | 314 | } |
319 | 315 | ||
316 | static struct spi_ioc_transfer * | ||
317 | spidev_get_ioc_message(unsigned int cmd, struct spi_ioc_transfer __user *u_ioc, | ||
318 | unsigned *n_ioc) | ||
319 | { | ||
320 | struct spi_ioc_transfer *ioc; | ||
321 | u32 tmp; | ||
322 | |||
323 | /* Check type, command number and direction */ | ||
324 | if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC | ||
325 | || _IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0)) | ||
326 | || _IOC_DIR(cmd) != _IOC_WRITE) | ||
327 | return ERR_PTR(-ENOTTY); | ||
328 | |||
329 | tmp = _IOC_SIZE(cmd); | ||
330 | if ((tmp % sizeof(struct spi_ioc_transfer)) != 0) | ||
331 | return ERR_PTR(-EINVAL); | ||
332 | *n_ioc = tmp / sizeof(struct spi_ioc_transfer); | ||
333 | if (*n_ioc == 0) | ||
334 | return NULL; | ||
335 | |||
336 | /* copy into scratch area */ | ||
337 | ioc = kmalloc(tmp, GFP_KERNEL); | ||
338 | if (!ioc) | ||
339 | return ERR_PTR(-ENOMEM); | ||
340 | if (__copy_from_user(ioc, u_ioc, tmp)) { | ||
341 | kfree(ioc); | ||
342 | return ERR_PTR(-EFAULT); | ||
343 | } | ||
344 | return ioc; | ||
345 | } | ||
346 | |||
320 | static long | 347 | static long |
321 | spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | 348 | spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) |
322 | { | 349 | { |
@@ -456,32 +483,15 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
456 | 483 | ||
457 | default: | 484 | default: |
458 | /* segmented and/or full-duplex I/O request */ | 485 | /* segmented and/or full-duplex I/O request */ |
459 | if (_IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0)) | 486 | /* Check message and copy into scratch area */ |
460 | || _IOC_DIR(cmd) != _IOC_WRITE) { | 487 | ioc = spidev_get_ioc_message(cmd, |
461 | retval = -ENOTTY; | 488 | (struct spi_ioc_transfer __user *)arg, &n_ioc); |
462 | break; | 489 | if (IS_ERR(ioc)) { |
463 | } | 490 | retval = PTR_ERR(ioc); |
464 | |||
465 | tmp = _IOC_SIZE(cmd); | ||
466 | if ((tmp % sizeof(struct spi_ioc_transfer)) != 0) { | ||
467 | retval = -EINVAL; | ||
468 | break; | ||
469 | } | ||
470 | n_ioc = tmp / sizeof(struct spi_ioc_transfer); | ||
471 | if (n_ioc == 0) | ||
472 | break; | ||
473 | |||
474 | /* copy into scratch area */ | ||
475 | ioc = kmalloc(tmp, GFP_KERNEL); | ||
476 | if (!ioc) { | ||
477 | retval = -ENOMEM; | ||
478 | break; | ||
479 | } | ||
480 | if (__copy_from_user(ioc, (void __user *)arg, tmp)) { | ||
481 | kfree(ioc); | ||
482 | retval = -EFAULT; | ||
483 | break; | 491 | break; |
484 | } | 492 | } |
493 | if (!ioc) | ||
494 | break; /* n_ioc is also 0 */ | ||
485 | 495 | ||
486 | /* translate to spi_message, execute */ | 496 | /* translate to spi_message, execute */ |
487 | retval = spidev_message(spidev, ioc, n_ioc); | 497 | retval = spidev_message(spidev, ioc, n_ioc); |
@@ -496,8 +506,67 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
496 | 506 | ||
497 | #ifdef CONFIG_COMPAT | 507 | #ifdef CONFIG_COMPAT |
498 | static long | 508 | static long |
509 | spidev_compat_ioc_message(struct file *filp, unsigned int cmd, | ||
510 | unsigned long arg) | ||
511 | { | ||
512 | struct spi_ioc_transfer __user *u_ioc; | ||
513 | int retval = 0; | ||
514 | struct spidev_data *spidev; | ||
515 | struct spi_device *spi; | ||
516 | unsigned n_ioc, n; | ||
517 | struct spi_ioc_transfer *ioc; | ||
518 | |||
519 | u_ioc = (struct spi_ioc_transfer __user *) compat_ptr(arg); | ||
520 | if (!access_ok(VERIFY_READ, u_ioc, _IOC_SIZE(cmd))) | ||
521 | return -EFAULT; | ||
522 | |||
523 | /* guard against device removal before, or while, | ||
524 | * we issue this ioctl. | ||
525 | */ | ||
526 | spidev = filp->private_data; | ||
527 | spin_lock_irq(&spidev->spi_lock); | ||
528 | spi = spi_dev_get(spidev->spi); | ||
529 | spin_unlock_irq(&spidev->spi_lock); | ||
530 | |||
531 | if (spi == NULL) | ||
532 | return -ESHUTDOWN; | ||
533 | |||
534 | /* SPI_IOC_MESSAGE needs the buffer locked "normally" */ | ||
535 | mutex_lock(&spidev->buf_lock); | ||
536 | |||
537 | /* Check message and copy into scratch area */ | ||
538 | ioc = spidev_get_ioc_message(cmd, u_ioc, &n_ioc); | ||
539 | if (IS_ERR(ioc)) { | ||
540 | retval = PTR_ERR(ioc); | ||
541 | goto done; | ||
542 | } | ||
543 | if (!ioc) | ||
544 | goto done; /* n_ioc is also 0 */ | ||
545 | |||
546 | /* Convert buffer pointers */ | ||
547 | for (n = 0; n < n_ioc; n++) { | ||
548 | ioc[n].rx_buf = (uintptr_t) compat_ptr(ioc[n].rx_buf); | ||
549 | ioc[n].tx_buf = (uintptr_t) compat_ptr(ioc[n].tx_buf); | ||
550 | } | ||
551 | |||
552 | /* translate to spi_message, execute */ | ||
553 | retval = spidev_message(spidev, ioc, n_ioc); | ||
554 | kfree(ioc); | ||
555 | |||
556 | done: | ||
557 | mutex_unlock(&spidev->buf_lock); | ||
558 | spi_dev_put(spi); | ||
559 | return retval; | ||
560 | } | ||
561 | |||
562 | static long | ||
499 | spidev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | 563 | spidev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) |
500 | { | 564 | { |
565 | if (_IOC_TYPE(cmd) == SPI_IOC_MAGIC | ||
566 | && _IOC_NR(cmd) == _IOC_NR(SPI_IOC_MESSAGE(0)) | ||
567 | && _IOC_DIR(cmd) == _IOC_WRITE) | ||
568 | return spidev_compat_ioc_message(filp, cmd, arg); | ||
569 | |||
501 | return spidev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); | 570 | return spidev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); |
502 | } | 571 | } |
503 | #else | 572 | #else |
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c index 930f6010203e..65d610abe06e 100644 --- a/drivers/staging/lustre/lustre/llite/vvp_io.c +++ b/drivers/staging/lustre/lustre/llite/vvp_io.c | |||
@@ -632,7 +632,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio) | |||
632 | return 0; | 632 | return 0; |
633 | } | 633 | } |
634 | 634 | ||
635 | if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) { | 635 | if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) { |
636 | CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address); | 636 | CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address); |
637 | return -EFAULT; | 637 | return -EFAULT; |
638 | } | 638 | } |
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c index 093535c6217b..120b70d72d79 100644 --- a/drivers/staging/nvec/nvec.c +++ b/drivers/staging/nvec/nvec.c | |||
@@ -85,23 +85,20 @@ static struct nvec_chip *nvec_power_handle; | |||
85 | static const struct mfd_cell nvec_devices[] = { | 85 | static const struct mfd_cell nvec_devices[] = { |
86 | { | 86 | { |
87 | .name = "nvec-kbd", | 87 | .name = "nvec-kbd", |
88 | .id = 1, | ||
89 | }, | 88 | }, |
90 | { | 89 | { |
91 | .name = "nvec-mouse", | 90 | .name = "nvec-mouse", |
92 | .id = 1, | ||
93 | }, | 91 | }, |
94 | { | 92 | { |
95 | .name = "nvec-power", | 93 | .name = "nvec-power", |
96 | .id = 1, | 94 | .id = 0, |
97 | }, | 95 | }, |
98 | { | 96 | { |
99 | .name = "nvec-power", | 97 | .name = "nvec-power", |
100 | .id = 2, | 98 | .id = 1, |
101 | }, | 99 | }, |
102 | { | 100 | { |
103 | .name = "nvec-paz00", | 101 | .name = "nvec-paz00", |
104 | .id = 1, | ||
105 | }, | 102 | }, |
106 | }; | 103 | }; |
107 | 104 | ||
@@ -891,7 +888,7 @@ static int tegra_nvec_probe(struct platform_device *pdev) | |||
891 | nvec_msg_free(nvec, msg); | 888 | nvec_msg_free(nvec, msg); |
892 | } | 889 | } |
893 | 890 | ||
894 | ret = mfd_add_devices(nvec->dev, -1, nvec_devices, | 891 | ret = mfd_add_devices(nvec->dev, 0, nvec_devices, |
895 | ARRAY_SIZE(nvec_devices), NULL, 0, NULL); | 892 | ARRAY_SIZE(nvec_devices), NULL, 0, NULL); |
896 | if (ret) | 893 | if (ret) |
897 | dev_err(nvec->dev, "error adding subdevices\n"); | 894 | dev_err(nvec->dev, "error adding subdevices\n"); |
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index aeb50bb6ba9c..eaffb0248de1 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
@@ -3452,8 +3452,6 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg) | |||
3452 | return status; | 3452 | return status; |
3453 | } | 3453 | } |
3454 | 3454 | ||
3455 | #ifdef CONFIG_PM | ||
3456 | |||
3457 | int usb_remote_wakeup(struct usb_device *udev) | 3455 | int usb_remote_wakeup(struct usb_device *udev) |
3458 | { | 3456 | { |
3459 | int status = 0; | 3457 | int status = 0; |
@@ -3512,16 +3510,6 @@ static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port, | |||
3512 | return connect_change; | 3510 | return connect_change; |
3513 | } | 3511 | } |
3514 | 3512 | ||
3515 | #else | ||
3516 | |||
3517 | static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port, | ||
3518 | u16 portstatus, u16 portchange) | ||
3519 | { | ||
3520 | return 0; | ||
3521 | } | ||
3522 | |||
3523 | #endif | ||
3524 | |||
3525 | static int check_ports_changed(struct usb_hub *hub) | 3513 | static int check_ports_changed(struct usb_hub *hub) |
3526 | { | 3514 | { |
3527 | int port1; | 3515 | int port1; |
diff --git a/drivers/usb/core/otg_whitelist.h b/drivers/usb/core/otg_whitelist.h index de0c9c9d7091..a6315abe7b7c 100644 --- a/drivers/usb/core/otg_whitelist.h +++ b/drivers/usb/core/otg_whitelist.h | |||
@@ -55,6 +55,11 @@ static int is_targeted(struct usb_device *dev) | |||
55 | le16_to_cpu(dev->descriptor.idProduct) == 0xbadd)) | 55 | le16_to_cpu(dev->descriptor.idProduct) == 0xbadd)) |
56 | return 0; | 56 | return 0; |
57 | 57 | ||
58 | /* OTG PET device is always targeted (see OTG 2.0 ECN 6.4.2) */ | ||
59 | if ((le16_to_cpu(dev->descriptor.idVendor) == 0x1a0a && | ||
60 | le16_to_cpu(dev->descriptor.idProduct) == 0x0200)) | ||
61 | return 1; | ||
62 | |||
58 | /* NOTE: can't use usb_match_id() since interface caches | 63 | /* NOTE: can't use usb_match_id() since interface caches |
59 | * aren't set up yet. this is cut/paste from that code. | 64 | * aren't set up yet. this is cut/paste from that code. |
60 | */ | 65 | */ |
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 0ffb4ed0a945..41e510ae8c83 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
@@ -179,6 +179,10 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
179 | { USB_DEVICE(0x0b05, 0x17e0), .driver_info = | 179 | { USB_DEVICE(0x0b05, 0x17e0), .driver_info = |
180 | USB_QUIRK_IGNORE_REMOTE_WAKEUP }, | 180 | USB_QUIRK_IGNORE_REMOTE_WAKEUP }, |
181 | 181 | ||
182 | /* Protocol and OTG Electrical Test Device */ | ||
183 | { USB_DEVICE(0x1a0a, 0x0200), .driver_info = | ||
184 | USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, | ||
185 | |||
182 | { } /* terminating entry must be last */ | 186 | { } /* terminating entry must be last */ |
183 | }; | 187 | }; |
184 | 188 | ||
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c index ad43c5bc1ef1..02e3e2d4ea56 100644 --- a/drivers/usb/dwc2/core_intr.c +++ b/drivers/usb/dwc2/core_intr.c | |||
@@ -476,13 +476,13 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev) | |||
476 | u32 gintsts; | 476 | u32 gintsts; |
477 | irqreturn_t retval = IRQ_NONE; | 477 | irqreturn_t retval = IRQ_NONE; |
478 | 478 | ||
479 | spin_lock(&hsotg->lock); | ||
480 | |||
479 | if (!dwc2_is_controller_alive(hsotg)) { | 481 | if (!dwc2_is_controller_alive(hsotg)) { |
480 | dev_warn(hsotg->dev, "Controller is dead\n"); | 482 | dev_warn(hsotg->dev, "Controller is dead\n"); |
481 | goto out; | 483 | goto out; |
482 | } | 484 | } |
483 | 485 | ||
484 | spin_lock(&hsotg->lock); | ||
485 | |||
486 | gintsts = dwc2_read_common_intr(hsotg); | 486 | gintsts = dwc2_read_common_intr(hsotg); |
487 | if (gintsts & ~GINTSTS_PRTINT) | 487 | if (gintsts & ~GINTSTS_PRTINT) |
488 | retval = IRQ_HANDLED; | 488 | retval = IRQ_HANDLED; |
@@ -515,8 +515,8 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev) | |||
515 | } | 515 | } |
516 | } | 516 | } |
517 | 517 | ||
518 | spin_unlock(&hsotg->lock); | ||
519 | out: | 518 | out: |
519 | spin_unlock(&hsotg->lock); | ||
520 | return retval; | 520 | return retval; |
521 | } | 521 | } |
522 | EXPORT_SYMBOL_GPL(dwc2_handle_common_intr); | 522 | EXPORT_SYMBOL_GPL(dwc2_handle_common_intr); |
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c index ccfdfb24b240..2f9735b35338 100644 --- a/drivers/usb/phy/phy.c +++ b/drivers/usb/phy/phy.c | |||
@@ -34,7 +34,7 @@ static struct usb_phy *__usb_find_phy(struct list_head *list, | |||
34 | return phy; | 34 | return phy; |
35 | } | 35 | } |
36 | 36 | ||
37 | return ERR_PTR(-EPROBE_DEFER); | 37 | return ERR_PTR(-ENODEV); |
38 | } | 38 | } |
39 | 39 | ||
40 | static struct usb_phy *__usb_find_phy_dev(struct device *dev, | 40 | static struct usb_phy *__usb_find_phy_dev(struct device *dev, |
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 11c7a9676441..d684b4b8108f 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
@@ -507,7 +507,7 @@ UNUSUAL_DEV( 0x04e6, 0x000c, 0x0100, 0x0100, | |||
507 | UNUSUAL_DEV( 0x04e6, 0x000f, 0x0000, 0x9999, | 507 | UNUSUAL_DEV( 0x04e6, 0x000f, 0x0000, 0x9999, |
508 | "SCM Microsystems", | 508 | "SCM Microsystems", |
509 | "eUSB SCSI Adapter (Bus Powered)", | 509 | "eUSB SCSI Adapter (Bus Powered)", |
510 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init, | 510 | USB_SC_SCSI, USB_PR_BULK, usb_stor_euscsi_init, |
511 | US_FL_SCM_MULT_TARG ), | 511 | US_FL_SCM_MULT_TARG ), |
512 | 512 | ||
513 | UNUSUAL_DEV( 0x04e6, 0x0101, 0x0200, 0x0200, | 513 | UNUSUAL_DEV( 0x04e6, 0x0101, 0x0200, 0x0200, |
@@ -1995,6 +1995,13 @@ UNUSUAL_DEV( 0x152d, 0x2329, 0x0100, 0x0100, | |||
1995 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | 1995 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
1996 | US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ), | 1996 | US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ), |
1997 | 1997 | ||
1998 | /* Reported by Dmitry Nezhevenko <dion@dion.org.ua> */ | ||
1999 | UNUSUAL_DEV( 0x152d, 0x2566, 0x0114, 0x0114, | ||
2000 | "JMicron", | ||
2001 | "USB to ATA/ATAPI Bridge", | ||
2002 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
2003 | US_FL_BROKEN_FUA ), | ||
2004 | |||
1998 | /* Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI) | 2005 | /* Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI) |
1999 | * and Mac USB Dock USB-SCSI */ | 2006 | * and Mac USB Dock USB-SCSI */ |
2000 | UNUSUAL_DEV( 0x1645, 0x0007, 0x0100, 0x0133, | 2007 | UNUSUAL_DEV( 0x1645, 0x0007, 0x0100, 0x0133, |
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index 6df4357d9ee3..dbc00e56c7f5 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h | |||
@@ -140,3 +140,10 @@ UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999, | |||
140 | "External HDD", | 140 | "External HDD", |
141 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | 141 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
142 | US_FL_IGNORE_UAS), | 142 | US_FL_IGNORE_UAS), |
143 | |||
144 | /* Reported-by: Richard Henderson <rth@redhat.com> */ | ||
145 | UNUSUAL_DEV(0x4971, 0x8017, 0x0000, 0x9999, | ||
146 | "SimpleTech", | ||
147 | "External HDD", | ||
148 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
149 | US_FL_NO_REPORT_OPCODES), | ||
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index d415d69dc237..9484d5652ca5 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c | |||
@@ -650,8 +650,10 @@ static void handle_rx(struct vhost_net *net) | |||
650 | break; | 650 | break; |
651 | } | 651 | } |
652 | /* TODO: Should check and handle checksum. */ | 652 | /* TODO: Should check and handle checksum. */ |
653 | |||
654 | hdr.num_buffers = cpu_to_vhost16(vq, headcount); | ||
653 | if (likely(mergeable) && | 655 | if (likely(mergeable) && |
654 | memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount, | 656 | memcpy_toiovecend(nvq->hdr, (void *)&hdr.num_buffers, |
655 | offsetof(typeof(hdr), num_buffers), | 657 | offsetof(typeof(hdr), num_buffers), |
656 | sizeof hdr.num_buffers)) { | 658 | sizeof hdr.num_buffers)) { |
657 | vq_err(vq, "Failed num_buffers write"); | 659 | vq_err(vq, "Failed num_buffers write"); |
diff --git a/drivers/video/fbdev/atafb.c b/drivers/video/fbdev/atafb.c index 4953b657635e..cb9ee2556850 100644 --- a/drivers/video/fbdev/atafb.c +++ b/drivers/video/fbdev/atafb.c | |||
@@ -3118,8 +3118,7 @@ int __init atafb_init(void) | |||
3118 | printk("atafb_init: initializing Falcon hw\n"); | 3118 | printk("atafb_init: initializing Falcon hw\n"); |
3119 | fbhw = &falcon_switch; | 3119 | fbhw = &falcon_switch; |
3120 | atafb_ops.fb_setcolreg = &falcon_setcolreg; | 3120 | atafb_ops.fb_setcolreg = &falcon_setcolreg; |
3121 | error = request_irq(IRQ_AUTO_4, falcon_vbl_switcher, | 3121 | error = request_irq(IRQ_AUTO_4, falcon_vbl_switcher, 0, |
3122 | IRQ_TYPE_PRIO, | ||
3123 | "framebuffer:modeswitch", | 3122 | "framebuffer:modeswitch", |
3124 | falcon_vbl_switcher); | 3123 | falcon_vbl_switcher); |
3125 | if (error) | 3124 | if (error) |
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 3860d02729dc..0b52d92cb2e5 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c | |||
@@ -92,7 +92,6 @@ EXPORT_SYMBOL_GPL(balloon_stats); | |||
92 | 92 | ||
93 | /* We increase/decrease in batches which fit in a page */ | 93 | /* We increase/decrease in batches which fit in a page */ |
94 | static xen_pfn_t frame_list[PAGE_SIZE / sizeof(unsigned long)]; | 94 | static xen_pfn_t frame_list[PAGE_SIZE / sizeof(unsigned long)]; |
95 | static DEFINE_PER_CPU(struct page *, balloon_scratch_page); | ||
96 | 95 | ||
97 | 96 | ||
98 | /* List of ballooned pages, threaded through the mem_map array. */ | 97 | /* List of ballooned pages, threaded through the mem_map array. */ |
@@ -423,22 +422,12 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) | |||
423 | page = pfn_to_page(pfn); | 422 | page = pfn_to_page(pfn); |
424 | 423 | ||
425 | #ifdef CONFIG_XEN_HAVE_PVMMU | 424 | #ifdef CONFIG_XEN_HAVE_PVMMU |
426 | /* | ||
427 | * Ballooned out frames are effectively replaced with | ||
428 | * a scratch frame. Ensure direct mappings and the | ||
429 | * p2m are consistent. | ||
430 | */ | ||
431 | if (!xen_feature(XENFEAT_auto_translated_physmap)) { | 425 | if (!xen_feature(XENFEAT_auto_translated_physmap)) { |
432 | if (!PageHighMem(page)) { | 426 | if (!PageHighMem(page)) { |
433 | struct page *scratch_page = get_balloon_scratch_page(); | ||
434 | |||
435 | ret = HYPERVISOR_update_va_mapping( | 427 | ret = HYPERVISOR_update_va_mapping( |
436 | (unsigned long)__va(pfn << PAGE_SHIFT), | 428 | (unsigned long)__va(pfn << PAGE_SHIFT), |
437 | pfn_pte(page_to_pfn(scratch_page), | 429 | __pte_ma(0), 0); |
438 | PAGE_KERNEL_RO), 0); | ||
439 | BUG_ON(ret); | 430 | BUG_ON(ret); |
440 | |||
441 | put_balloon_scratch_page(); | ||
442 | } | 431 | } |
443 | __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); | 432 | __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); |
444 | } | 433 | } |
@@ -500,18 +489,6 @@ static void balloon_process(struct work_struct *work) | |||
500 | mutex_unlock(&balloon_mutex); | 489 | mutex_unlock(&balloon_mutex); |
501 | } | 490 | } |
502 | 491 | ||
503 | struct page *get_balloon_scratch_page(void) | ||
504 | { | ||
505 | struct page *ret = get_cpu_var(balloon_scratch_page); | ||
506 | BUG_ON(ret == NULL); | ||
507 | return ret; | ||
508 | } | ||
509 | |||
510 | void put_balloon_scratch_page(void) | ||
511 | { | ||
512 | put_cpu_var(balloon_scratch_page); | ||
513 | } | ||
514 | |||
515 | /* Resets the Xen limit, sets new target, and kicks off processing. */ | 492 | /* Resets the Xen limit, sets new target, and kicks off processing. */ |
516 | void balloon_set_new_target(unsigned long target) | 493 | void balloon_set_new_target(unsigned long target) |
517 | { | 494 | { |
@@ -605,61 +582,13 @@ static void __init balloon_add_region(unsigned long start_pfn, | |||
605 | } | 582 | } |
606 | } | 583 | } |
607 | 584 | ||
608 | static int alloc_balloon_scratch_page(int cpu) | ||
609 | { | ||
610 | if (per_cpu(balloon_scratch_page, cpu) != NULL) | ||
611 | return 0; | ||
612 | |||
613 | per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL); | ||
614 | if (per_cpu(balloon_scratch_page, cpu) == NULL) { | ||
615 | pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu); | ||
616 | return -ENOMEM; | ||
617 | } | ||
618 | |||
619 | return 0; | ||
620 | } | ||
621 | |||
622 | |||
623 | static int balloon_cpu_notify(struct notifier_block *self, | ||
624 | unsigned long action, void *hcpu) | ||
625 | { | ||
626 | int cpu = (long)hcpu; | ||
627 | switch (action) { | ||
628 | case CPU_UP_PREPARE: | ||
629 | if (alloc_balloon_scratch_page(cpu)) | ||
630 | return NOTIFY_BAD; | ||
631 | break; | ||
632 | default: | ||
633 | break; | ||
634 | } | ||
635 | return NOTIFY_OK; | ||
636 | } | ||
637 | |||
638 | static struct notifier_block balloon_cpu_notifier = { | ||
639 | .notifier_call = balloon_cpu_notify, | ||
640 | }; | ||
641 | |||
642 | static int __init balloon_init(void) | 585 | static int __init balloon_init(void) |
643 | { | 586 | { |
644 | int i, cpu; | 587 | int i; |
645 | 588 | ||
646 | if (!xen_domain()) | 589 | if (!xen_domain()) |
647 | return -ENODEV; | 590 | return -ENODEV; |
648 | 591 | ||
649 | if (!xen_feature(XENFEAT_auto_translated_physmap)) { | ||
650 | register_cpu_notifier(&balloon_cpu_notifier); | ||
651 | |||
652 | get_online_cpus(); | ||
653 | for_each_online_cpu(cpu) { | ||
654 | if (alloc_balloon_scratch_page(cpu)) { | ||
655 | put_online_cpus(); | ||
656 | unregister_cpu_notifier(&balloon_cpu_notifier); | ||
657 | return -ENOMEM; | ||
658 | } | ||
659 | } | ||
660 | put_online_cpus(); | ||
661 | } | ||
662 | |||
663 | pr_info("Initialising balloon driver\n"); | 592 | pr_info("Initialising balloon driver\n"); |
664 | 593 | ||
665 | balloon_stats.current_pages = xen_pv_domain() | 594 | balloon_stats.current_pages = xen_pv_domain() |
@@ -696,15 +625,4 @@ static int __init balloon_init(void) | |||
696 | 625 | ||
697 | subsys_initcall(balloon_init); | 626 | subsys_initcall(balloon_init); |
698 | 627 | ||
699 | static int __init balloon_clear(void) | ||
700 | { | ||
701 | int cpu; | ||
702 | |||
703 | for_each_possible_cpu(cpu) | ||
704 | per_cpu(balloon_scratch_page, cpu) = NULL; | ||
705 | |||
706 | return 0; | ||
707 | } | ||
708 | early_initcall(balloon_clear); | ||
709 | |||
710 | MODULE_LICENSE("GPL"); | 628 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 073b4a19a8b0..d5bb1a33d0a3 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c | |||
@@ -67,7 +67,7 @@ struct gntdev_priv { | |||
67 | * Only populated if populate_freeable_maps == 1 */ | 67 | * Only populated if populate_freeable_maps == 1 */ |
68 | struct list_head freeable_maps; | 68 | struct list_head freeable_maps; |
69 | /* lock protects maps and freeable_maps */ | 69 | /* lock protects maps and freeable_maps */ |
70 | spinlock_t lock; | 70 | struct mutex lock; |
71 | struct mm_struct *mm; | 71 | struct mm_struct *mm; |
72 | struct mmu_notifier mn; | 72 | struct mmu_notifier mn; |
73 | }; | 73 | }; |
@@ -91,7 +91,9 @@ struct grant_map { | |||
91 | struct gnttab_map_grant_ref *map_ops; | 91 | struct gnttab_map_grant_ref *map_ops; |
92 | struct gnttab_unmap_grant_ref *unmap_ops; | 92 | struct gnttab_unmap_grant_ref *unmap_ops; |
93 | struct gnttab_map_grant_ref *kmap_ops; | 93 | struct gnttab_map_grant_ref *kmap_ops; |
94 | struct gnttab_unmap_grant_ref *kunmap_ops; | ||
94 | struct page **pages; | 95 | struct page **pages; |
96 | unsigned long pages_vm_start; | ||
95 | }; | 97 | }; |
96 | 98 | ||
97 | static int unmap_grant_pages(struct grant_map *map, int offset, int pages); | 99 | static int unmap_grant_pages(struct grant_map *map, int offset, int pages); |
@@ -118,12 +120,13 @@ static void gntdev_free_map(struct grant_map *map) | |||
118 | return; | 120 | return; |
119 | 121 | ||
120 | if (map->pages) | 122 | if (map->pages) |
121 | free_xenballooned_pages(map->count, map->pages); | 123 | gnttab_free_pages(map->count, map->pages); |
122 | kfree(map->pages); | 124 | kfree(map->pages); |
123 | kfree(map->grants); | 125 | kfree(map->grants); |
124 | kfree(map->map_ops); | 126 | kfree(map->map_ops); |
125 | kfree(map->unmap_ops); | 127 | kfree(map->unmap_ops); |
126 | kfree(map->kmap_ops); | 128 | kfree(map->kmap_ops); |
129 | kfree(map->kunmap_ops); | ||
127 | kfree(map); | 130 | kfree(map); |
128 | } | 131 | } |
129 | 132 | ||
@@ -140,21 +143,24 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count) | |||
140 | add->map_ops = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL); | 143 | add->map_ops = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL); |
141 | add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL); | 144 | add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL); |
142 | add->kmap_ops = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL); | 145 | add->kmap_ops = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL); |
146 | add->kunmap_ops = kcalloc(count, sizeof(add->kunmap_ops[0]), GFP_KERNEL); | ||
143 | add->pages = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL); | 147 | add->pages = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL); |
144 | if (NULL == add->grants || | 148 | if (NULL == add->grants || |
145 | NULL == add->map_ops || | 149 | NULL == add->map_ops || |
146 | NULL == add->unmap_ops || | 150 | NULL == add->unmap_ops || |
147 | NULL == add->kmap_ops || | 151 | NULL == add->kmap_ops || |
152 | NULL == add->kunmap_ops || | ||
148 | NULL == add->pages) | 153 | NULL == add->pages) |
149 | goto err; | 154 | goto err; |
150 | 155 | ||
151 | if (alloc_xenballooned_pages(count, add->pages, false /* lowmem */)) | 156 | if (gnttab_alloc_pages(count, add->pages)) |
152 | goto err; | 157 | goto err; |
153 | 158 | ||
154 | for (i = 0; i < count; i++) { | 159 | for (i = 0; i < count; i++) { |
155 | add->map_ops[i].handle = -1; | 160 | add->map_ops[i].handle = -1; |
156 | add->unmap_ops[i].handle = -1; | 161 | add->unmap_ops[i].handle = -1; |
157 | add->kmap_ops[i].handle = -1; | 162 | add->kmap_ops[i].handle = -1; |
163 | add->kunmap_ops[i].handle = -1; | ||
158 | } | 164 | } |
159 | 165 | ||
160 | add->index = 0; | 166 | add->index = 0; |
@@ -216,9 +222,9 @@ static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map) | |||
216 | } | 222 | } |
217 | 223 | ||
218 | if (populate_freeable_maps && priv) { | 224 | if (populate_freeable_maps && priv) { |
219 | spin_lock(&priv->lock); | 225 | mutex_lock(&priv->lock); |
220 | list_del(&map->next); | 226 | list_del(&map->next); |
221 | spin_unlock(&priv->lock); | 227 | mutex_unlock(&priv->lock); |
222 | } | 228 | } |
223 | 229 | ||
224 | if (map->pages && !use_ptemod) | 230 | if (map->pages && !use_ptemod) |
@@ -239,6 +245,14 @@ static int find_grant_ptes(pte_t *pte, pgtable_t token, | |||
239 | BUG_ON(pgnr >= map->count); | 245 | BUG_ON(pgnr >= map->count); |
240 | pte_maddr = arbitrary_virt_to_machine(pte).maddr; | 246 | pte_maddr = arbitrary_virt_to_machine(pte).maddr; |
241 | 247 | ||
248 | /* | ||
249 | * Set the PTE as special to force get_user_pages_fast() fall | ||
250 | * back to the slow path. If this is not supported as part of | ||
251 | * the grant map, it will be done afterwards. | ||
252 | */ | ||
253 | if (xen_feature(XENFEAT_gnttab_map_avail_bits)) | ||
254 | flags |= (1 << _GNTMAP_guest_avail0); | ||
255 | |||
242 | gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags, | 256 | gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags, |
243 | map->grants[pgnr].ref, | 257 | map->grants[pgnr].ref, |
244 | map->grants[pgnr].domid); | 258 | map->grants[pgnr].domid); |
@@ -247,6 +261,15 @@ static int find_grant_ptes(pte_t *pte, pgtable_t token, | |||
247 | return 0; | 261 | return 0; |
248 | } | 262 | } |
249 | 263 | ||
264 | #ifdef CONFIG_X86 | ||
265 | static int set_grant_ptes_as_special(pte_t *pte, pgtable_t token, | ||
266 | unsigned long addr, void *data) | ||
267 | { | ||
268 | set_pte_at(current->mm, addr, pte, pte_mkspecial(*pte)); | ||
269 | return 0; | ||
270 | } | ||
271 | #endif | ||
272 | |||
250 | static int map_grant_pages(struct grant_map *map) | 273 | static int map_grant_pages(struct grant_map *map) |
251 | { | 274 | { |
252 | int i, err = 0; | 275 | int i, err = 0; |
@@ -280,6 +303,8 @@ static int map_grant_pages(struct grant_map *map) | |||
280 | map->flags | GNTMAP_host_map, | 303 | map->flags | GNTMAP_host_map, |
281 | map->grants[i].ref, | 304 | map->grants[i].ref, |
282 | map->grants[i].domid); | 305 | map->grants[i].domid); |
306 | gnttab_set_unmap_op(&map->kunmap_ops[i], address, | ||
307 | map->flags | GNTMAP_host_map, -1); | ||
283 | } | 308 | } |
284 | } | 309 | } |
285 | 310 | ||
@@ -290,20 +315,42 @@ static int map_grant_pages(struct grant_map *map) | |||
290 | return err; | 315 | return err; |
291 | 316 | ||
292 | for (i = 0; i < map->count; i++) { | 317 | for (i = 0; i < map->count; i++) { |
293 | if (map->map_ops[i].status) | 318 | if (map->map_ops[i].status) { |
294 | err = -EINVAL; | 319 | err = -EINVAL; |
295 | else { | 320 | continue; |
296 | BUG_ON(map->map_ops[i].handle == -1); | ||
297 | map->unmap_ops[i].handle = map->map_ops[i].handle; | ||
298 | pr_debug("map handle=%d\n", map->map_ops[i].handle); | ||
299 | } | 321 | } |
322 | |||
323 | map->unmap_ops[i].handle = map->map_ops[i].handle; | ||
324 | if (use_ptemod) | ||
325 | map->kunmap_ops[i].handle = map->kmap_ops[i].handle; | ||
300 | } | 326 | } |
301 | return err; | 327 | return err; |
302 | } | 328 | } |
303 | 329 | ||
330 | struct unmap_grant_pages_callback_data | ||
331 | { | ||
332 | struct completion completion; | ||
333 | int result; | ||
334 | }; | ||
335 | |||
336 | static void unmap_grant_callback(int result, | ||
337 | struct gntab_unmap_queue_data *data) | ||
338 | { | ||
339 | struct unmap_grant_pages_callback_data* d = data->data; | ||
340 | |||
341 | d->result = result; | ||
342 | complete(&d->completion); | ||
343 | } | ||
344 | |||
304 | static int __unmap_grant_pages(struct grant_map *map, int offset, int pages) | 345 | static int __unmap_grant_pages(struct grant_map *map, int offset, int pages) |
305 | { | 346 | { |
306 | int i, err = 0; | 347 | int i, err = 0; |
348 | struct gntab_unmap_queue_data unmap_data; | ||
349 | struct unmap_grant_pages_callback_data data; | ||
350 | |||
351 | init_completion(&data.completion); | ||
352 | unmap_data.data = &data; | ||
353 | unmap_data.done= &unmap_grant_callback; | ||
307 | 354 | ||
308 | if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) { | 355 | if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) { |
309 | int pgno = (map->notify.addr >> PAGE_SHIFT); | 356 | int pgno = (map->notify.addr >> PAGE_SHIFT); |
@@ -315,11 +362,16 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages) | |||
315 | } | 362 | } |
316 | } | 363 | } |
317 | 364 | ||
318 | err = gnttab_unmap_refs(map->unmap_ops + offset, | 365 | unmap_data.unmap_ops = map->unmap_ops + offset; |
319 | use_ptemod ? map->kmap_ops + offset : NULL, map->pages + offset, | 366 | unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL; |
320 | pages); | 367 | unmap_data.pages = map->pages + offset; |
321 | if (err) | 368 | unmap_data.count = pages; |
322 | return err; | 369 | |
370 | gnttab_unmap_refs_async(&unmap_data); | ||
371 | |||
372 | wait_for_completion(&data.completion); | ||
373 | if (data.result) | ||
374 | return data.result; | ||
323 | 375 | ||
324 | for (i = 0; i < pages; i++) { | 376 | for (i = 0; i < pages; i++) { |
325 | if (map->unmap_ops[offset+i].status) | 377 | if (map->unmap_ops[offset+i].status) |
@@ -387,17 +439,26 @@ static void gntdev_vma_close(struct vm_area_struct *vma) | |||
387 | * not do any unmapping, since that has been done prior to | 439 | * not do any unmapping, since that has been done prior to |
388 | * closing the vma, but it may still iterate the unmap_ops list. | 440 | * closing the vma, but it may still iterate the unmap_ops list. |
389 | */ | 441 | */ |
390 | spin_lock(&priv->lock); | 442 | mutex_lock(&priv->lock); |
391 | map->vma = NULL; | 443 | map->vma = NULL; |
392 | spin_unlock(&priv->lock); | 444 | mutex_unlock(&priv->lock); |
393 | } | 445 | } |
394 | vma->vm_private_data = NULL; | 446 | vma->vm_private_data = NULL; |
395 | gntdev_put_map(priv, map); | 447 | gntdev_put_map(priv, map); |
396 | } | 448 | } |
397 | 449 | ||
450 | static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma, | ||
451 | unsigned long addr) | ||
452 | { | ||
453 | struct grant_map *map = vma->vm_private_data; | ||
454 | |||
455 | return map->pages[(addr - map->pages_vm_start) >> PAGE_SHIFT]; | ||
456 | } | ||
457 | |||
398 | static struct vm_operations_struct gntdev_vmops = { | 458 | static struct vm_operations_struct gntdev_vmops = { |
399 | .open = gntdev_vma_open, | 459 | .open = gntdev_vma_open, |
400 | .close = gntdev_vma_close, | 460 | .close = gntdev_vma_close, |
461 | .find_special_page = gntdev_vma_find_special_page, | ||
401 | }; | 462 | }; |
402 | 463 | ||
403 | /* ------------------------------------------------------------------ */ | 464 | /* ------------------------------------------------------------------ */ |
@@ -433,14 +494,14 @@ static void mn_invl_range_start(struct mmu_notifier *mn, | |||
433 | struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn); | 494 | struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn); |
434 | struct grant_map *map; | 495 | struct grant_map *map; |
435 | 496 | ||
436 | spin_lock(&priv->lock); | 497 | mutex_lock(&priv->lock); |
437 | list_for_each_entry(map, &priv->maps, next) { | 498 | list_for_each_entry(map, &priv->maps, next) { |
438 | unmap_if_in_range(map, start, end); | 499 | unmap_if_in_range(map, start, end); |
439 | } | 500 | } |
440 | list_for_each_entry(map, &priv->freeable_maps, next) { | 501 | list_for_each_entry(map, &priv->freeable_maps, next) { |
441 | unmap_if_in_range(map, start, end); | 502 | unmap_if_in_range(map, start, end); |
442 | } | 503 | } |
443 | spin_unlock(&priv->lock); | 504 | mutex_unlock(&priv->lock); |
444 | } | 505 | } |
445 | 506 | ||
446 | static void mn_invl_page(struct mmu_notifier *mn, | 507 | static void mn_invl_page(struct mmu_notifier *mn, |
@@ -457,7 +518,7 @@ static void mn_release(struct mmu_notifier *mn, | |||
457 | struct grant_map *map; | 518 | struct grant_map *map; |
458 | int err; | 519 | int err; |
459 | 520 | ||
460 | spin_lock(&priv->lock); | 521 | mutex_lock(&priv->lock); |
461 | list_for_each_entry(map, &priv->maps, next) { | 522 | list_for_each_entry(map, &priv->maps, next) { |
462 | if (!map->vma) | 523 | if (!map->vma) |
463 | continue; | 524 | continue; |
@@ -476,7 +537,7 @@ static void mn_release(struct mmu_notifier *mn, | |||
476 | err = unmap_grant_pages(map, /* offset */ 0, map->count); | 537 | err = unmap_grant_pages(map, /* offset */ 0, map->count); |
477 | WARN_ON(err); | 538 | WARN_ON(err); |
478 | } | 539 | } |
479 | spin_unlock(&priv->lock); | 540 | mutex_unlock(&priv->lock); |
480 | } | 541 | } |
481 | 542 | ||
482 | static struct mmu_notifier_ops gntdev_mmu_ops = { | 543 | static struct mmu_notifier_ops gntdev_mmu_ops = { |
@@ -498,7 +559,7 @@ static int gntdev_open(struct inode *inode, struct file *flip) | |||
498 | 559 | ||
499 | INIT_LIST_HEAD(&priv->maps); | 560 | INIT_LIST_HEAD(&priv->maps); |
500 | INIT_LIST_HEAD(&priv->freeable_maps); | 561 | INIT_LIST_HEAD(&priv->freeable_maps); |
501 | spin_lock_init(&priv->lock); | 562 | mutex_init(&priv->lock); |
502 | 563 | ||
503 | if (use_ptemod) { | 564 | if (use_ptemod) { |
504 | priv->mm = get_task_mm(current); | 565 | priv->mm = get_task_mm(current); |
@@ -572,10 +633,10 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv, | |||
572 | return -EFAULT; | 633 | return -EFAULT; |
573 | } | 634 | } |
574 | 635 | ||
575 | spin_lock(&priv->lock); | 636 | mutex_lock(&priv->lock); |
576 | gntdev_add_map(priv, map); | 637 | gntdev_add_map(priv, map); |
577 | op.index = map->index << PAGE_SHIFT; | 638 | op.index = map->index << PAGE_SHIFT; |
578 | spin_unlock(&priv->lock); | 639 | mutex_unlock(&priv->lock); |
579 | 640 | ||
580 | if (copy_to_user(u, &op, sizeof(op)) != 0) | 641 | if (copy_to_user(u, &op, sizeof(op)) != 0) |
581 | return -EFAULT; | 642 | return -EFAULT; |
@@ -594,7 +655,7 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv, | |||
594 | return -EFAULT; | 655 | return -EFAULT; |
595 | pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count); | 656 | pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count); |
596 | 657 | ||
597 | spin_lock(&priv->lock); | 658 | mutex_lock(&priv->lock); |
598 | map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count); | 659 | map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count); |
599 | if (map) { | 660 | if (map) { |
600 | list_del(&map->next); | 661 | list_del(&map->next); |
@@ -602,7 +663,7 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv, | |||
602 | list_add_tail(&map->next, &priv->freeable_maps); | 663 | list_add_tail(&map->next, &priv->freeable_maps); |
603 | err = 0; | 664 | err = 0; |
604 | } | 665 | } |
605 | spin_unlock(&priv->lock); | 666 | mutex_unlock(&priv->lock); |
606 | if (map) | 667 | if (map) |
607 | gntdev_put_map(priv, map); | 668 | gntdev_put_map(priv, map); |
608 | return err; | 669 | return err; |
@@ -670,7 +731,7 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u) | |||
670 | out_flags = op.action; | 731 | out_flags = op.action; |
671 | out_event = op.event_channel_port; | 732 | out_event = op.event_channel_port; |
672 | 733 | ||
673 | spin_lock(&priv->lock); | 734 | mutex_lock(&priv->lock); |
674 | 735 | ||
675 | list_for_each_entry(map, &priv->maps, next) { | 736 | list_for_each_entry(map, &priv->maps, next) { |
676 | uint64_t begin = map->index << PAGE_SHIFT; | 737 | uint64_t begin = map->index << PAGE_SHIFT; |
@@ -698,7 +759,7 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u) | |||
698 | rc = 0; | 759 | rc = 0; |
699 | 760 | ||
700 | unlock_out: | 761 | unlock_out: |
701 | spin_unlock(&priv->lock); | 762 | mutex_unlock(&priv->lock); |
702 | 763 | ||
703 | /* Drop the reference to the event channel we did not save in the map */ | 764 | /* Drop the reference to the event channel we did not save in the map */ |
704 | if (out_flags & UNMAP_NOTIFY_SEND_EVENT) | 765 | if (out_flags & UNMAP_NOTIFY_SEND_EVENT) |
@@ -748,7 +809,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) | |||
748 | pr_debug("map %d+%d at %lx (pgoff %lx)\n", | 809 | pr_debug("map %d+%d at %lx (pgoff %lx)\n", |
749 | index, count, vma->vm_start, vma->vm_pgoff); | 810 | index, count, vma->vm_start, vma->vm_pgoff); |
750 | 811 | ||
751 | spin_lock(&priv->lock); | 812 | mutex_lock(&priv->lock); |
752 | map = gntdev_find_map_index(priv, index, count); | 813 | map = gntdev_find_map_index(priv, index, count); |
753 | if (!map) | 814 | if (!map) |
754 | goto unlock_out; | 815 | goto unlock_out; |
@@ -783,7 +844,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) | |||
783 | map->flags |= GNTMAP_readonly; | 844 | map->flags |= GNTMAP_readonly; |
784 | } | 845 | } |
785 | 846 | ||
786 | spin_unlock(&priv->lock); | 847 | mutex_unlock(&priv->lock); |
787 | 848 | ||
788 | if (use_ptemod) { | 849 | if (use_ptemod) { |
789 | err = apply_to_page_range(vma->vm_mm, vma->vm_start, | 850 | err = apply_to_page_range(vma->vm_mm, vma->vm_start, |
@@ -806,16 +867,34 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) | |||
806 | if (err) | 867 | if (err) |
807 | goto out_put_map; | 868 | goto out_put_map; |
808 | } | 869 | } |
870 | } else { | ||
871 | #ifdef CONFIG_X86 | ||
872 | /* | ||
873 | * If the PTEs were not made special by the grant map | ||
874 | * hypercall, do so here. | ||
875 | * | ||
876 | * This is racy since the mapping is already visible | ||
877 | * to userspace but userspace should be well-behaved | ||
878 | * enough to not touch it until the mmap() call | ||
879 | * returns. | ||
880 | */ | ||
881 | if (!xen_feature(XENFEAT_gnttab_map_avail_bits)) { | ||
882 | apply_to_page_range(vma->vm_mm, vma->vm_start, | ||
883 | vma->vm_end - vma->vm_start, | ||
884 | set_grant_ptes_as_special, NULL); | ||
885 | } | ||
886 | #endif | ||
887 | map->pages_vm_start = vma->vm_start; | ||
809 | } | 888 | } |
810 | 889 | ||
811 | return 0; | 890 | return 0; |
812 | 891 | ||
813 | unlock_out: | 892 | unlock_out: |
814 | spin_unlock(&priv->lock); | 893 | mutex_unlock(&priv->lock); |
815 | return err; | 894 | return err; |
816 | 895 | ||
817 | out_unlock_put: | 896 | out_unlock_put: |
818 | spin_unlock(&priv->lock); | 897 | mutex_unlock(&priv->lock); |
819 | out_put_map: | 898 | out_put_map: |
820 | if (use_ptemod) | 899 | if (use_ptemod) |
821 | map->vma = NULL; | 900 | map->vma = NULL; |
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 7786291ba229..17972fbacddc 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include <linux/io.h> | 42 | #include <linux/io.h> |
43 | #include <linux/delay.h> | 43 | #include <linux/delay.h> |
44 | #include <linux/hardirq.h> | 44 | #include <linux/hardirq.h> |
45 | #include <linux/workqueue.h> | ||
45 | 46 | ||
46 | #include <xen/xen.h> | 47 | #include <xen/xen.h> |
47 | #include <xen/interface/xen.h> | 48 | #include <xen/interface/xen.h> |
@@ -50,6 +51,7 @@ | |||
50 | #include <xen/interface/memory.h> | 51 | #include <xen/interface/memory.h> |
51 | #include <xen/hvc-console.h> | 52 | #include <xen/hvc-console.h> |
52 | #include <xen/swiotlb-xen.h> | 53 | #include <xen/swiotlb-xen.h> |
54 | #include <xen/balloon.h> | ||
53 | #include <asm/xen/hypercall.h> | 55 | #include <asm/xen/hypercall.h> |
54 | #include <asm/xen/interface.h> | 56 | #include <asm/xen/interface.h> |
55 | 57 | ||
@@ -671,6 +673,59 @@ void gnttab_free_auto_xlat_frames(void) | |||
671 | } | 673 | } |
672 | EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames); | 674 | EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames); |
673 | 675 | ||
676 | /** | ||
677 | * gnttab_alloc_pages - alloc pages suitable for grant mapping into | ||
678 | * @nr_pages: number of pages to alloc | ||
679 | * @pages: returns the pages | ||
680 | */ | ||
681 | int gnttab_alloc_pages(int nr_pages, struct page **pages) | ||
682 | { | ||
683 | int i; | ||
684 | int ret; | ||
685 | |||
686 | ret = alloc_xenballooned_pages(nr_pages, pages, false); | ||
687 | if (ret < 0) | ||
688 | return ret; | ||
689 | |||
690 | for (i = 0; i < nr_pages; i++) { | ||
691 | #if BITS_PER_LONG < 64 | ||
692 | struct xen_page_foreign *foreign; | ||
693 | |||
694 | foreign = kzalloc(sizeof(*foreign), GFP_KERNEL); | ||
695 | if (!foreign) { | ||
696 | gnttab_free_pages(nr_pages, pages); | ||
697 | return -ENOMEM; | ||
698 | } | ||
699 | set_page_private(pages[i], (unsigned long)foreign); | ||
700 | #endif | ||
701 | SetPagePrivate(pages[i]); | ||
702 | } | ||
703 | |||
704 | return 0; | ||
705 | } | ||
706 | EXPORT_SYMBOL(gnttab_alloc_pages); | ||
707 | |||
708 | /** | ||
709 | * gnttab_free_pages - free pages allocated by gnttab_alloc_pages() | ||
710 | * @nr_pages: number of pages to free | ||
711 | * @pages: the pages | ||
712 | */ | ||
713 | void gnttab_free_pages(int nr_pages, struct page **pages) | ||
714 | { | ||
715 | int i; | ||
716 | |||
717 | for (i = 0; i < nr_pages; i++) { | ||
718 | if (PagePrivate(pages[i])) { | ||
719 | #if BITS_PER_LONG < 64 | ||
720 | kfree((void *)page_private(pages[i])); | ||
721 | #endif | ||
722 | ClearPagePrivate(pages[i]); | ||
723 | } | ||
724 | } | ||
725 | free_xenballooned_pages(nr_pages, pages); | ||
726 | } | ||
727 | EXPORT_SYMBOL(gnttab_free_pages); | ||
728 | |||
674 | /* Handling of paged out grant targets (GNTST_eagain) */ | 729 | /* Handling of paged out grant targets (GNTST_eagain) */ |
675 | #define MAX_DELAY 256 | 730 | #define MAX_DELAY 256 |
676 | static inline void | 731 | static inline void |
@@ -727,30 +782,87 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, | |||
727 | if (ret) | 782 | if (ret) |
728 | return ret; | 783 | return ret; |
729 | 784 | ||
730 | /* Retry eagain maps */ | 785 | for (i = 0; i < count; i++) { |
731 | for (i = 0; i < count; i++) | 786 | /* Retry eagain maps */ |
732 | if (map_ops[i].status == GNTST_eagain) | 787 | if (map_ops[i].status == GNTST_eagain) |
733 | gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i, | 788 | gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i, |
734 | &map_ops[i].status, __func__); | 789 | &map_ops[i].status, __func__); |
735 | 790 | ||
791 | if (map_ops[i].status == GNTST_okay) { | ||
792 | struct xen_page_foreign *foreign; | ||
793 | |||
794 | SetPageForeign(pages[i]); | ||
795 | foreign = xen_page_foreign(pages[i]); | ||
796 | foreign->domid = map_ops[i].dom; | ||
797 | foreign->gref = map_ops[i].ref; | ||
798 | } | ||
799 | } | ||
800 | |||
736 | return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count); | 801 | return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count); |
737 | } | 802 | } |
738 | EXPORT_SYMBOL_GPL(gnttab_map_refs); | 803 | EXPORT_SYMBOL_GPL(gnttab_map_refs); |
739 | 804 | ||
740 | int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, | 805 | int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, |
741 | struct gnttab_map_grant_ref *kmap_ops, | 806 | struct gnttab_unmap_grant_ref *kunmap_ops, |
742 | struct page **pages, unsigned int count) | 807 | struct page **pages, unsigned int count) |
743 | { | 808 | { |
809 | unsigned int i; | ||
744 | int ret; | 810 | int ret; |
745 | 811 | ||
746 | ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count); | 812 | ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count); |
747 | if (ret) | 813 | if (ret) |
748 | return ret; | 814 | return ret; |
749 | 815 | ||
750 | return clear_foreign_p2m_mapping(unmap_ops, kmap_ops, pages, count); | 816 | for (i = 0; i < count; i++) |
817 | ClearPageForeign(pages[i]); | ||
818 | |||
819 | return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count); | ||
751 | } | 820 | } |
752 | EXPORT_SYMBOL_GPL(gnttab_unmap_refs); | 821 | EXPORT_SYMBOL_GPL(gnttab_unmap_refs); |
753 | 822 | ||
823 | #define GNTTAB_UNMAP_REFS_DELAY 5 | ||
824 | |||
825 | static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item); | ||
826 | |||
827 | static void gnttab_unmap_work(struct work_struct *work) | ||
828 | { | ||
829 | struct gntab_unmap_queue_data | ||
830 | *unmap_data = container_of(work, | ||
831 | struct gntab_unmap_queue_data, | ||
832 | gnttab_work.work); | ||
833 | if (unmap_data->age != UINT_MAX) | ||
834 | unmap_data->age++; | ||
835 | __gnttab_unmap_refs_async(unmap_data); | ||
836 | } | ||
837 | |||
838 | static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item) | ||
839 | { | ||
840 | int ret; | ||
841 | int pc; | ||
842 | |||
843 | for (pc = 0; pc < item->count; pc++) { | ||
844 | if (page_count(item->pages[pc]) > 1) { | ||
845 | unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1); | ||
846 | schedule_delayed_work(&item->gnttab_work, | ||
847 | msecs_to_jiffies(delay)); | ||
848 | return; | ||
849 | } | ||
850 | } | ||
851 | |||
852 | ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops, | ||
853 | item->pages, item->count); | ||
854 | item->done(ret, item); | ||
855 | } | ||
856 | |||
857 | void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item) | ||
858 | { | ||
859 | INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work); | ||
860 | item->age = 0; | ||
861 | |||
862 | __gnttab_unmap_refs_async(item); | ||
863 | } | ||
864 | EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async); | ||
865 | |||
754 | static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes) | 866 | static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes) |
755 | { | 867 | { |
756 | int rc; | 868 | int rc; |
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index f8bb36f9d9ce..bf1940706422 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c | |||
@@ -105,10 +105,16 @@ static void do_suspend(void) | |||
105 | 105 | ||
106 | err = freeze_processes(); | 106 | err = freeze_processes(); |
107 | if (err) { | 107 | if (err) { |
108 | pr_err("%s: freeze failed %d\n", __func__, err); | 108 | pr_err("%s: freeze processes failed %d\n", __func__, err); |
109 | goto out; | 109 | goto out; |
110 | } | 110 | } |
111 | 111 | ||
112 | err = freeze_kernel_threads(); | ||
113 | if (err) { | ||
114 | pr_err("%s: freeze kernel threads failed %d\n", __func__, err); | ||
115 | goto out_thaw; | ||
116 | } | ||
117 | |||
112 | err = dpm_suspend_start(PMSG_FREEZE); | 118 | err = dpm_suspend_start(PMSG_FREEZE); |
113 | if (err) { | 119 | if (err) { |
114 | pr_err("%s: dpm_suspend_start %d\n", __func__, err); | 120 | pr_err("%s: dpm_suspend_start %d\n", __func__, err); |
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c index 83b5c53bec6b..8a65423bc696 100644 --- a/drivers/xen/tmem.c +++ b/drivers/xen/tmem.c | |||
@@ -374,7 +374,7 @@ static struct frontswap_ops tmem_frontswap_ops = { | |||
374 | }; | 374 | }; |
375 | #endif | 375 | #endif |
376 | 376 | ||
377 | static int xen_tmem_init(void) | 377 | static int __init xen_tmem_init(void) |
378 | { | 378 | { |
379 | if (!xen_domain()) | 379 | if (!xen_domain()) |
380 | return 0; | 380 | return 0; |
diff --git a/drivers/xen/xen-acpi-memhotplug.c b/drivers/xen/xen-acpi-memhotplug.c index 34e40b733f9a..4fc886cd5586 100644 --- a/drivers/xen/xen-acpi-memhotplug.c +++ b/drivers/xen/xen-acpi-memhotplug.c | |||
@@ -117,8 +117,8 @@ acpi_memory_get_resource(struct acpi_resource *resource, void *context) | |||
117 | list_for_each_entry(info, &mem_device->res_list, list) { | 117 | list_for_each_entry(info, &mem_device->res_list, list) { |
118 | if ((info->caching == address64.info.mem.caching) && | 118 | if ((info->caching == address64.info.mem.caching) && |
119 | (info->write_protect == address64.info.mem.write_protect) && | 119 | (info->write_protect == address64.info.mem.write_protect) && |
120 | (info->start_addr + info->length == address64.minimum)) { | 120 | (info->start_addr + info->length == address64.address.minimum)) { |
121 | info->length += address64.address_length; | 121 | info->length += address64.address.address_length; |
122 | return AE_OK; | 122 | return AE_OK; |
123 | } | 123 | } |
124 | } | 124 | } |
@@ -130,8 +130,8 @@ acpi_memory_get_resource(struct acpi_resource *resource, void *context) | |||
130 | INIT_LIST_HEAD(&new->list); | 130 | INIT_LIST_HEAD(&new->list); |
131 | new->caching = address64.info.mem.caching; | 131 | new->caching = address64.info.mem.caching; |
132 | new->write_protect = address64.info.mem.write_protect; | 132 | new->write_protect = address64.info.mem.write_protect; |
133 | new->start_addr = address64.minimum; | 133 | new->start_addr = address64.address.minimum; |
134 | new->length = address64.address_length; | 134 | new->length = address64.address.address_length; |
135 | list_add_tail(&new->list, &mem_device->res_list); | 135 | list_add_tail(&new->list, &mem_device->res_list); |
136 | 136 | ||
137 | return AE_OK; | 137 | return AE_OK; |
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c index e999496eda3e..ecd540a7a562 100644 --- a/drivers/xen/xen-scsiback.c +++ b/drivers/xen/xen-scsiback.c | |||
@@ -227,7 +227,7 @@ static void put_free_pages(struct page **page, int num) | |||
227 | return; | 227 | return; |
228 | if (i > scsiback_max_buffer_pages) { | 228 | if (i > scsiback_max_buffer_pages) { |
229 | n = min(num, i - scsiback_max_buffer_pages); | 229 | n = min(num, i - scsiback_max_buffer_pages); |
230 | free_xenballooned_pages(n, page + num - n); | 230 | gnttab_free_pages(n, page + num - n); |
231 | n = num - n; | 231 | n = num - n; |
232 | } | 232 | } |
233 | spin_lock_irqsave(&free_pages_lock, flags); | 233 | spin_lock_irqsave(&free_pages_lock, flags); |
@@ -244,7 +244,7 @@ static int get_free_page(struct page **page) | |||
244 | spin_lock_irqsave(&free_pages_lock, flags); | 244 | spin_lock_irqsave(&free_pages_lock, flags); |
245 | if (list_empty(&scsiback_free_pages)) { | 245 | if (list_empty(&scsiback_free_pages)) { |
246 | spin_unlock_irqrestore(&free_pages_lock, flags); | 246 | spin_unlock_irqrestore(&free_pages_lock, flags); |
247 | return alloc_xenballooned_pages(1, page, false); | 247 | return gnttab_alloc_pages(1, page); |
248 | } | 248 | } |
249 | page[0] = list_first_entry(&scsiback_free_pages, struct page, lru); | 249 | page[0] = list_first_entry(&scsiback_free_pages, struct page, lru); |
250 | list_del(&page[0]->lru); | 250 | list_del(&page[0]->lru); |
@@ -2106,7 +2106,7 @@ static void __exit scsiback_exit(void) | |||
2106 | while (free_pages_num) { | 2106 | while (free_pages_num) { |
2107 | if (get_free_page(&page)) | 2107 | if (get_free_page(&page)) |
2108 | BUG(); | 2108 | BUG(); |
2109 | free_xenballooned_pages(1, &page); | 2109 | gnttab_free_pages(1, &page); |
2110 | } | 2110 | } |
2111 | scsiback_deregister_configfs(); | 2111 | scsiback_deregister_configfs(); |
2112 | xenbus_unregister_driver(&scsiback_driver); | 2112 | xenbus_unregister_driver(&scsiback_driver); |
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c index 85534ea63555..9433e46518c8 100644 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c | |||
@@ -326,10 +326,13 @@ static int xenbus_write_transaction(unsigned msg_type, | |||
326 | } | 326 | } |
327 | 327 | ||
328 | if (msg_type == XS_TRANSACTION_START) { | 328 | if (msg_type == XS_TRANSACTION_START) { |
329 | trans->handle.id = simple_strtoul(reply, NULL, 0); | 329 | if (u->u.msg.type == XS_ERROR) |
330 | 330 | kfree(trans); | |
331 | list_add(&trans->list, &u->transactions); | 331 | else { |
332 | } else if (msg_type == XS_TRANSACTION_END) { | 332 | trans->handle.id = simple_strtoul(reply, NULL, 0); |
333 | list_add(&trans->list, &u->transactions); | ||
334 | } | ||
335 | } else if (u->u.msg.type == XS_TRANSACTION_END) { | ||
333 | list_for_each_entry(trans, &u->transactions, list) | 336 | list_for_each_entry(trans, &u->transactions, list) |
334 | if (trans->handle.id == u->u.msg.tx_id) | 337 | if (trans->handle.id == u->u.msg.tx_id) |
335 | break; | 338 | break; |