author | Gleb Natapov <gleb@redhat.com> | 2013-10-17 10:04:47 -0400
committer | Gleb Natapov <gleb@redhat.com> | 2013-10-17 10:41:49 -0400
commit | 13acfd5715144518232d8e29acf7c76ef1b089d8 (patch)
tree | 093c4311a737210bbdd3d1919f8193cb5f017241 /drivers
parent | d570142674890fe10b3d7d86aa105e3dfce1ddfa (diff)
parent | 34ec4de42be5006abdd8d0c08b306ffaa64d0d5d (diff)
PowerPC KVM work is based on a commit after rc4.
Merging master into next to satisfy the dependencies.
Conflicts:
arch/arm/kvm/reset.c
Diffstat (limited to 'drivers')
294 files changed, 2428 insertions, 1698 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 22327e6a7236..5ea5c32609ac 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig | |||
@@ -24,7 +24,7 @@ menuconfig ACPI | |||
24 | are configured, ACPI is used. | 24 | are configured, ACPI is used. |
25 | 25 | ||
26 | The project home page for the Linux ACPI subsystem is here: | 26 | The project home page for the Linux ACPI subsystem is here: |
27 | <http://www.lesswatts.org/projects/acpi/> | 27 | <https://01.org/linux-acpi> |
28 | 28 | ||
29 | Linux support for ACPI is based on Intel Corporation's ACPI | 29 | Linux support for ACPI is based on Intel Corporation's ACPI |
30 | Component Architecture (ACPI CA). For more information on the | 30 | Component Architecture (ACPI CA). For more information on the |
diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c index f40acef80269..a6977e12d574 100644 --- a/drivers/acpi/acpi_ipmi.c +++ b/drivers/acpi/acpi_ipmi.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/ipmi.h> | 39 | #include <linux/ipmi.h> |
40 | #include <linux/device.h> | 40 | #include <linux/device.h> |
41 | #include <linux/pnp.h> | 41 | #include <linux/pnp.h> |
42 | #include <linux/spinlock.h> | ||
42 | 43 | ||
43 | MODULE_AUTHOR("Zhao Yakui"); | 44 | MODULE_AUTHOR("Zhao Yakui"); |
44 | MODULE_DESCRIPTION("ACPI IPMI Opregion driver"); | 45 | MODULE_DESCRIPTION("ACPI IPMI Opregion driver"); |
@@ -57,7 +58,7 @@ struct acpi_ipmi_device { | |||
57 | struct list_head head; | 58 | struct list_head head; |
58 | /* the IPMI request message list */ | 59 | /* the IPMI request message list */ |
59 | struct list_head tx_msg_list; | 60 | struct list_head tx_msg_list; |
60 | struct mutex tx_msg_lock; | 61 | spinlock_t tx_msg_lock; |
61 | acpi_handle handle; | 62 | acpi_handle handle; |
62 | struct pnp_dev *pnp_dev; | 63 | struct pnp_dev *pnp_dev; |
63 | ipmi_user_t user_interface; | 64 | ipmi_user_t user_interface; |
@@ -147,6 +148,7 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg, | |||
147 | struct kernel_ipmi_msg *msg; | 148 | struct kernel_ipmi_msg *msg; |
148 | struct acpi_ipmi_buffer *buffer; | 149 | struct acpi_ipmi_buffer *buffer; |
149 | struct acpi_ipmi_device *device; | 150 | struct acpi_ipmi_device *device; |
151 | unsigned long flags; | ||
150 | 152 | ||
151 | msg = &tx_msg->tx_message; | 153 | msg = &tx_msg->tx_message; |
152 | /* | 154 | /* |
@@ -177,10 +179,10 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg, | |||
177 | 179 | ||
178 | /* Get the msgid */ | 180 | /* Get the msgid */ |
179 | device = tx_msg->device; | 181 | device = tx_msg->device; |
180 | mutex_lock(&device->tx_msg_lock); | 182 | spin_lock_irqsave(&device->tx_msg_lock, flags); |
181 | device->curr_msgid++; | 183 | device->curr_msgid++; |
182 | tx_msg->tx_msgid = device->curr_msgid; | 184 | tx_msg->tx_msgid = device->curr_msgid; |
183 | mutex_unlock(&device->tx_msg_lock); | 185 | spin_unlock_irqrestore(&device->tx_msg_lock, flags); |
184 | } | 186 | } |
185 | 187 | ||
186 | static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg, | 188 | static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg, |
@@ -242,6 +244,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data) | |||
242 | int msg_found = 0; | 244 | int msg_found = 0; |
243 | struct acpi_ipmi_msg *tx_msg; | 245 | struct acpi_ipmi_msg *tx_msg; |
244 | struct pnp_dev *pnp_dev = ipmi_device->pnp_dev; | 246 | struct pnp_dev *pnp_dev = ipmi_device->pnp_dev; |
247 | unsigned long flags; | ||
245 | 248 | ||
246 | if (msg->user != ipmi_device->user_interface) { | 249 | if (msg->user != ipmi_device->user_interface) { |
247 | dev_warn(&pnp_dev->dev, "Unexpected response is returned. " | 250 | dev_warn(&pnp_dev->dev, "Unexpected response is returned. " |
@@ -250,7 +253,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data) | |||
250 | ipmi_free_recv_msg(msg); | 253 | ipmi_free_recv_msg(msg); |
251 | return; | 254 | return; |
252 | } | 255 | } |
253 | mutex_lock(&ipmi_device->tx_msg_lock); | 256 | spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags); |
254 | list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) { | 257 | list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) { |
255 | if (msg->msgid == tx_msg->tx_msgid) { | 258 | if (msg->msgid == tx_msg->tx_msgid) { |
256 | msg_found = 1; | 259 | msg_found = 1; |
@@ -258,7 +261,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data) | |||
258 | } | 261 | } |
259 | } | 262 | } |
260 | 263 | ||
261 | mutex_unlock(&ipmi_device->tx_msg_lock); | 264 | spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags); |
262 | if (!msg_found) { | 265 | if (!msg_found) { |
263 | dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is " | 266 | dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is " |
264 | "returned.\n", msg->msgid); | 267 | "returned.\n", msg->msgid); |
@@ -378,6 +381,7 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address, | |||
378 | struct acpi_ipmi_device *ipmi_device = handler_context; | 381 | struct acpi_ipmi_device *ipmi_device = handler_context; |
379 | int err, rem_time; | 382 | int err, rem_time; |
380 | acpi_status status; | 383 | acpi_status status; |
384 | unsigned long flags; | ||
381 | /* | 385 | /* |
382 | * IPMI opregion message. | 386 | * IPMI opregion message. |
383 | * IPMI message is firstly written to the BMC and system software | 387 | * IPMI message is firstly written to the BMC and system software |
@@ -395,9 +399,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address, | |||
395 | return AE_NO_MEMORY; | 399 | return AE_NO_MEMORY; |
396 | 400 | ||
397 | acpi_format_ipmi_msg(tx_msg, address, value); | 401 | acpi_format_ipmi_msg(tx_msg, address, value); |
398 | mutex_lock(&ipmi_device->tx_msg_lock); | 402 | spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags); |
399 | list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list); | 403 | list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list); |
400 | mutex_unlock(&ipmi_device->tx_msg_lock); | 404 | spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags); |
401 | err = ipmi_request_settime(ipmi_device->user_interface, | 405 | err = ipmi_request_settime(ipmi_device->user_interface, |
402 | &tx_msg->addr, | 406 | &tx_msg->addr, |
403 | tx_msg->tx_msgid, | 407 | tx_msg->tx_msgid, |
@@ -413,9 +417,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address, | |||
413 | status = AE_OK; | 417 | status = AE_OK; |
414 | 418 | ||
415 | end_label: | 419 | end_label: |
416 | mutex_lock(&ipmi_device->tx_msg_lock); | 420 | spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags); |
417 | list_del(&tx_msg->head); | 421 | list_del(&tx_msg->head); |
418 | mutex_unlock(&ipmi_device->tx_msg_lock); | 422 | spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags); |
419 | kfree(tx_msg); | 423 | kfree(tx_msg); |
420 | return status; | 424 | return status; |
421 | } | 425 | } |
@@ -457,7 +461,7 @@ static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device) | |||
457 | 461 | ||
458 | INIT_LIST_HEAD(&ipmi_device->head); | 462 | INIT_LIST_HEAD(&ipmi_device->head); |
459 | 463 | ||
460 | mutex_init(&ipmi_device->tx_msg_lock); | 464 | spin_lock_init(&ipmi_device->tx_msg_lock); |
461 | INIT_LIST_HEAD(&ipmi_device->tx_msg_list); | 465 | INIT_LIST_HEAD(&ipmi_device->tx_msg_list); |
462 | ipmi_install_space_handler(ipmi_device); | 466 | ipmi_install_space_handler(ipmi_device); |
463 | 467 | ||
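The acpi_ipmi.c hunks above all serve one conversion: tx_msg_lock changes from a mutex to a spinlock taken with spin_lock_irqsave(), because tx_msg_list is also walked from ipmi_msg_handler(), which the IPMI core may invoke in atomic context where sleeping on a mutex is not allowed. A minimal sketch of that locking pattern follows; the identifiers (tx_msg_queue and the file-scope list/lock) are illustrative, not the driver's actual symbols.

```c
#include <linux/list.h>
#include <linux/spinlock.h>

static LIST_HEAD(tx_msg_list);
static DEFINE_SPINLOCK(tx_msg_lock);

/*
 * Callable from process context and from an atomic callback alike:
 * a sleeping lock (mutex) would be unsafe here, a spinlock with
 * interrupts disabled is not.
 */
static void tx_msg_queue(struct list_head *entry)
{
	unsigned long flags;

	spin_lock_irqsave(&tx_msg_lock, flags);
	list_add_tail(entry, &tx_msg_list);
	spin_unlock_irqrestore(&tx_msg_lock, flags);
}
```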
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index fbdb82e70d10..407ad13cac2f 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c | |||
@@ -968,7 +968,7 @@ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device) | |||
968 | } | 968 | } |
969 | return 0; | 969 | return 0; |
970 | } | 970 | } |
971 | EXPORT_SYMBOL_GPL(acpi_bus_get_device); | 971 | EXPORT_SYMBOL(acpi_bus_get_device); |
972 | 972 | ||
973 | int acpi_device_add(struct acpi_device *device, | 973 | int acpi_device_add(struct acpi_device *device, |
974 | void (*release)(struct device *)) | 974 | void (*release)(struct device *)) |
@@ -1121,7 +1121,7 @@ int acpi_bus_register_driver(struct acpi_driver *driver) | |||
1121 | EXPORT_SYMBOL(acpi_bus_register_driver); | 1121 | EXPORT_SYMBOL(acpi_bus_register_driver); |
1122 | 1122 | ||
1123 | /** | 1123 | /** |
1124 | * acpi_bus_unregister_driver - unregisters a driver with the APIC bus | 1124 | * acpi_bus_unregister_driver - unregisters a driver with the ACPI bus |
1125 | * @driver: driver to unregister | 1125 | * @driver: driver to unregister |
1126 | * | 1126 | * |
1127 | * Unregisters a driver with the ACPI bus. Searches the namespace for all | 1127 | * Unregisters a driver with the ACPI bus. Searches the namespace for all |
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c index 958ba2a420c3..97f4acb54ad6 100644 --- a/drivers/ata/sata_promise.c +++ b/drivers/ata/sata_promise.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * sata_promise.c - Promise SATA | 2 | * sata_promise.c - Promise SATA |
3 | * | 3 | * |
4 | * Maintained by: Tejun Heo <tj@kernel.org> | 4 | * Maintained by: Tejun Heo <tj@kernel.org> |
5 | * Mikael Pettersson <mikpe@it.uu.se> | 5 | * Mikael Pettersson |
6 | * Please ALWAYS copy linux-ide@vger.kernel.org | 6 | * Please ALWAYS copy linux-ide@vger.kernel.org |
7 | * on emails. | 7 | * on emails. |
8 | * | 8 | * |
diff --git a/drivers/base/core.c b/drivers/base/core.c index c7cfadcf6752..34abf4d8a45f 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c | |||
@@ -2017,7 +2017,7 @@ EXPORT_SYMBOL_GPL(device_move); | |||
2017 | */ | 2017 | */ |
2018 | void device_shutdown(void) | 2018 | void device_shutdown(void) |
2019 | { | 2019 | { |
2020 | struct device *dev; | 2020 | struct device *dev, *parent; |
2021 | 2021 | ||
2022 | spin_lock(&devices_kset->list_lock); | 2022 | spin_lock(&devices_kset->list_lock); |
2023 | /* | 2023 | /* |
@@ -2034,7 +2034,7 @@ void device_shutdown(void) | |||
2034 | * prevent it from being freed because parent's | 2034 | * prevent it from being freed because parent's |
2035 | * lock is to be held | 2035 | * lock is to be held |
2036 | */ | 2036 | */ |
2037 | get_device(dev->parent); | 2037 | parent = get_device(dev->parent); |
2038 | get_device(dev); | 2038 | get_device(dev); |
2039 | /* | 2039 | /* |
2040 | * Make sure the device is off the kset list, in the | 2040 | * Make sure the device is off the kset list, in the |
@@ -2044,8 +2044,8 @@ void device_shutdown(void) | |||
2044 | spin_unlock(&devices_kset->list_lock); | 2044 | spin_unlock(&devices_kset->list_lock); |
2045 | 2045 | ||
2046 | /* hold lock to avoid race with probe/release */ | 2046 | /* hold lock to avoid race with probe/release */ |
2047 | if (dev->parent) | 2047 | if (parent) |
2048 | device_lock(dev->parent); | 2048 | device_lock(parent); |
2049 | device_lock(dev); | 2049 | device_lock(dev); |
2050 | 2050 | ||
2051 | /* Don't allow any more runtime suspends */ | 2051 | /* Don't allow any more runtime suspends */ |
@@ -2063,11 +2063,11 @@ void device_shutdown(void) | |||
2063 | } | 2063 | } |
2064 | 2064 | ||
2065 | device_unlock(dev); | 2065 | device_unlock(dev); |
2066 | if (dev->parent) | 2066 | if (parent) |
2067 | device_unlock(dev->parent); | 2067 | device_unlock(parent); |
2068 | 2068 | ||
2069 | put_device(dev); | 2069 | put_device(dev); |
2070 | put_device(dev->parent); | 2070 | put_device(parent); |
2071 | 2071 | ||
2072 | spin_lock(&devices_kset->list_lock); | 2072 | spin_lock(&devices_kset->list_lock); |
2073 | } | 2073 | } |
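The device_shutdown() hunks above read better as one unit: a parent pointer is captured and referenced while the kset list lock is still held, and only that local pointer is used afterwards, so the lock/unlock and get/put calls stay balanced on the same object even if dev->parent changes concurrently. A hedged condensation of the resulting shape (my own paraphrase of the hunks, not the function verbatim):

```c
struct device *parent = get_device(dev->parent);	/* NULL-safe */

get_device(dev);
spin_unlock(&devices_kset->list_lock);

if (parent)
	device_lock(parent);
device_lock(dev);

/* ... cancel runtime PM and call the shutdown callbacks ... */

device_unlock(dev);
if (parent)
	device_unlock(parent);

put_device(dev);
put_device(parent);	/* put_device(NULL) is a no-op */

spin_lock(&devices_kset->list_lock);
```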
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c index c9fd6943ce45..50329d1057ed 100644 --- a/drivers/bcma/driver_pci.c +++ b/drivers/bcma/driver_pci.c | |||
@@ -210,25 +210,6 @@ static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc) | |||
210 | } | 210 | } |
211 | } | 211 | } |
212 | 212 | ||
213 | static void bcma_core_pci_power_save(struct bcma_drv_pci *pc, bool up) | ||
214 | { | ||
215 | u16 data; | ||
216 | |||
217 | if (pc->core->id.rev >= 15 && pc->core->id.rev <= 20) { | ||
218 | data = up ? 0x74 : 0x7C; | ||
219 | bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1, | ||
220 | BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7F64); | ||
221 | bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1, | ||
222 | BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data); | ||
223 | } else if (pc->core->id.rev >= 21 && pc->core->id.rev <= 22) { | ||
224 | data = up ? 0x75 : 0x7D; | ||
225 | bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1, | ||
226 | BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7E65); | ||
227 | bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1, | ||
228 | BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data); | ||
229 | } | ||
230 | } | ||
231 | |||
232 | /************************************************** | 213 | /************************************************** |
233 | * Init. | 214 | * Init. |
234 | **************************************************/ | 215 | **************************************************/ |
@@ -255,6 +236,32 @@ void bcma_core_pci_init(struct bcma_drv_pci *pc) | |||
255 | bcma_core_pci_clientmode_init(pc); | 236 | bcma_core_pci_clientmode_init(pc); |
256 | } | 237 | } |
257 | 238 | ||
239 | void bcma_core_pci_power_save(struct bcma_bus *bus, bool up) | ||
240 | { | ||
241 | struct bcma_drv_pci *pc; | ||
242 | u16 data; | ||
243 | |||
244 | if (bus->hosttype != BCMA_HOSTTYPE_PCI) | ||
245 | return; | ||
246 | |||
247 | pc = &bus->drv_pci[0]; | ||
248 | |||
249 | if (pc->core->id.rev >= 15 && pc->core->id.rev <= 20) { | ||
250 | data = up ? 0x74 : 0x7C; | ||
251 | bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1, | ||
252 | BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7F64); | ||
253 | bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1, | ||
254 | BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data); | ||
255 | } else if (pc->core->id.rev >= 21 && pc->core->id.rev <= 22) { | ||
256 | data = up ? 0x75 : 0x7D; | ||
257 | bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1, | ||
258 | BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7E65); | ||
259 | bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1, | ||
260 | BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data); | ||
261 | } | ||
262 | } | ||
263 | EXPORT_SYMBOL_GPL(bcma_core_pci_power_save); | ||
264 | |||
258 | int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core, | 265 | int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core, |
259 | bool enable) | 266 | bool enable) |
260 | { | 267 | { |
@@ -310,8 +317,6 @@ void bcma_core_pci_up(struct bcma_bus *bus) | |||
310 | 317 | ||
311 | pc = &bus->drv_pci[0]; | 318 | pc = &bus->drv_pci[0]; |
312 | 319 | ||
313 | bcma_core_pci_power_save(pc, true); | ||
314 | |||
315 | bcma_core_pci_extend_L1timer(pc, true); | 320 | bcma_core_pci_extend_L1timer(pc, true); |
316 | } | 321 | } |
317 | EXPORT_SYMBOL_GPL(bcma_core_pci_up); | 322 | EXPORT_SYMBOL_GPL(bcma_core_pci_up); |
@@ -326,7 +331,5 @@ void bcma_core_pci_down(struct bcma_bus *bus) | |||
326 | pc = &bus->drv_pci[0]; | 331 | pc = &bus->drv_pci[0]; |
327 | 332 | ||
328 | bcma_core_pci_extend_L1timer(pc, false); | 333 | bcma_core_pci_extend_L1timer(pc, false); |
329 | |||
330 | bcma_core_pci_power_save(pc, false); | ||
331 | } | 334 | } |
332 | EXPORT_SYMBOL_GPL(bcma_core_pci_down); | 335 | EXPORT_SYMBOL_GPL(bcma_core_pci_down); |
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index d2d95ff5353b..edfa2515bc86 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c | |||
@@ -1189,6 +1189,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode, | |||
1189 | int err; | 1189 | int err; |
1190 | u32 cp; | 1190 | u32 cp; |
1191 | 1191 | ||
1192 | memset(&arg64, 0, sizeof(arg64)); | ||
1192 | err = 0; | 1193 | err = 0; |
1193 | err |= | 1194 | err |= |
1194 | copy_from_user(&arg64.LUN_info, &arg32->LUN_info, | 1195 | copy_from_user(&arg64.LUN_info, &arg32->LUN_info, |
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c index 639d26b90b91..2b9440384536 100644 --- a/drivers/block/cpqarray.c +++ b/drivers/block/cpqarray.c | |||
@@ -1193,6 +1193,7 @@ out_passthru: | |||
1193 | ida_pci_info_struct pciinfo; | 1193 | ida_pci_info_struct pciinfo; |
1194 | 1194 | ||
1195 | if (!arg) return -EINVAL; | 1195 | if (!arg) return -EINVAL; |
1196 | memset(&pciinfo, 0, sizeof(pciinfo)); | ||
1196 | pciinfo.bus = host->pci_dev->bus->number; | 1197 | pciinfo.bus = host->pci_dev->bus->number; |
1197 | pciinfo.dev_fn = host->pci_dev->devfn; | 1198 | pciinfo.dev_fn = host->pci_dev->devfn; |
1198 | pciinfo.board_id = host->board_id; | 1199 | pciinfo.board_id = host->board_id; |
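The single memset() lines added to cciss_ioctl32_passthru() and the cpqarray ioctl path above are the standard fix for a kernel-to-user information leak: a stack structure that is only partially populated before being copied to user space still carries old stack bytes in its padding and untouched members unless it is zeroed first. A hedged sketch of the pattern with invented names (example_pci_info, example_get_info):

```c
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/uaccess.h>

struct example_pci_info {
	unsigned char	bus;
	unsigned char	dev_fn;
	/* two bytes of implicit padding would leak if left uninitialized */
	unsigned int	board_id;
};

static long example_get_info(struct example_pci_info __user *uptr,
			     unsigned char bus, unsigned char devfn,
			     unsigned int board_id)
{
	struct example_pci_info info;

	memset(&info, 0, sizeof(info));	/* clear padding and unused fields */
	info.bus = bus;
	info.dev_fn = devfn;
	info.board_id = board_id;

	return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;
}
```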
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index a12b923bbaca..0a327f4154a2 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c | |||
@@ -85,6 +85,7 @@ static struct usb_device_id ath3k_table[] = { | |||
85 | { USB_DEVICE(0x04CA, 0x3008) }, | 85 | { USB_DEVICE(0x04CA, 0x3008) }, |
86 | { USB_DEVICE(0x13d3, 0x3362) }, | 86 | { USB_DEVICE(0x13d3, 0x3362) }, |
87 | { USB_DEVICE(0x0CF3, 0xE004) }, | 87 | { USB_DEVICE(0x0CF3, 0xE004) }, |
88 | { USB_DEVICE(0x0CF3, 0xE005) }, | ||
88 | { USB_DEVICE(0x0930, 0x0219) }, | 89 | { USB_DEVICE(0x0930, 0x0219) }, |
89 | { USB_DEVICE(0x0489, 0xe057) }, | 90 | { USB_DEVICE(0x0489, 0xe057) }, |
90 | { USB_DEVICE(0x13d3, 0x3393) }, | 91 | { USB_DEVICE(0x13d3, 0x3393) }, |
@@ -126,6 +127,7 @@ static struct usb_device_id ath3k_blist_tbl[] = { | |||
126 | { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, | 127 | { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, |
127 | { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, | 128 | { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, |
128 | { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, | 129 | { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, |
130 | { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, | ||
129 | { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, | 131 | { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, |
130 | { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, | 132 | { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, |
131 | { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, | 133 | { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, |
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 8e16f0af6358..f3dfc0a88fdc 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c | |||
@@ -102,6 +102,7 @@ static struct usb_device_id btusb_table[] = { | |||
102 | 102 | ||
103 | /* Broadcom BCM20702A0 */ | 103 | /* Broadcom BCM20702A0 */ |
104 | { USB_DEVICE(0x0b05, 0x17b5) }, | 104 | { USB_DEVICE(0x0b05, 0x17b5) }, |
105 | { USB_DEVICE(0x0b05, 0x17cb) }, | ||
105 | { USB_DEVICE(0x04ca, 0x2003) }, | 106 | { USB_DEVICE(0x04ca, 0x2003) }, |
106 | { USB_DEVICE(0x0489, 0xe042) }, | 107 | { USB_DEVICE(0x0489, 0xe042) }, |
107 | { USB_DEVICE(0x413c, 0x8197) }, | 108 | { USB_DEVICE(0x413c, 0x8197) }, |
@@ -112,6 +113,9 @@ static struct usb_device_id btusb_table[] = { | |||
112 | /*Broadcom devices with vendor specific id */ | 113 | /*Broadcom devices with vendor specific id */ |
113 | { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) }, | 114 | { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) }, |
114 | 115 | ||
116 | /* Belkin F8065bf - Broadcom based */ | ||
117 | { USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) }, | ||
118 | |||
115 | { } /* Terminating entry */ | 119 | { } /* Terminating entry */ |
116 | }; | 120 | }; |
117 | 121 | ||
@@ -148,6 +152,7 @@ static struct usb_device_id blacklist_table[] = { | |||
148 | { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, | 152 | { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, |
149 | { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, | 153 | { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, |
150 | { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, | 154 | { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, |
155 | { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, | ||
151 | { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, | 156 | { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, |
152 | { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, | 157 | { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, |
153 | { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, | 158 | { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, |
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c index 19ab6ff53d59..2394e9753ef5 100644 --- a/drivers/bus/mvebu-mbus.c +++ b/drivers/bus/mvebu-mbus.c | |||
@@ -700,6 +700,7 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus, | |||
700 | phys_addr_t sdramwins_phys_base, | 700 | phys_addr_t sdramwins_phys_base, |
701 | size_t sdramwins_size) | 701 | size_t sdramwins_size) |
702 | { | 702 | { |
703 | struct device_node *np; | ||
703 | int win; | 704 | int win; |
704 | 705 | ||
705 | mbus->mbuswins_base = ioremap(mbuswins_phys_base, mbuswins_size); | 706 | mbus->mbuswins_base = ioremap(mbuswins_phys_base, mbuswins_size); |
@@ -712,8 +713,11 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus, | |||
712 | return -ENOMEM; | 713 | return -ENOMEM; |
713 | } | 714 | } |
714 | 715 | ||
715 | if (of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric")) | 716 | np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric"); |
717 | if (np) { | ||
716 | mbus->hw_io_coherency = 1; | 718 | mbus->hw_io_coherency = 1; |
719 | of_node_put(np); | ||
720 | } | ||
717 | 721 | ||
718 | for (win = 0; win < mbus->soc->num_wins; win++) | 722 | for (win = 0; win < mbus->soc->num_wins; win++) |
719 | mvebu_mbus_disable_window(mbus, win); | 723 | mvebu_mbus_disable_window(mbus, win); |
@@ -861,11 +865,13 @@ static void __init mvebu_mbus_get_pcie_resources(struct device_node *np, | |||
861 | int ret; | 865 | int ret; |
862 | 866 | ||
863 | /* | 867 | /* |
864 | * These are optional, so we clear them and they'll | 868 | * These are optional, so we make sure that resource_size(x) will |
865 | * be zero if they are missing from the DT. | 869 | * return 0. |
866 | */ | 870 | */ |
867 | memset(mem, 0, sizeof(struct resource)); | 871 | memset(mem, 0, sizeof(struct resource)); |
872 | mem->end = -1; | ||
868 | memset(io, 0, sizeof(struct resource)); | 873 | memset(io, 0, sizeof(struct resource)); |
874 | io->end = -1; | ||
869 | 875 | ||
870 | ret = of_property_read_u32_array(np, "pcie-mem-aperture", reg, ARRAY_SIZE(reg)); | 876 | ret = of_property_read_u32_array(np, "pcie-mem-aperture", reg, ARRAY_SIZE(reg)); |
871 | if (!ret) { | 877 | if (!ret) { |
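The mvebu-mbus hunk above leans on how resource_size() is defined: it returns end - start + 1, so a fully zeroed struct resource reports a size of 1 rather than 0. Setting end to -1 makes the subtraction wrap to 0, which is the conventional way to express an "absent" resource to callers that test resource_size(x) == 0. A small sketch (example_clear_resource is an invented helper name):

```c
#include <linux/ioport.h>
#include <linux/string.h>

/*
 * resource_size() is end - start + 1, so a zeroed resource has size 1.
 * start = 0, end = -1 makes the size wrap to 0, i.e. "nothing here".
 */
static void example_clear_resource(struct resource *res)
{
	memset(res, 0, sizeof(*res));
	res->end = -1;			/* resource_size(res) == 0 */
}
```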
diff --git a/drivers/char/random.c b/drivers/char/random.c index 7737b5bd26af..7a744d391756 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
@@ -640,7 +640,7 @@ struct timer_rand_state { | |||
640 | */ | 640 | */ |
641 | void add_device_randomness(const void *buf, unsigned int size) | 641 | void add_device_randomness(const void *buf, unsigned int size) |
642 | { | 642 | { |
643 | unsigned long time = get_cycles() ^ jiffies; | 643 | unsigned long time = random_get_entropy() ^ jiffies; |
644 | 644 | ||
645 | mix_pool_bytes(&input_pool, buf, size, NULL); | 645 | mix_pool_bytes(&input_pool, buf, size, NULL); |
646 | mix_pool_bytes(&input_pool, &time, sizeof(time), NULL); | 646 | mix_pool_bytes(&input_pool, &time, sizeof(time), NULL); |
@@ -677,7 +677,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) | |||
677 | goto out; | 677 | goto out; |
678 | 678 | ||
679 | sample.jiffies = jiffies; | 679 | sample.jiffies = jiffies; |
680 | sample.cycles = get_cycles(); | 680 | sample.cycles = random_get_entropy(); |
681 | sample.num = num; | 681 | sample.num = num; |
682 | mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL); | 682 | mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL); |
683 | 683 | ||
@@ -744,7 +744,7 @@ void add_interrupt_randomness(int irq, int irq_flags) | |||
744 | struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness); | 744 | struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness); |
745 | struct pt_regs *regs = get_irq_regs(); | 745 | struct pt_regs *regs = get_irq_regs(); |
746 | unsigned long now = jiffies; | 746 | unsigned long now = jiffies; |
747 | __u32 input[4], cycles = get_cycles(); | 747 | __u32 input[4], cycles = random_get_entropy(); |
748 | 748 | ||
749 | input[0] = cycles ^ jiffies; | 749 | input[0] = cycles ^ jiffies; |
750 | input[1] = irq; | 750 | input[1] = irq; |
@@ -1459,12 +1459,11 @@ struct ctl_table random_table[] = { | |||
1459 | 1459 | ||
1460 | static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned; | 1460 | static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned; |
1461 | 1461 | ||
1462 | static int __init random_int_secret_init(void) | 1462 | int random_int_secret_init(void) |
1463 | { | 1463 | { |
1464 | get_random_bytes(random_int_secret, sizeof(random_int_secret)); | 1464 | get_random_bytes(random_int_secret, sizeof(random_int_secret)); |
1465 | return 0; | 1465 | return 0; |
1466 | } | 1466 | } |
1467 | late_initcall(random_int_secret_init); | ||
1468 | 1467 | ||
1469 | /* | 1468 | /* |
1470 | * Get a random word for internal kernel use only. Similar to urandom but | 1469 | * Get a random word for internal kernel use only. Similar to urandom but |
@@ -1483,7 +1482,7 @@ unsigned int get_random_int(void) | |||
1483 | 1482 | ||
1484 | hash = get_cpu_var(get_random_int_hash); | 1483 | hash = get_cpu_var(get_random_int_hash); |
1485 | 1484 | ||
1486 | hash[0] += current->pid + jiffies + get_cycles(); | 1485 | hash[0] += current->pid + jiffies + random_get_entropy(); |
1487 | md5_transform(hash, random_int_secret); | 1486 | md5_transform(hash, random_int_secret); |
1488 | ret = hash[0]; | 1487 | ret = hash[0]; |
1489 | put_cpu_var(get_random_int_hash); | 1488 | put_cpu_var(get_random_int_hash); |
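Three of the random.c hunks replace get_cycles() with random_get_entropy(). The latter is an arch-overridable macro from <linux/timex.h>; its generic fallback is essentially just get_cycles(), but an architecture whose get_cycles() always returns 0 can point it at some other free-running counter so event timing still feeds the pools. The removal of the late_initcall on random_int_secret_init() pairs with a caller added outside drivers/ (not shown in this diffstat), apparently so the secret is initialized at a well-defined point in boot rather than by initcall ordering. The fallback, roughly as defined upstream (quoted from memory, not from this commit):

```c
/*
 * Generic fallback, approximately as in <linux/timex.h>: architectures
 * without a useful cycle counter are expected to #define their own
 * random_get_entropy() before this point.
 */
#ifndef random_get_entropy
#define random_get_entropy()	get_cycles()
#endif
```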
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c index 7a7929ba2658..94c280d36e8b 100644 --- a/drivers/char/tpm/xen-tpmfront.c +++ b/drivers/char/tpm/xen-tpmfront.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/errno.h> | 10 | #include <linux/errno.h> |
11 | #include <linux/err.h> | 11 | #include <linux/err.h> |
12 | #include <linux/interrupt.h> | 12 | #include <linux/interrupt.h> |
13 | #include <xen/xen.h> | ||
13 | #include <xen/events.h> | 14 | #include <xen/events.h> |
14 | #include <xen/interface/io/tpmif.h> | 15 | #include <xen/interface/io/tpmif.h> |
15 | #include <xen/grant_table.h> | 16 | #include <xen/grant_table.h> |
@@ -142,32 +143,6 @@ static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) | |||
142 | return length; | 143 | return length; |
143 | } | 144 | } |
144 | 145 | ||
145 | ssize_t tpm_show_locality(struct device *dev, struct device_attribute *attr, | ||
146 | char *buf) | ||
147 | { | ||
148 | struct tpm_chip *chip = dev_get_drvdata(dev); | ||
149 | struct tpm_private *priv = TPM_VPRIV(chip); | ||
150 | u8 locality = priv->shr->locality; | ||
151 | |||
152 | return sprintf(buf, "%d\n", locality); | ||
153 | } | ||
154 | |||
155 | ssize_t tpm_store_locality(struct device *dev, struct device_attribute *attr, | ||
156 | const char *buf, size_t len) | ||
157 | { | ||
158 | struct tpm_chip *chip = dev_get_drvdata(dev); | ||
159 | struct tpm_private *priv = TPM_VPRIV(chip); | ||
160 | u8 val; | ||
161 | |||
162 | int rv = kstrtou8(buf, 0, &val); | ||
163 | if (rv) | ||
164 | return rv; | ||
165 | |||
166 | priv->shr->locality = val; | ||
167 | |||
168 | return len; | ||
169 | } | ||
170 | |||
171 | static const struct file_operations vtpm_ops = { | 146 | static const struct file_operations vtpm_ops = { |
172 | .owner = THIS_MODULE, | 147 | .owner = THIS_MODULE, |
173 | .llseek = no_llseek, | 148 | .llseek = no_llseek, |
@@ -188,8 +163,6 @@ static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL); | |||
188 | static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); | 163 | static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); |
189 | static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL); | 164 | static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL); |
190 | static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL); | 165 | static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL); |
191 | static DEVICE_ATTR(locality, S_IRUGO | S_IWUSR, tpm_show_locality, | ||
192 | tpm_store_locality); | ||
193 | 166 | ||
194 | static struct attribute *vtpm_attrs[] = { | 167 | static struct attribute *vtpm_attrs[] = { |
195 | &dev_attr_pubek.attr, | 168 | &dev_attr_pubek.attr, |
@@ -202,7 +175,6 @@ static struct attribute *vtpm_attrs[] = { | |||
202 | &dev_attr_cancel.attr, | 175 | &dev_attr_cancel.attr, |
203 | &dev_attr_durations.attr, | 176 | &dev_attr_durations.attr, |
204 | &dev_attr_timeouts.attr, | 177 | &dev_attr_timeouts.attr, |
205 | &dev_attr_locality.attr, | ||
206 | NULL, | 178 | NULL, |
207 | }; | 179 | }; |
208 | 180 | ||
@@ -210,8 +182,6 @@ static struct attribute_group vtpm_attr_grp = { | |||
210 | .attrs = vtpm_attrs, | 182 | .attrs = vtpm_attrs, |
211 | }; | 183 | }; |
212 | 184 | ||
213 | #define TPM_LONG_TIMEOUT (10 * 60 * HZ) | ||
214 | |||
215 | static const struct tpm_vendor_specific tpm_vtpm = { | 185 | static const struct tpm_vendor_specific tpm_vtpm = { |
216 | .status = vtpm_status, | 186 | .status = vtpm_status, |
217 | .recv = vtpm_recv, | 187 | .recv = vtpm_recv, |
@@ -224,11 +194,6 @@ static const struct tpm_vendor_specific tpm_vtpm = { | |||
224 | .miscdev = { | 194 | .miscdev = { |
225 | .fops = &vtpm_ops, | 195 | .fops = &vtpm_ops, |
226 | }, | 196 | }, |
227 | .duration = { | ||
228 | TPM_LONG_TIMEOUT, | ||
229 | TPM_LONG_TIMEOUT, | ||
230 | TPM_LONG_TIMEOUT, | ||
231 | }, | ||
232 | }; | 197 | }; |
233 | 198 | ||
234 | static irqreturn_t tpmif_interrupt(int dummy, void *dev_id) | 199 | static irqreturn_t tpmif_interrupt(int dummy, void *dev_id) |
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 41c69469ce20..971d796e071d 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig | |||
@@ -26,6 +26,7 @@ config DW_APB_TIMER_OF | |||
26 | 26 | ||
27 | config ARMADA_370_XP_TIMER | 27 | config ARMADA_370_XP_TIMER |
28 | bool | 28 | bool |
29 | select CLKSRC_OF | ||
29 | 30 | ||
30 | config ORION_TIMER | 31 | config ORION_TIMER |
31 | select CLKSRC_OF | 32 | select CLKSRC_OF |
diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c index 37f5325bec95..b9ddd9e3a2f5 100644 --- a/drivers/clocksource/clksrc-of.c +++ b/drivers/clocksource/clksrc-of.c | |||
@@ -30,6 +30,9 @@ void __init clocksource_of_init(void) | |||
30 | clocksource_of_init_fn init_func; | 30 | clocksource_of_init_fn init_func; |
31 | 31 | ||
32 | for_each_matching_node_and_match(np, __clksrc_of_table, &match) { | 32 | for_each_matching_node_and_match(np, __clksrc_of_table, &match) { |
33 | if (!of_device_is_available(np)) | ||
34 | continue; | ||
35 | |||
33 | init_func = match->data; | 36 | init_func = match->data; |
34 | init_func(np); | 37 | init_func(np); |
35 | } | 38 | } |
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c index b9c81b7c3a3b..3a5909c12d42 100644 --- a/drivers/clocksource/em_sti.c +++ b/drivers/clocksource/em_sti.c | |||
@@ -301,7 +301,7 @@ static void em_sti_register_clockevent(struct em_sti_priv *p) | |||
301 | ced->name = dev_name(&p->pdev->dev); | 301 | ced->name = dev_name(&p->pdev->dev); |
302 | ced->features = CLOCK_EVT_FEAT_ONESHOT; | 302 | ced->features = CLOCK_EVT_FEAT_ONESHOT; |
303 | ced->rating = 200; | 303 | ced->rating = 200; |
304 | ced->cpumask = cpumask_of(0); | 304 | ced->cpumask = cpu_possible_mask; |
305 | ced->set_next_event = em_sti_clock_event_next; | 305 | ced->set_next_event = em_sti_clock_event_next; |
306 | ced->set_mode = em_sti_clock_event_mode; | 306 | ced->set_mode = em_sti_clock_event_mode; |
307 | 307 | ||
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index 5b34768f4d7c..62b0de6a1837 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c | |||
@@ -428,7 +428,6 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt) | |||
428 | evt->irq); | 428 | evt->irq); |
429 | return -EIO; | 429 | return -EIO; |
430 | } | 430 | } |
431 | irq_set_affinity(evt->irq, cpumask_of(cpu)); | ||
432 | } else { | 431 | } else { |
433 | enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0); | 432 | enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0); |
434 | } | 433 | } |
@@ -449,6 +448,7 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self, | |||
449 | unsigned long action, void *hcpu) | 448 | unsigned long action, void *hcpu) |
450 | { | 449 | { |
451 | struct mct_clock_event_device *mevt; | 450 | struct mct_clock_event_device *mevt; |
451 | unsigned int cpu; | ||
452 | 452 | ||
453 | /* | 453 | /* |
454 | * Grab cpu pointer in each case to avoid spurious | 454 | * Grab cpu pointer in each case to avoid spurious |
@@ -459,6 +459,12 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self, | |||
459 | mevt = this_cpu_ptr(&percpu_mct_tick); | 459 | mevt = this_cpu_ptr(&percpu_mct_tick); |
460 | exynos4_local_timer_setup(&mevt->evt); | 460 | exynos4_local_timer_setup(&mevt->evt); |
461 | break; | 461 | break; |
462 | case CPU_ONLINE: | ||
463 | cpu = (unsigned long)hcpu; | ||
464 | if (mct_int_type == MCT_INT_SPI) | ||
465 | irq_set_affinity(mct_irqs[MCT_L0_IRQ + cpu], | ||
466 | cpumask_of(cpu)); | ||
467 | break; | ||
462 | case CPU_DYING: | 468 | case CPU_DYING: |
463 | mevt = this_cpu_ptr(&percpu_mct_tick); | 469 | mevt = this_cpu_ptr(&percpu_mct_tick); |
464 | exynos4_local_timer_stop(&mevt->evt); | 470 | exynos4_local_timer_stop(&mevt->evt); |
@@ -500,6 +506,8 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem | |||
500 | &percpu_mct_tick); | 506 | &percpu_mct_tick); |
501 | WARN(err, "MCT: can't request IRQ %d (%d)\n", | 507 | WARN(err, "MCT: can't request IRQ %d (%d)\n", |
502 | mct_irqs[MCT_L0_IRQ], err); | 508 | mct_irqs[MCT_L0_IRQ], err); |
509 | } else { | ||
510 | irq_set_affinity(mct_irqs[MCT_L0_IRQ], cpumask_of(0)); | ||
503 | } | 511 | } |
504 | 512 | ||
505 | err = register_cpu_notifier(&exynos4_mct_cpu_nb); | 513 | err = register_cpu_notifier(&exynos4_mct_cpu_nb); |
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index a1260b4549db..d2c3253e015e 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c | |||
@@ -986,6 +986,10 @@ static int __init acpi_cpufreq_init(void) | |||
986 | { | 986 | { |
987 | int ret; | 987 | int ret; |
988 | 988 | ||
989 | /* don't keep reloading if cpufreq_driver exists */ | ||
990 | if (cpufreq_get_current_driver()) | ||
991 | return 0; | ||
992 | |||
989 | if (acpi_disabled) | 993 | if (acpi_disabled) |
990 | return 0; | 994 | return 0; |
991 | 995 | ||
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c index 78c49d8e0f4a..c522a95c0e16 100644 --- a/drivers/cpufreq/cpufreq-cpu0.c +++ b/drivers/cpufreq/cpufreq-cpu0.c | |||
@@ -229,7 +229,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) | |||
229 | if (of_property_read_u32(np, "clock-latency", &transition_latency)) | 229 | if (of_property_read_u32(np, "clock-latency", &transition_latency)) |
230 | transition_latency = CPUFREQ_ETERNAL; | 230 | transition_latency = CPUFREQ_ETERNAL; |
231 | 231 | ||
232 | if (cpu_reg) { | 232 | if (!IS_ERR(cpu_reg)) { |
233 | struct opp *opp; | 233 | struct opp *opp; |
234 | unsigned long min_uV, max_uV; | 234 | unsigned long min_uV, max_uV; |
235 | int i; | 235 | int i; |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 89b3c52cd5c3..04548f7023af 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -1460,6 +1460,9 @@ unsigned int cpufreq_get(unsigned int cpu) | |||
1460 | { | 1460 | { |
1461 | unsigned int ret_freq = 0; | 1461 | unsigned int ret_freq = 0; |
1462 | 1462 | ||
1463 | if (cpufreq_disabled() || !cpufreq_driver) | ||
1464 | return -ENOENT; | ||
1465 | |||
1463 | if (!down_read_trylock(&cpufreq_rwsem)) | 1466 | if (!down_read_trylock(&cpufreq_rwsem)) |
1464 | return 0; | 1467 | return 0; |
1465 | 1468 | ||
diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c index d514c152fd1a..be5380ecdcd4 100644 --- a/drivers/cpufreq/exynos5440-cpufreq.c +++ b/drivers/cpufreq/exynos5440-cpufreq.c | |||
@@ -457,7 +457,7 @@ err_free_table: | |||
457 | opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); | 457 | opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table); |
458 | err_put_node: | 458 | err_put_node: |
459 | of_node_put(np); | 459 | of_node_put(np); |
460 | dev_err(dvfs_info->dev, "%s: failed initialization\n", __func__); | 460 | dev_err(&pdev->dev, "%s: failed initialization\n", __func__); |
461 | return ret; | 461 | return ret; |
462 | } | 462 | } |
463 | 463 | ||
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 9733f29ed148..32b3479a2405 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
@@ -394,7 +394,10 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) | |||
394 | trace_cpu_frequency(pstate * 100000, cpu->cpu); | 394 | trace_cpu_frequency(pstate * 100000, cpu->cpu); |
395 | 395 | ||
396 | cpu->pstate.current_pstate = pstate; | 396 | cpu->pstate.current_pstate = pstate; |
397 | wrmsrl(MSR_IA32_PERF_CTL, pstate << 8); | 397 | if (limits.no_turbo) |
398 | wrmsrl(MSR_IA32_PERF_CTL, BIT(32) | (pstate << 8)); | ||
399 | else | ||
400 | wrmsrl(MSR_IA32_PERF_CTL, pstate << 8); | ||
398 | 401 | ||
399 | } | 402 | } |
400 | 403 | ||
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c index 19e364fa5955..3f418166ce02 100644 --- a/drivers/cpufreq/spear-cpufreq.c +++ b/drivers/cpufreq/spear-cpufreq.c | |||
@@ -113,7 +113,7 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy, | |||
113 | unsigned int target_freq, unsigned int relation) | 113 | unsigned int target_freq, unsigned int relation) |
114 | { | 114 | { |
115 | struct cpufreq_freqs freqs; | 115 | struct cpufreq_freqs freqs; |
116 | unsigned long newfreq; | 116 | long newfreq; |
117 | struct clk *srcclk; | 117 | struct clk *srcclk; |
118 | int index, ret, mult = 1; | 118 | int index, ret, mult = 1; |
119 | 119 | ||
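The small spear-cpufreq change above is a sign fix: clk_round_rate() returns a long precisely so it can hand back negative error codes, and storing the result in an unsigned long turns those errors into absurdly large "frequencies" that a newfreq < 0 check can never catch. A hedged sketch of the intended usage (example_set_rate is an invented helper):

```c
#include <linux/clk.h>

static int example_set_rate(struct clk *clk, unsigned long target_hz)
{
	long rounded = clk_round_rate(clk, target_hz);	/* may be negative */

	if (rounded < 0)
		return rounded;		/* propagate the error, e.g. -EINVAL */

	return clk_set_rate(clk, rounded);
}
```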
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 526ec77c7ba0..f238cfd33847 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
@@ -198,6 +198,7 @@ config TI_EDMA | |||
198 | depends on ARCH_DAVINCI || ARCH_OMAP | 198 | depends on ARCH_DAVINCI || ARCH_OMAP |
199 | select DMA_ENGINE | 199 | select DMA_ENGINE |
200 | select DMA_VIRTUAL_CHANNELS | 200 | select DMA_VIRTUAL_CHANNELS |
201 | select TI_PRIV_EDMA | ||
201 | default n | 202 | default n |
202 | help | 203 | help |
203 | Enable support for the TI EDMA controller. This DMA | 204 | Enable support for the TI EDMA controller. This DMA |
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index ff50ff4c6a57..3519111c566b 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c | |||
@@ -306,6 +306,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( | |||
306 | EDMA_SLOT_ANY); | 306 | EDMA_SLOT_ANY); |
307 | if (echan->slot[i] < 0) { | 307 | if (echan->slot[i] < 0) { |
308 | dev_err(dev, "Failed to allocate slot\n"); | 308 | dev_err(dev, "Failed to allocate slot\n"); |
309 | kfree(edesc); | ||
309 | return NULL; | 310 | return NULL; |
310 | } | 311 | } |
311 | } | 312 | } |
@@ -749,6 +750,6 @@ static void __exit edma_exit(void) | |||
749 | } | 750 | } |
750 | module_exit(edma_exit); | 751 | module_exit(edma_exit); |
751 | 752 | ||
752 | MODULE_AUTHOR("Matt Porter <mporter@ti.com>"); | 753 | MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>"); |
753 | MODULE_DESCRIPTION("TI EDMA DMA engine driver"); | 754 | MODULE_DESCRIPTION("TI EDMA DMA engine driver"); |
754 | MODULE_LICENSE("GPL v2"); | 755 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 78f8ca5fccee..55852c026791 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c | |||
@@ -437,17 +437,18 @@ static void dma_irq_handle_channel(struct imxdma_channel *imxdmac) | |||
437 | struct imxdma_engine *imxdma = imxdmac->imxdma; | 437 | struct imxdma_engine *imxdma = imxdmac->imxdma; |
438 | int chno = imxdmac->channel; | 438 | int chno = imxdmac->channel; |
439 | struct imxdma_desc *desc; | 439 | struct imxdma_desc *desc; |
440 | unsigned long flags; | ||
440 | 441 | ||
441 | spin_lock(&imxdma->lock); | 442 | spin_lock_irqsave(&imxdma->lock, flags); |
442 | if (list_empty(&imxdmac->ld_active)) { | 443 | if (list_empty(&imxdmac->ld_active)) { |
443 | spin_unlock(&imxdma->lock); | 444 | spin_unlock_irqrestore(&imxdma->lock, flags); |
444 | goto out; | 445 | goto out; |
445 | } | 446 | } |
446 | 447 | ||
447 | desc = list_first_entry(&imxdmac->ld_active, | 448 | desc = list_first_entry(&imxdmac->ld_active, |
448 | struct imxdma_desc, | 449 | struct imxdma_desc, |
449 | node); | 450 | node); |
450 | spin_unlock(&imxdma->lock); | 451 | spin_unlock_irqrestore(&imxdma->lock, flags); |
451 | 452 | ||
452 | if (desc->sg) { | 453 | if (desc->sg) { |
453 | u32 tmp; | 454 | u32 tmp; |
@@ -519,7 +520,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) | |||
519 | { | 520 | { |
520 | struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); | 521 | struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); |
521 | struct imxdma_engine *imxdma = imxdmac->imxdma; | 522 | struct imxdma_engine *imxdma = imxdmac->imxdma; |
522 | unsigned long flags; | ||
523 | int slot = -1; | 523 | int slot = -1; |
524 | int i; | 524 | int i; |
525 | 525 | ||
@@ -527,7 +527,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) | |||
527 | switch (d->type) { | 527 | switch (d->type) { |
528 | case IMXDMA_DESC_INTERLEAVED: | 528 | case IMXDMA_DESC_INTERLEAVED: |
529 | /* Try to get a free 2D slot */ | 529 | /* Try to get a free 2D slot */ |
530 | spin_lock_irqsave(&imxdma->lock, flags); | ||
531 | for (i = 0; i < IMX_DMA_2D_SLOTS; i++) { | 530 | for (i = 0; i < IMX_DMA_2D_SLOTS; i++) { |
532 | if ((imxdma->slots_2d[i].count > 0) && | 531 | if ((imxdma->slots_2d[i].count > 0) && |
533 | ((imxdma->slots_2d[i].xsr != d->x) || | 532 | ((imxdma->slots_2d[i].xsr != d->x) || |
@@ -537,10 +536,8 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) | |||
537 | slot = i; | 536 | slot = i; |
538 | break; | 537 | break; |
539 | } | 538 | } |
540 | if (slot < 0) { | 539 | if (slot < 0) |
541 | spin_unlock_irqrestore(&imxdma->lock, flags); | ||
542 | return -EBUSY; | 540 | return -EBUSY; |
543 | } | ||
544 | 541 | ||
545 | imxdma->slots_2d[slot].xsr = d->x; | 542 | imxdma->slots_2d[slot].xsr = d->x; |
546 | imxdma->slots_2d[slot].ysr = d->y; | 543 | imxdma->slots_2d[slot].ysr = d->y; |
@@ -549,7 +546,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) | |||
549 | 546 | ||
550 | imxdmac->slot_2d = slot; | 547 | imxdmac->slot_2d = slot; |
551 | imxdmac->enabled_2d = true; | 548 | imxdmac->enabled_2d = true; |
552 | spin_unlock_irqrestore(&imxdma->lock, flags); | ||
553 | 549 | ||
554 | if (slot == IMX_DMA_2D_SLOT_A) { | 550 | if (slot == IMX_DMA_2D_SLOT_A) { |
555 | d->config_mem &= ~CCR_MSEL_B; | 551 | d->config_mem &= ~CCR_MSEL_B; |
@@ -625,18 +621,17 @@ static void imxdma_tasklet(unsigned long data) | |||
625 | struct imxdma_channel *imxdmac = (void *)data; | 621 | struct imxdma_channel *imxdmac = (void *)data; |
626 | struct imxdma_engine *imxdma = imxdmac->imxdma; | 622 | struct imxdma_engine *imxdma = imxdmac->imxdma; |
627 | struct imxdma_desc *desc; | 623 | struct imxdma_desc *desc; |
624 | unsigned long flags; | ||
628 | 625 | ||
629 | spin_lock(&imxdma->lock); | 626 | spin_lock_irqsave(&imxdma->lock, flags); |
630 | 627 | ||
631 | if (list_empty(&imxdmac->ld_active)) { | 628 | if (list_empty(&imxdmac->ld_active)) { |
632 | /* Someone might have called terminate all */ | 629 | /* Someone might have called terminate all */ |
633 | goto out; | 630 | spin_unlock_irqrestore(&imxdma->lock, flags); |
631 | return; | ||
634 | } | 632 | } |
635 | desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node); | 633 | desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node); |
636 | 634 | ||
637 | if (desc->desc.callback) | ||
638 | desc->desc.callback(desc->desc.callback_param); | ||
639 | |||
640 | /* If we are dealing with a cyclic descriptor, keep it on ld_active | 635 | /* If we are dealing with a cyclic descriptor, keep it on ld_active |
641 | * and dont mark the descriptor as complete. | 636 | * and dont mark the descriptor as complete. |
642 | * Only in non-cyclic cases it would be marked as complete | 637 | * Only in non-cyclic cases it would be marked as complete |
@@ -663,7 +658,11 @@ static void imxdma_tasklet(unsigned long data) | |||
663 | __func__, imxdmac->channel); | 658 | __func__, imxdmac->channel); |
664 | } | 659 | } |
665 | out: | 660 | out: |
666 | spin_unlock(&imxdma->lock); | 661 | spin_unlock_irqrestore(&imxdma->lock, flags); |
662 | |||
663 | if (desc->desc.callback) | ||
664 | desc->desc.callback(desc->desc.callback_param); | ||
665 | |||
667 | } | 666 | } |
668 | 667 | ||
669 | static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 668 | static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
@@ -883,7 +882,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( | |||
883 | kfree(imxdmac->sg_list); | 882 | kfree(imxdmac->sg_list); |
884 | 883 | ||
885 | imxdmac->sg_list = kcalloc(periods + 1, | 884 | imxdmac->sg_list = kcalloc(periods + 1, |
886 | sizeof(struct scatterlist), GFP_KERNEL); | 885 | sizeof(struct scatterlist), GFP_ATOMIC); |
887 | if (!imxdmac->sg_list) | 886 | if (!imxdmac->sg_list) |
888 | return NULL; | 887 | return NULL; |
889 | 888 | ||
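The imx-dma changes bundle two related fixes: every taker of imxdma->lock that can race with the interrupt handler now uses the irqsave variants, and the tasklet stops invoking the descriptor's completion callback while the lock is held, deferring it until after the unlock so a callback that immediately submits more work (and so retakes the lock) cannot deadlock; the GFP_KERNEL to GFP_ATOMIC switch in the cyclic prep path fits the same theme, since prep callbacks may run in non-sleeping context. A self-contained sketch of the "callback outside the lock" tasklet shape, with placeholder types (example_chan, example_desc):

```c
#include <linux/list.h>
#include <linux/spinlock.h>

struct example_desc {
	struct list_head node;
	void (*callback)(void *param);
	void *callback_param;
};

struct example_chan {
	spinlock_t lock;
	struct list_head active;
	struct list_head done;
};

static void example_dma_tasklet(unsigned long data)
{
	struct example_chan *chan = (struct example_chan *)data;
	struct example_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	desc = list_first_entry_or_null(&chan->active,
					struct example_desc, node);
	if (desc)
		list_move_tail(&desc->node, &chan->done);
	spin_unlock_irqrestore(&chan->lock, flags);

	/*
	 * Run the completion callback only after dropping the lock:
	 * it may submit a new descriptor and retake chan->lock.
	 */
	if (desc && desc->callback)
		desc->callback(desc->callback_param);
}
```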
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c index 45a520281ce1..ebad84591a6e 100644 --- a/drivers/dma/sh/rcar-hpbdma.c +++ b/drivers/dma/sh/rcar-hpbdma.c | |||
@@ -93,6 +93,7 @@ struct hpb_dmae_chan { | |||
93 | void __iomem *base; | 93 | void __iomem *base; |
94 | const struct hpb_dmae_slave_config *cfg; | 94 | const struct hpb_dmae_slave_config *cfg; |
95 | char dev_id[16]; /* unique name per DMAC of channel */ | 95 | char dev_id[16]; /* unique name per DMAC of channel */ |
96 | dma_addr_t slave_addr; | ||
96 | }; | 97 | }; |
97 | 98 | ||
98 | struct hpb_dmae_device { | 99 | struct hpb_dmae_device { |
@@ -432,7 +433,6 @@ hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan, | |||
432 | hpb_chan->xfer_mode = XFER_DOUBLE; | 433 | hpb_chan->xfer_mode = XFER_DOUBLE; |
433 | } else { | 434 | } else { |
434 | dev_err(hpb_chan->shdma_chan.dev, "DCR setting error"); | 435 | dev_err(hpb_chan->shdma_chan.dev, "DCR setting error"); |
435 | shdma_free_irq(&hpb_chan->shdma_chan); | ||
436 | return -EINVAL; | 436 | return -EINVAL; |
437 | } | 437 | } |
438 | 438 | ||
@@ -446,7 +446,8 @@ hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan, | |||
446 | return 0; | 446 | return 0; |
447 | } | 447 | } |
448 | 448 | ||
449 | static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id, bool try) | 449 | static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id, |
450 | dma_addr_t slave_addr, bool try) | ||
450 | { | 451 | { |
451 | struct hpb_dmae_chan *chan = to_chan(schan); | 452 | struct hpb_dmae_chan *chan = to_chan(schan); |
452 | const struct hpb_dmae_slave_config *sc = | 453 | const struct hpb_dmae_slave_config *sc = |
@@ -457,6 +458,7 @@ static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id, bool try) | |||
457 | if (try) | 458 | if (try) |
458 | return 0; | 459 | return 0; |
459 | chan->cfg = sc; | 460 | chan->cfg = sc; |
461 | chan->slave_addr = slave_addr ? : sc->addr; | ||
460 | return hpb_dmae_alloc_chan_resources(chan, sc); | 462 | return hpb_dmae_alloc_chan_resources(chan, sc); |
461 | } | 463 | } |
462 | 464 | ||
@@ -468,7 +470,7 @@ static dma_addr_t hpb_dmae_slave_addr(struct shdma_chan *schan) | |||
468 | { | 470 | { |
469 | struct hpb_dmae_chan *chan = to_chan(schan); | 471 | struct hpb_dmae_chan *chan = to_chan(schan); |
470 | 472 | ||
471 | return chan->cfg->addr; | 473 | return chan->slave_addr; |
472 | } | 474 | } |
473 | 475 | ||
474 | static struct shdma_desc *hpb_dmae_embedded_desc(void *buf, int i) | 476 | static struct shdma_desc *hpb_dmae_embedded_desc(void *buf, int i) |
@@ -614,7 +616,6 @@ static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev) | |||
614 | shdma_for_each_chan(schan, &hpbdev->shdma_dev, i) { | 616 | shdma_for_each_chan(schan, &hpbdev->shdma_dev, i) { |
615 | BUG_ON(!schan); | 617 | BUG_ON(!schan); |
616 | 618 | ||
617 | shdma_free_irq(schan); | ||
618 | shdma_chan_remove(schan); | 619 | shdma_chan_remove(schan); |
619 | } | 620 | } |
620 | dma_dev->chancnt = 0; | 621 | dma_dev->chancnt = 0; |
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index 0ff43552d472..89675f862308 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c | |||
@@ -63,6 +63,7 @@ struct gpio_bank { | |||
63 | struct gpio_chip chip; | 63 | struct gpio_chip chip; |
64 | struct clk *dbck; | 64 | struct clk *dbck; |
65 | u32 mod_usage; | 65 | u32 mod_usage; |
66 | u32 irq_usage; | ||
66 | u32 dbck_enable_mask; | 67 | u32 dbck_enable_mask; |
67 | bool dbck_enabled; | 68 | bool dbck_enabled; |
68 | struct device *dev; | 69 | struct device *dev; |
@@ -86,6 +87,9 @@ struct gpio_bank { | |||
86 | #define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio)) | 87 | #define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio)) |
87 | #define GPIO_MOD_CTRL_BIT BIT(0) | 88 | #define GPIO_MOD_CTRL_BIT BIT(0) |
88 | 89 | ||
90 | #define BANK_USED(bank) (bank->mod_usage || bank->irq_usage) | ||
91 | #define LINE_USED(line, offset) (line & (1 << offset)) | ||
92 | |||
89 | static int irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq) | 93 | static int irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq) |
90 | { | 94 | { |
91 | return bank->chip.base + gpio_irq; | 95 | return bank->chip.base + gpio_irq; |
@@ -420,15 +424,69 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, | |||
420 | return 0; | 424 | return 0; |
421 | } | 425 | } |
422 | 426 | ||
427 | static void _enable_gpio_module(struct gpio_bank *bank, unsigned offset) | ||
428 | { | ||
429 | if (bank->regs->pinctrl) { | ||
430 | void __iomem *reg = bank->base + bank->regs->pinctrl; | ||
431 | |||
432 | /* Claim the pin for MPU */ | ||
433 | __raw_writel(__raw_readl(reg) | (1 << offset), reg); | ||
434 | } | ||
435 | |||
436 | if (bank->regs->ctrl && !BANK_USED(bank)) { | ||
437 | void __iomem *reg = bank->base + bank->regs->ctrl; | ||
438 | u32 ctrl; | ||
439 | |||
440 | ctrl = __raw_readl(reg); | ||
441 | /* Module is enabled, clocks are not gated */ | ||
442 | ctrl &= ~GPIO_MOD_CTRL_BIT; | ||
443 | __raw_writel(ctrl, reg); | ||
444 | bank->context.ctrl = ctrl; | ||
445 | } | ||
446 | } | ||
447 | |||
448 | static void _disable_gpio_module(struct gpio_bank *bank, unsigned offset) | ||
449 | { | ||
450 | void __iomem *base = bank->base; | ||
451 | |||
452 | if (bank->regs->wkup_en && | ||
453 | !LINE_USED(bank->mod_usage, offset) && | ||
454 | !LINE_USED(bank->irq_usage, offset)) { | ||
455 | /* Disable wake-up during idle for dynamic tick */ | ||
456 | _gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0); | ||
457 | bank->context.wake_en = | ||
458 | __raw_readl(bank->base + bank->regs->wkup_en); | ||
459 | } | ||
460 | |||
461 | if (bank->regs->ctrl && !BANK_USED(bank)) { | ||
462 | void __iomem *reg = bank->base + bank->regs->ctrl; | ||
463 | u32 ctrl; | ||
464 | |||
465 | ctrl = __raw_readl(reg); | ||
466 | /* Module is disabled, clocks are gated */ | ||
467 | ctrl |= GPIO_MOD_CTRL_BIT; | ||
468 | __raw_writel(ctrl, reg); | ||
469 | bank->context.ctrl = ctrl; | ||
470 | } | ||
471 | } | ||
472 | |||
473 | static int gpio_is_input(struct gpio_bank *bank, int mask) | ||
474 | { | ||
475 | void __iomem *reg = bank->base + bank->regs->direction; | ||
476 | |||
477 | return __raw_readl(reg) & mask; | ||
478 | } | ||
479 | |||
423 | static int gpio_irq_type(struct irq_data *d, unsigned type) | 480 | static int gpio_irq_type(struct irq_data *d, unsigned type) |
424 | { | 481 | { |
425 | struct gpio_bank *bank = irq_data_get_irq_chip_data(d); | 482 | struct gpio_bank *bank = irq_data_get_irq_chip_data(d); |
426 | unsigned gpio = 0; | 483 | unsigned gpio = 0; |
427 | int retval; | 484 | int retval; |
428 | unsigned long flags; | 485 | unsigned long flags; |
486 | unsigned offset; | ||
429 | 487 | ||
430 | if (WARN_ON(!bank->mod_usage)) | 488 | if (!BANK_USED(bank)) |
431 | return -EINVAL; | 489 | pm_runtime_get_sync(bank->dev); |
432 | 490 | ||
433 | #ifdef CONFIG_ARCH_OMAP1 | 491 | #ifdef CONFIG_ARCH_OMAP1 |
434 | if (d->irq > IH_MPUIO_BASE) | 492 | if (d->irq > IH_MPUIO_BASE) |
@@ -446,7 +504,17 @@ static int gpio_irq_type(struct irq_data *d, unsigned type) | |||
446 | return -EINVAL; | 504 | return -EINVAL; |
447 | 505 | ||
448 | spin_lock_irqsave(&bank->lock, flags); | 506 | spin_lock_irqsave(&bank->lock, flags); |
449 | retval = _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), type); | 507 | offset = GPIO_INDEX(bank, gpio); |
508 | retval = _set_gpio_triggering(bank, offset, type); | ||
509 | if (!LINE_USED(bank->mod_usage, offset)) { | ||
510 | _enable_gpio_module(bank, offset); | ||
511 | _set_gpio_direction(bank, offset, 1); | ||
512 | } else if (!gpio_is_input(bank, 1 << offset)) { | ||
513 | spin_unlock_irqrestore(&bank->lock, flags); | ||
514 | return -EINVAL; | ||
515 | } | ||
516 | |||
517 | bank->irq_usage |= 1 << GPIO_INDEX(bank, gpio); | ||
450 | spin_unlock_irqrestore(&bank->lock, flags); | 518 | spin_unlock_irqrestore(&bank->lock, flags); |
451 | 519 | ||
452 | if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) | 520 | if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) |
@@ -603,35 +671,19 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset) | |||
603 | * If this is the first gpio_request for the bank, | 671 | * If this is the first gpio_request for the bank, |
604 | * enable the bank module. | 672 | * enable the bank module. |
605 | */ | 673 | */ |
606 | if (!bank->mod_usage) | 674 | if (!BANK_USED(bank)) |
607 | pm_runtime_get_sync(bank->dev); | 675 | pm_runtime_get_sync(bank->dev); |
608 | 676 | ||
609 | spin_lock_irqsave(&bank->lock, flags); | 677 | spin_lock_irqsave(&bank->lock, flags); |
610 | /* Set trigger to none. You need to enable the desired trigger with | 678 | /* Set trigger to none. You need to enable the desired trigger with |
611 | * request_irq() or set_irq_type(). | 679 | * request_irq() or set_irq_type(). Only do this if the IRQ line has |
680 | * not already been requested. | ||
612 | */ | 681 | */ |
613 | _set_gpio_triggering(bank, offset, IRQ_TYPE_NONE); | 682 | if (!LINE_USED(bank->irq_usage, offset)) { |
614 | 683 | _set_gpio_triggering(bank, offset, IRQ_TYPE_NONE); | |
615 | if (bank->regs->pinctrl) { | 684 | _enable_gpio_module(bank, offset); |
616 | void __iomem *reg = bank->base + bank->regs->pinctrl; | ||
617 | |||
618 | /* Claim the pin for MPU */ | ||
619 | __raw_writel(__raw_readl(reg) | (1 << offset), reg); | ||
620 | } | ||
621 | |||
622 | if (bank->regs->ctrl && !bank->mod_usage) { | ||
623 | void __iomem *reg = bank->base + bank->regs->ctrl; | ||
624 | u32 ctrl; | ||
625 | |||
626 | ctrl = __raw_readl(reg); | ||
627 | /* Module is enabled, clocks are not gated */ | ||
628 | ctrl &= ~GPIO_MOD_CTRL_BIT; | ||
629 | __raw_writel(ctrl, reg); | ||
630 | bank->context.ctrl = ctrl; | ||
631 | } | 685 | } |
632 | |||
633 | bank->mod_usage |= 1 << offset; | 686 | bank->mod_usage |= 1 << offset; |
634 | |||
635 | spin_unlock_irqrestore(&bank->lock, flags); | 687 | spin_unlock_irqrestore(&bank->lock, flags); |
636 | 688 | ||
637 | return 0; | 689 | return 0; |
@@ -640,31 +692,11 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset) | |||
640 | static void omap_gpio_free(struct gpio_chip *chip, unsigned offset) | 692 | static void omap_gpio_free(struct gpio_chip *chip, unsigned offset) |
641 | { | 693 | { |
642 | struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip); | 694 | struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip); |
643 | void __iomem *base = bank->base; | ||
644 | unsigned long flags; | 695 | unsigned long flags; |
645 | 696 | ||
646 | spin_lock_irqsave(&bank->lock, flags); | 697 | spin_lock_irqsave(&bank->lock, flags); |
647 | |||
648 | if (bank->regs->wkup_en) { | ||
649 | /* Disable wake-up during idle for dynamic tick */ | ||
650 | _gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0); | ||
651 | bank->context.wake_en = | ||
652 | __raw_readl(bank->base + bank->regs->wkup_en); | ||
653 | } | ||
654 | |||
655 | bank->mod_usage &= ~(1 << offset); | 698 | bank->mod_usage &= ~(1 << offset); |
656 | 699 | _disable_gpio_module(bank, offset); | |
657 | if (bank->regs->ctrl && !bank->mod_usage) { | ||
658 | void __iomem *reg = bank->base + bank->regs->ctrl; | ||
659 | u32 ctrl; | ||
660 | |||
661 | ctrl = __raw_readl(reg); | ||
662 | /* Module is disabled, clocks are gated */ | ||
663 | ctrl |= GPIO_MOD_CTRL_BIT; | ||
664 | __raw_writel(ctrl, reg); | ||
665 | bank->context.ctrl = ctrl; | ||
666 | } | ||
667 | |||
668 | _reset_gpio(bank, bank->chip.base + offset); | 700 | _reset_gpio(bank, bank->chip.base + offset); |
669 | spin_unlock_irqrestore(&bank->lock, flags); | 701 | spin_unlock_irqrestore(&bank->lock, flags); |
670 | 702 | ||
@@ -672,7 +704,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset) | |||
672 | * If this is the last gpio to be freed in the bank, | 704 | * If this is the last gpio to be freed in the bank, |
673 | * disable the bank module. | 705 | * disable the bank module. |
674 | */ | 706 | */ |
675 | if (!bank->mod_usage) | 707 | if (!BANK_USED(bank)) |
676 | pm_runtime_put(bank->dev); | 708 | pm_runtime_put(bank->dev); |
677 | } | 709 | } |
678 | 710 | ||
@@ -762,10 +794,20 @@ static void gpio_irq_shutdown(struct irq_data *d) | |||
762 | struct gpio_bank *bank = irq_data_get_irq_chip_data(d); | 794 | struct gpio_bank *bank = irq_data_get_irq_chip_data(d); |
763 | unsigned int gpio = irq_to_gpio(bank, d->hwirq); | 795 | unsigned int gpio = irq_to_gpio(bank, d->hwirq); |
764 | unsigned long flags; | 796 | unsigned long flags; |
797 | unsigned offset = GPIO_INDEX(bank, gpio); | ||
765 | 798 | ||
766 | spin_lock_irqsave(&bank->lock, flags); | 799 | spin_lock_irqsave(&bank->lock, flags); |
800 | bank->irq_usage &= ~(1 << offset); | ||
801 | _disable_gpio_module(bank, offset); | ||
767 | _reset_gpio(bank, gpio); | 802 | _reset_gpio(bank, gpio); |
768 | spin_unlock_irqrestore(&bank->lock, flags); | 803 | spin_unlock_irqrestore(&bank->lock, flags); |
804 | |||
805 | /* | ||
806 | * If this is the last IRQ to be freed in the bank, | ||
807 | * disable the bank module. | ||
808 | */ | ||
809 | if (!BANK_USED(bank)) | ||
810 | pm_runtime_put(bank->dev); | ||
769 | } | 811 | } |
770 | 812 | ||
771 | static void gpio_ack_irq(struct irq_data *d) | 813 | static void gpio_ack_irq(struct irq_data *d) |
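Editor's note: with the hunks above, gpio_irq_type() takes the runtime-PM reference for the first line used in a bank and gpio_irq_shutdown() drops it when the last one is freed, mirroring what omap_gpio_request()/omap_gpio_free() already did. A minimal sketch of that get/put pairing, assuming the same bitmask bookkeeping as above (the helper names are illustrative only):

    #include <linux/pm_runtime.h>
    #include <linux/bitops.h>

    static void example_claim_irq_line(struct gpio_bank *bank, unsigned offset)
    {
        if (!BANK_USED(bank))               /* first user powers the bank up */
            pm_runtime_get_sync(bank->dev);
        bank->irq_usage |= BIT(offset);
    }

    static void example_release_irq_line(struct gpio_bank *bank, unsigned offset)
    {
        bank->irq_usage &= ~BIT(offset);
        if (!BANK_USED(bank))               /* last user lets it idle again */
            pm_runtime_put(bank->dev);
    }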
@@ -897,13 +939,6 @@ static int gpio_input(struct gpio_chip *chip, unsigned offset) | |||
897 | return 0; | 939 | return 0; |
898 | } | 940 | } |
899 | 941 | ||
900 | static int gpio_is_input(struct gpio_bank *bank, int mask) | ||
901 | { | ||
902 | void __iomem *reg = bank->base + bank->regs->direction; | ||
903 | |||
904 | return __raw_readl(reg) & mask; | ||
905 | } | ||
906 | |||
907 | static int gpio_get(struct gpio_chip *chip, unsigned offset) | 942 | static int gpio_get(struct gpio_chip *chip, unsigned offset) |
908 | { | 943 | { |
909 | struct gpio_bank *bank; | 944 | struct gpio_bank *bank; |
@@ -922,13 +957,22 @@ static int gpio_output(struct gpio_chip *chip, unsigned offset, int value) | |||
922 | { | 957 | { |
923 | struct gpio_bank *bank; | 958 | struct gpio_bank *bank; |
924 | unsigned long flags; | 959 | unsigned long flags; |
960 | int retval = 0; | ||
925 | 961 | ||
926 | bank = container_of(chip, struct gpio_bank, chip); | 962 | bank = container_of(chip, struct gpio_bank, chip); |
927 | spin_lock_irqsave(&bank->lock, flags); | 963 | spin_lock_irqsave(&bank->lock, flags); |
964 | |||
965 | if (LINE_USED(bank->irq_usage, offset)) { | ||
966 | retval = -EINVAL; | ||
967 | goto exit; | ||
968 | } | ||
969 | |||
928 | bank->set_dataout(bank, offset, value); | 970 | bank->set_dataout(bank, offset, value); |
929 | _set_gpio_direction(bank, offset, 0); | 971 | _set_gpio_direction(bank, offset, 0); |
972 | |||
973 | exit: | ||
930 | spin_unlock_irqrestore(&bank->lock, flags); | 974 | spin_unlock_irqrestore(&bank->lock, flags); |
931 | return 0; | 975 | return retval; |
932 | } | 976 | } |
933 | 977 | ||
934 | static int gpio_debounce(struct gpio_chip *chip, unsigned offset, | 978 | static int gpio_debounce(struct gpio_chip *chip, unsigned offset, |
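Editor's note: refusing gpio_output() for a line that is bound to an interrupt means the consumer now gets -EINVAL from gpio_direction_output() instead of silently glitching its own IRQ source. A hedged consumer-side example using the legacy integer GPIO API (the GPIO number and helper name are made up):

    #include <linux/gpio.h>

    static int example_claim_output(unsigned int gpio)
    {
        int ret = gpio_request(gpio, "example");

        if (ret)
            return ret;

        ret = gpio_direction_output(gpio, 1);
        if (ret) {
            /* With this patch, -EINVAL means the line is wired up as an IRQ. */
            gpio_free(gpio);
            return ret;
        }
        return 0;
    }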
@@ -1400,7 +1444,7 @@ void omap2_gpio_prepare_for_idle(int pwr_mode) | |||
1400 | struct gpio_bank *bank; | 1444 | struct gpio_bank *bank; |
1401 | 1445 | ||
1402 | list_for_each_entry(bank, &omap_gpio_list, node) { | 1446 | list_for_each_entry(bank, &omap_gpio_list, node) { |
1403 | if (!bank->mod_usage || !bank->loses_context) | 1447 | if (!BANK_USED(bank) || !bank->loses_context) |
1404 | continue; | 1448 | continue; |
1405 | 1449 | ||
1406 | bank->power_mode = pwr_mode; | 1450 | bank->power_mode = pwr_mode; |
@@ -1414,7 +1458,7 @@ void omap2_gpio_resume_after_idle(void) | |||
1414 | struct gpio_bank *bank; | 1458 | struct gpio_bank *bank; |
1415 | 1459 | ||
1416 | list_for_each_entry(bank, &omap_gpio_list, node) { | 1460 | list_for_each_entry(bank, &omap_gpio_list, node) { |
1417 | if (!bank->mod_usage || !bank->loses_context) | 1461 | if (!BANK_USED(bank) || !bank->loses_context) |
1418 | continue; | 1462 | continue; |
1419 | 1463 | ||
1420 | pm_runtime_get_sync(bank->dev); | 1464 | pm_runtime_get_sync(bank->dev); |
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c index e3745eb07570..6038966ab045 100644 --- a/drivers/gpio/gpio-rcar.c +++ b/drivers/gpio/gpio-rcar.c | |||
@@ -293,10 +293,9 @@ static void gpio_rcar_parse_pdata(struct gpio_rcar_priv *p) | |||
293 | if (pdata) { | 293 | if (pdata) { |
294 | p->config = *pdata; | 294 | p->config = *pdata; |
295 | } else if (IS_ENABLED(CONFIG_OF) && np) { | 295 | } else if (IS_ENABLED(CONFIG_OF) && np) { |
296 | ret = of_parse_phandle_with_args(np, "gpio-ranges", | 296 | ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, |
297 | "#gpio-range-cells", 0, &args); | 297 | &args); |
298 | p->config.number_of_pins = ret == 0 && args.args_count == 3 | 298 | p->config.number_of_pins = ret == 0 ? args.args[2] |
299 | ? args.args[2] | ||
300 | : RCAR_MAX_GPIO_PER_BANK; | 299 | : RCAR_MAX_GPIO_PER_BANK; |
301 | p->config.gpio_base = -1; | 300 | p->config.gpio_base = -1; |
302 | } | 301 | } |
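Editor's note: of_parse_phandle_with_fixed_args() walks a phandle list whose argument count is fixed by the binding itself, which is why the separate "#gpio-range-cells" lookup disappears. A sketch of the call as this driver now makes it (the wrapper function and its error handling are illustrative):

    #include <linux/of.h>

    static unsigned int example_count_pins(struct device_node *np)
    {
        struct of_phandle_args args;
        int ret;

        /* Each "gpio-ranges" entry carries exactly three argument cells. */
        ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args);
        if (ret)
            return RCAR_MAX_GPIO_PER_BANK;  /* fall back to the bank maximum */

        of_node_put(args.np);               /* drop the reference taken by the parse */
        return args.args[2];                /* cell 2 is the number of pins */
    }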
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 1688ff500513..830f7501cb4d 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
@@ -2925,6 +2925,8 @@ int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb) | |||
2925 | /* Speaker Allocation Data Block */ | 2925 | /* Speaker Allocation Data Block */ |
2926 | if (dbl == 3) { | 2926 | if (dbl == 3) { |
2927 | *sadb = kmalloc(dbl, GFP_KERNEL); | 2927 | *sadb = kmalloc(dbl, GFP_KERNEL); |
2928 | if (!*sadb) | ||
2929 | return -ENOMEM; | ||
2928 | memcpy(*sadb, &db[1], dbl); | 2930 | memcpy(*sadb, &db[1], dbl); |
2929 | count = dbl; | 2931 | count = dbl; |
2930 | break; | 2932 | break; |
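Editor's note: the fix is the usual rule that every allocation must be checked -- kmalloc() can return NULL under memory pressure, and memcpy() into a NULL destination oopses. The pattern in miniature (kmemdup() wraps exactly this; the helper name is made up):

    #include <linux/slab.h>
    #include <linux/string.h>

    static u8 *example_copy_block(const u8 *src, size_t len)
    {
        u8 *buf = kmalloc(len, GFP_KERNEL);

        if (!buf)
            return NULL;        /* caller turns this into -ENOMEM */

        memcpy(buf, src, len);
        return buf;
    }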
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index f6f6cc7fc133..3d13ca6e257f 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
@@ -416,14 +416,6 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode) | |||
416 | return; | 416 | return; |
417 | 417 | ||
418 | /* | 418 | /* |
419 | * fbdev->blank can be called from irq context in case of a panic. | ||
420 | * Since we already have our own special panic handler which will | ||
421 | * restore the fbdev console mode completely, just bail out early. | ||
422 | */ | ||
423 | if (oops_in_progress) | ||
424 | return; | ||
425 | |||
426 | /* | ||
427 | * For each CRTC in this fb, turn the connectors on/off. | 419 | * For each CRTC in this fb, turn the connectors on/off. |
428 | */ | 420 | */ |
429 | drm_modeset_lock_all(dev); | 421 | drm_modeset_lock_all(dev); |
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c index 92babac362ec..2db731f00930 100644 --- a/drivers/gpu/drm/gma500/gtt.c +++ b/drivers/gpu/drm/gma500/gtt.c | |||
@@ -204,6 +204,7 @@ static int psb_gtt_attach_pages(struct gtt_range *gt) | |||
204 | if (IS_ERR(pages)) | 204 | if (IS_ERR(pages)) |
205 | return PTR_ERR(pages); | 205 | return PTR_ERR(pages); |
206 | 206 | ||
207 | gt->npage = gt->gem.size / PAGE_SIZE; | ||
207 | gt->pages = pages; | 208 | gt->pages = pages; |
208 | 209 | ||
209 | return 0; | 210 | return 0; |
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index b1f8fc69023f..60e84043aa34 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c | |||
@@ -707,8 +707,7 @@ tda998x_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
707 | reg_write(encoder, REG_VIP_CNTRL_2, priv->vip_cntrl_2); | 707 | reg_write(encoder, REG_VIP_CNTRL_2, priv->vip_cntrl_2); |
708 | break; | 708 | break; |
709 | case DRM_MODE_DPMS_OFF: | 709 | case DRM_MODE_DPMS_OFF: |
710 | /* disable audio and video ports */ | 710 | /* disable video ports */ |
711 | reg_write(encoder, REG_ENA_AP, 0x00); | ||
712 | reg_write(encoder, REG_ENA_VP_0, 0x00); | 711 | reg_write(encoder, REG_ENA_VP_0, 0x00); |
713 | reg_write(encoder, REG_ENA_VP_1, 0x00); | 712 | reg_write(encoder, REG_ENA_VP_1, 0x00); |
714 | reg_write(encoder, REG_ENA_VP_2, 0x00); | 713 | reg_write(encoder, REG_ENA_VP_2, 0x00); |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index c27a21034a5e..d5c784d48671 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -1290,12 +1290,9 @@ static int i915_load_modeset_init(struct drm_device *dev) | |||
1290 | * then we do not take part in VGA arbitration and the | 1290 | * then we do not take part in VGA arbitration and the |
1291 | * vga_client_register() fails with -ENODEV. | 1291 | * vga_client_register() fails with -ENODEV. |
1292 | */ | 1292 | */ |
1293 | if (!HAS_PCH_SPLIT(dev)) { | 1293 | ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); |
1294 | ret = vga_client_register(dev->pdev, dev, NULL, | 1294 | if (ret && ret != -ENODEV) |
1295 | i915_vga_set_decode); | 1295 | goto out; |
1296 | if (ret && ret != -ENODEV) | ||
1297 | goto out; | ||
1298 | } | ||
1299 | 1296 | ||
1300 | intel_register_dsm_handler(); | 1297 | intel_register_dsm_handler(); |
1301 | 1298 | ||
@@ -1351,12 +1348,6 @@ static int i915_load_modeset_init(struct drm_device *dev) | |||
1351 | */ | 1348 | */ |
1352 | intel_fbdev_initial_config(dev); | 1349 | intel_fbdev_initial_config(dev); |
1353 | 1350 | ||
1354 | /* | ||
1355 | * Must do this after fbcon init so that | ||
1356 | * vgacon_save_screen() works during the handover. | ||
1357 | */ | ||
1358 | i915_disable_vga_mem(dev); | ||
1359 | |||
1360 | /* Only enable hotplug handling once the fbdev is fully set up. */ | 1351 | /* Only enable hotplug handling once the fbdev is fully set up. */ |
1361 | dev_priv->enable_hotplug_processing = true; | 1352 | dev_priv->enable_hotplug_processing = true; |
1362 | 1353 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index df9253d890ee..cdfb9da0e4ce 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -4800,10 +4800,10 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc) | |||
4800 | 4800 | ||
4801 | if (!mutex_trylock(&dev->struct_mutex)) { | 4801 | if (!mutex_trylock(&dev->struct_mutex)) { |
4802 | if (!mutex_is_locked_by(&dev->struct_mutex, current)) | 4802 | if (!mutex_is_locked_by(&dev->struct_mutex, current)) |
4803 | return SHRINK_STOP; | 4803 | return 0; |
4804 | 4804 | ||
4805 | if (dev_priv->mm.shrinker_no_lock_stealing) | 4805 | if (dev_priv->mm.shrinker_no_lock_stealing) |
4806 | return SHRINK_STOP; | 4806 | return 0; |
4807 | 4807 | ||
4808 | unlock = false; | 4808 | unlock = false; |
4809 | } | 4809 | } |
@@ -4901,10 +4901,10 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc) | |||
4901 | 4901 | ||
4902 | if (!mutex_trylock(&dev->struct_mutex)) { | 4902 | if (!mutex_trylock(&dev->struct_mutex)) { |
4903 | if (!mutex_is_locked_by(&dev->struct_mutex, current)) | 4903 | if (!mutex_is_locked_by(&dev->struct_mutex, current)) |
4904 | return 0; | 4904 | return SHRINK_STOP; |
4905 | 4905 | ||
4906 | if (dev_priv->mm.shrinker_no_lock_stealing) | 4906 | if (dev_priv->mm.shrinker_no_lock_stealing) |
4907 | return 0; | 4907 | return SHRINK_STOP; |
4908 | 4908 | ||
4909 | unlock = false; | 4909 | unlock = false; |
4910 | } | 4910 | } |
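Editor's note: these two hunks swap return values that had landed in the wrong callbacks of the split shrinker API: count_objects() reports how many objects could be freed (0 when it cannot even count them), while scan_objects() returns the number actually freed or SHRINK_STOP when it cannot make progress -- here, when struct_mutex cannot be taken. A hedged sketch of the callback pair (the example_* locking and counting helpers are hypothetical):

    #include <linux/shrinker.h>

    static bool example_trylock(void);              /* hypothetical */
    static unsigned long example_nr_freeable(void); /* hypothetical */
    static unsigned long example_free_some(unsigned long nr); /* hypothetical */

    static unsigned long example_count(struct shrinker *s, struct shrink_control *sc)
    {
        if (!example_trylock())     /* can't look: report "nothing to free" */
            return 0;
        return example_nr_freeable();
    }

    static unsigned long example_scan(struct shrinker *s, struct shrink_control *sc)
    {
        if (!example_trylock())     /* can't work: tell the VM to back off */
            return SHRINK_STOP;
        return example_free_some(sc->nr_to_scan);
    }

    static struct shrinker example_shrinker = {
        .count_objects = example_count,
        .scan_objects  = example_scan,
        .seeks         = DEFAULT_SEEKS,
    };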
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index aba9d7498996..dae364f0028c 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c | |||
@@ -143,8 +143,10 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e, | |||
143 | 143 | ||
144 | /* Seek the first printf which hits the start position */ | 144 | /* Seek the first printf which hits the start position */ |
145 | if (e->pos < e->start) { | 145 | if (e->pos < e->start) { |
146 | len = vsnprintf(NULL, 0, f, args); | 146 | va_list tmp; |
147 | if (!__i915_error_seek(e, len)) | 147 | |
148 | va_copy(tmp, args); | ||
149 | if (!__i915_error_seek(e, vsnprintf(NULL, 0, f, tmp))) | ||
148 | return; | 150 | return; |
149 | } | 151 | } |
150 | 152 | ||
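Editor's note: the underlying bug is that vsnprintf(NULL, 0, f, args) is a convenient way to measure the formatted length, but it consumes the va_list, so the later vsnprintf() in the same function would read an indeterminate list. Copying first is the portable idiom; a self-contained sketch in plain C, including the va_end() that strictly belongs to every va_copy():

    #include <stdarg.h>
    #include <stdio.h>

    /* Measure the formatted length without consuming the caller's va_list. */
    static int measured_len(const char *fmt, va_list args)
    {
        va_list tmp;
        int len;

        va_copy(tmp, args);                 /* work on a copy ... */
        len = vsnprintf(NULL, 0, fmt, tmp);
        va_end(tmp);                        /* ... and release it */

        return len;                         /* 'args' is still usable afterwards */
    }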
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index c159e1a6810f..38f96f65d87a 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -3881,6 +3881,9 @@ | |||
3881 | #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030 | 3881 | #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030 |
3882 | #define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11) | 3882 | #define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11) |
3883 | 3883 | ||
3884 | #define HSW_SCRATCH1 0xb038 | ||
3885 | #define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27) | ||
3886 | |||
3884 | #define HSW_FUSE_STRAP 0x42014 | 3887 | #define HSW_FUSE_STRAP 0x42014 |
3885 | #define HSW_CDCLK_LIMIT (1 << 24) | 3888 | #define HSW_CDCLK_LIMIT (1 << 24) |
3886 | 3889 | ||
@@ -4728,6 +4731,9 @@ | |||
4728 | #define GEN7_ROW_CHICKEN2_GT2 0xf4f4 | 4731 | #define GEN7_ROW_CHICKEN2_GT2 0xf4f4 |
4729 | #define DOP_CLOCK_GATING_DISABLE (1<<0) | 4732 | #define DOP_CLOCK_GATING_DISABLE (1<<0) |
4730 | 4733 | ||
4734 | #define HSW_ROW_CHICKEN3 0xe49c | ||
4735 | #define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6) | ||
4736 | |||
4731 | #define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020) | 4737 | #define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020) |
4732 | #define INTEL_AUDIO_DEVCL 0x808629FB | 4738 | #define INTEL_AUDIO_DEVCL 0x808629FB |
4733 | #define INTEL_AUDIO_DEVBLC 0x80862801 | 4739 | #define INTEL_AUDIO_DEVBLC 0x80862801 |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index d8a1d98693e7..581fb4b2f766 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -3941,8 +3941,6 @@ static void intel_connector_check_state(struct intel_connector *connector) | |||
3941 | * consider. */ | 3941 | * consider. */ |
3942 | void intel_connector_dpms(struct drm_connector *connector, int mode) | 3942 | void intel_connector_dpms(struct drm_connector *connector, int mode) |
3943 | { | 3943 | { |
3944 | struct intel_encoder *encoder = intel_attached_encoder(connector); | ||
3945 | |||
3946 | /* All the simple cases only support two dpms states. */ | 3944 | /* All the simple cases only support two dpms states. */ |
3947 | if (mode != DRM_MODE_DPMS_ON) | 3945 | if (mode != DRM_MODE_DPMS_ON) |
3948 | mode = DRM_MODE_DPMS_OFF; | 3946 | mode = DRM_MODE_DPMS_OFF; |
@@ -3953,10 +3951,8 @@ void intel_connector_dpms(struct drm_connector *connector, int mode) | |||
3953 | connector->dpms = mode; | 3951 | connector->dpms = mode; |
3954 | 3952 | ||
3955 | /* Only need to change hw state when actually enabled */ | 3953 | /* Only need to change hw state when actually enabled */ |
3956 | if (encoder->base.crtc) | 3954 | if (connector->encoder) |
3957 | intel_encoder_dpms(encoder, mode); | 3955 | intel_encoder_dpms(to_intel_encoder(connector->encoder), mode); |
3958 | else | ||
3959 | WARN_ON(encoder->connectors_active != false); | ||
3960 | 3956 | ||
3961 | intel_modeset_check_state(connector->dev); | 3957 | intel_modeset_check_state(connector->dev); |
3962 | } | 3958 | } |
@@ -4775,6 +4771,10 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) | |||
4775 | 4771 | ||
4776 | pipeconf = 0; | 4772 | pipeconf = 0; |
4777 | 4773 | ||
4774 | if (dev_priv->quirks & QUIRK_PIPEA_FORCE && | ||
4775 | I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE) | ||
4776 | pipeconf |= PIPECONF_ENABLE; | ||
4777 | |||
4778 | if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) { | 4778 | if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) { |
4779 | /* Enable pixel doubling when the dot clock is > 90% of the (display) | 4779 | /* Enable pixel doubling when the dot clock is > 90% of the (display) |
4780 | * core speed. | 4780 | * core speed. |
@@ -10045,33 +10045,6 @@ static void i915_disable_vga(struct drm_device *dev) | |||
10045 | POSTING_READ(vga_reg); | 10045 | POSTING_READ(vga_reg); |
10046 | } | 10046 | } |
10047 | 10047 | ||
10048 | static void i915_enable_vga_mem(struct drm_device *dev) | ||
10049 | { | ||
10050 | /* Enable VGA memory on Intel HD */ | ||
10051 | if (HAS_PCH_SPLIT(dev)) { | ||
10052 | vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); | ||
10053 | outb(inb(VGA_MSR_READ) | VGA_MSR_MEM_EN, VGA_MSR_WRITE); | ||
10054 | vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO | | ||
10055 | VGA_RSRC_LEGACY_MEM | | ||
10056 | VGA_RSRC_NORMAL_IO | | ||
10057 | VGA_RSRC_NORMAL_MEM); | ||
10058 | vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); | ||
10059 | } | ||
10060 | } | ||
10061 | |||
10062 | void i915_disable_vga_mem(struct drm_device *dev) | ||
10063 | { | ||
10064 | /* Disable VGA memory on Intel HD */ | ||
10065 | if (HAS_PCH_SPLIT(dev)) { | ||
10066 | vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); | ||
10067 | outb(inb(VGA_MSR_READ) & ~VGA_MSR_MEM_EN, VGA_MSR_WRITE); | ||
10068 | vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO | | ||
10069 | VGA_RSRC_NORMAL_IO | | ||
10070 | VGA_RSRC_NORMAL_MEM); | ||
10071 | vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); | ||
10072 | } | ||
10073 | } | ||
10074 | |||
10075 | void intel_modeset_init_hw(struct drm_device *dev) | 10048 | void intel_modeset_init_hw(struct drm_device *dev) |
10076 | { | 10049 | { |
10077 | intel_init_power_well(dev); | 10050 | intel_init_power_well(dev); |
@@ -10350,7 +10323,6 @@ void i915_redisable_vga(struct drm_device *dev) | |||
10350 | if (I915_READ(vga_reg) != VGA_DISP_DISABLE) { | 10323 | if (I915_READ(vga_reg) != VGA_DISP_DISABLE) { |
10351 | DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); | 10324 | DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); |
10352 | i915_disable_vga(dev); | 10325 | i915_disable_vga(dev); |
10353 | i915_disable_vga_mem(dev); | ||
10354 | } | 10326 | } |
10355 | } | 10327 | } |
10356 | 10328 | ||
@@ -10564,8 +10536,6 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
10564 | 10536 | ||
10565 | intel_disable_fbc(dev); | 10537 | intel_disable_fbc(dev); |
10566 | 10538 | ||
10567 | i915_enable_vga_mem(dev); | ||
10568 | |||
10569 | intel_disable_gt_powersave(dev); | 10539 | intel_disable_gt_powersave(dev); |
10570 | 10540 | ||
10571 | ironlake_teardown_rc6(dev); | 10541 | ironlake_teardown_rc6(dev); |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 2151d13772b8..2c555f91bfae 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -588,7 +588,18 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
588 | DRM_DEBUG_KMS("aux_ch native nack\n"); | 588 | DRM_DEBUG_KMS("aux_ch native nack\n"); |
589 | return -EREMOTEIO; | 589 | return -EREMOTEIO; |
590 | case AUX_NATIVE_REPLY_DEFER: | 590 | case AUX_NATIVE_REPLY_DEFER: |
591 | udelay(100); | 591 | /* |
592 | * For now, just give more slack to branch devices. We | ||
593 | * could check the DPCD for I2C bit rate capabilities, | ||
594 | * and if available, adjust the interval. We could also | ||
595 | * be more careful with DP-to-Legacy adapters where a | ||
596 | * long legacy cable may force very low I2C bit rates. | ||
597 | */ | ||
598 | if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & | ||
599 | DP_DWN_STRM_PORT_PRESENT) | ||
600 | usleep_range(500, 600); | ||
601 | else | ||
602 | usleep_range(300, 400); | ||
592 | continue; | 603 | continue; |
593 | default: | 604 | default: |
594 | DRM_ERROR("aux_ch invalid native reply 0x%02x\n", | 605 | DRM_ERROR("aux_ch invalid native reply 0x%02x\n", |
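Editor's note: two things change in the DEFER path -- the wait becomes usleep_range(), which actually sleeps instead of busy-spinning like udelay(), and branch devices (anything behind a DisplayPort converter) get a longer interval because they tend to need more time. A sketch of the retry shape, with a made-up transaction helper:

    #include <linux/delay.h>
    #include <linux/errno.h>

    static int example_send_aux(void);      /* hypothetical transaction helper */

    static int example_aux_retry(void)
    {
        int retry, ret;

        for (retry = 0; retry < 7; retry++) {
            ret = example_send_aux();
            if (ret != -EAGAIN)
                return ret;
            /* usleep_range() is timer-based; udelay() would burn CPU. */
            usleep_range(300, 400);
        }
        return -ETIMEDOUT;
    }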
@@ -1456,7 +1467,7 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp) | |||
1456 | 1467 | ||
1457 | /* Avoid continuous PSR exit by masking memup and hpd */ | 1468 | /* Avoid continuous PSR exit by masking memup and hpd */ |
1458 | I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP | | 1469 | I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP | |
1459 | EDP_PSR_DEBUG_MASK_HPD); | 1470 | EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP); |
1460 | 1471 | ||
1461 | intel_dp->psr_setup_done = true; | 1472 | intel_dp->psr_setup_done = true; |
1462 | } | 1473 | } |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 28cae80495e2..9b7b68fd5d47 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -793,6 +793,5 @@ extern void hsw_pc8_disable_interrupts(struct drm_device *dev); | |||
793 | extern void hsw_pc8_restore_interrupts(struct drm_device *dev); | 793 | extern void hsw_pc8_restore_interrupts(struct drm_device *dev); |
794 | extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv); | 794 | extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv); |
795 | extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv); | 795 | extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv); |
796 | extern void i915_disable_vga_mem(struct drm_device *dev); | ||
797 | 796 | ||
798 | #endif /* __INTEL_DRV_H__ */ | 797 | #endif /* __INTEL_DRV_H__ */ |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index dd176b7296c1..f4c5e95b2d6f 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -3864,8 +3864,6 @@ static void valleyview_enable_rps(struct drm_device *dev) | |||
3864 | dev_priv->rps.rpe_delay), | 3864 | dev_priv->rps.rpe_delay), |
3865 | dev_priv->rps.rpe_delay); | 3865 | dev_priv->rps.rpe_delay); |
3866 | 3866 | ||
3867 | INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work); | ||
3868 | |||
3869 | valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay); | 3867 | valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay); |
3870 | 3868 | ||
3871 | gen6_enable_rps_interrupts(dev); | 3869 | gen6_enable_rps_interrupts(dev); |
@@ -4955,6 +4953,11 @@ static void haswell_init_clock_gating(struct drm_device *dev) | |||
4955 | I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, | 4953 | I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, |
4956 | GEN7_WA_L3_CHICKEN_MODE); | 4954 | GEN7_WA_L3_CHICKEN_MODE); |
4957 | 4955 | ||
4956 | /* L3 caching of data atomics doesn't work -- disable it. */ | ||
4957 | I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE); | ||
4958 | I915_WRITE(HSW_ROW_CHICKEN3, | ||
4959 | _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE)); | ||
4960 | |||
4958 | /* This is required by WaCatErrorRejectionIssue:hsw */ | 4961 | /* This is required by WaCatErrorRejectionIssue:hsw */ |
4959 | I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, | 4962 | I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, |
4960 | I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | | 4963 | I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | |
@@ -5681,5 +5684,7 @@ void intel_pm_init(struct drm_device *dev) | |||
5681 | 5684 | ||
5682 | INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, | 5685 | INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, |
5683 | intel_gen6_powersave_work); | 5686 | intel_gen6_powersave_work); |
5687 | |||
5688 | INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work); | ||
5684 | } | 5689 | } |
5685 | 5690 | ||
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index f2c6d7909ae2..dd6f84bf6c22 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -916,6 +916,14 @@ intel_tv_compute_config(struct intel_encoder *encoder, | |||
916 | DRM_DEBUG_KMS("forcing bpc to 8 for TV\n"); | 916 | DRM_DEBUG_KMS("forcing bpc to 8 for TV\n"); |
917 | pipe_config->pipe_bpp = 8*3; | 917 | pipe_config->pipe_bpp = 8*3; |
918 | 918 | ||
919 | /* TV has its own notion of sync and other mode flags, so clear them. */ | ||
920 | pipe_config->adjusted_mode.flags = 0; | ||
921 | |||
922 | /* | ||
923 | * FIXME: We don't check whether the input mode is actually what we want | ||
924 | * or whether userspace is doing something stupid. | ||
925 | */ | ||
926 | |||
919 | return true; | 927 | return true; |
920 | } | 928 | } |
921 | 929 | ||
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c index 5db5bbaedae2..bc7fd11ad8be 100644 --- a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c | |||
@@ -19,8 +19,6 @@ | |||
19 | #include "msm_drv.h" | 19 | #include "msm_drv.h" |
20 | #include "mdp4_kms.h" | 20 | #include "mdp4_kms.h" |
21 | 21 | ||
22 | #include <mach/iommu.h> | ||
23 | |||
24 | static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev); | 22 | static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev); |
25 | 23 | ||
26 | static int mdp4_hw_init(struct msm_kms *kms) | 24 | static int mdp4_hw_init(struct msm_kms *kms) |
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 008d772384c7..b3a2f1629041 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c | |||
@@ -18,8 +18,6 @@ | |||
18 | #include "msm_drv.h" | 18 | #include "msm_drv.h" |
19 | #include "msm_gpu.h" | 19 | #include "msm_gpu.h" |
20 | 20 | ||
21 | #include <mach/iommu.h> | ||
22 | |||
23 | static void msm_fb_output_poll_changed(struct drm_device *dev) | 21 | static void msm_fb_output_poll_changed(struct drm_device *dev) |
24 | { | 22 | { |
25 | struct msm_drm_private *priv = dev->dev_private; | 23 | struct msm_drm_private *priv = dev->dev_private; |
@@ -62,6 +60,8 @@ int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu, | |||
62 | int i, ret; | 60 | int i, ret; |
63 | 61 | ||
64 | for (i = 0; i < cnt; i++) { | 62 | for (i = 0; i < cnt; i++) { |
63 | /* TODO maybe some day msm iommu won't require this hack: */ | ||
64 | struct device *msm_iommu_get_ctx(const char *ctx_name); | ||
65 | struct device *ctx = msm_iommu_get_ctx(names[i]); | 65 | struct device *ctx = msm_iommu_get_ctx(names[i]); |
66 | if (!ctx) | 66 | if (!ctx) |
67 | continue; | 67 | continue; |
@@ -199,7 +199,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags) | |||
199 | * imx drm driver on iMX5 | 199 | * imx drm driver on iMX5 |
200 | */ | 200 | */ |
201 | dev_err(dev->dev, "failed to load kms\n"); | 201 | dev_err(dev->dev, "failed to load kms\n"); |
202 | ret = PTR_ERR(priv->kms); | 202 | ret = PTR_ERR(kms); |
203 | goto fail; | 203 | goto fail; |
204 | } | 204 | } |
205 | 205 | ||
@@ -697,7 +697,7 @@ static struct drm_driver msm_driver = { | |||
697 | .gem_vm_ops = &vm_ops, | 697 | .gem_vm_ops = &vm_ops, |
698 | .dumb_create = msm_gem_dumb_create, | 698 | .dumb_create = msm_gem_dumb_create, |
699 | .dumb_map_offset = msm_gem_dumb_map_offset, | 699 | .dumb_map_offset = msm_gem_dumb_map_offset, |
700 | .dumb_destroy = msm_gem_dumb_destroy, | 700 | .dumb_destroy = drm_gem_dumb_destroy, |
701 | #ifdef CONFIG_DEBUG_FS | 701 | #ifdef CONFIG_DEBUG_FS |
702 | .debugfs_init = msm_debugfs_init, | 702 | .debugfs_init = msm_debugfs_init, |
703 | .debugfs_cleanup = msm_debugfs_cleanup, | 703 | .debugfs_cleanup = msm_debugfs_cleanup, |
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 29eacfa29cfb..2bae46c66a30 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c | |||
@@ -319,13 +319,6 @@ int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev, | |||
319 | MSM_BO_SCANOUT | MSM_BO_WC, &args->handle); | 319 | MSM_BO_SCANOUT | MSM_BO_WC, &args->handle); |
320 | } | 320 | } |
321 | 321 | ||
322 | int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, | ||
323 | uint32_t handle) | ||
324 | { | ||
325 | /* No special work needed, drop the reference and see what falls out */ | ||
326 | return drm_gem_handle_delete(file, handle); | ||
327 | } | ||
328 | |||
329 | int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, | 322 | int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, |
330 | uint32_t handle, uint64_t *offset) | 323 | uint32_t handle, uint64_t *offset) |
331 | { | 324 | { |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c index 37712a6df923..e290cfa4acee 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c | |||
@@ -113,7 +113,7 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine, | |||
113 | pmc->use_msi = false; | 113 | pmc->use_msi = false; |
114 | break; | 114 | break; |
115 | default: | 115 | default: |
116 | pmc->use_msi = nouveau_boolopt(device->cfgopt, "NvMSI", true); | 116 | pmc->use_msi = nouveau_boolopt(device->cfgopt, "NvMSI", false); |
117 | if (pmc->use_msi) { | 117 | if (pmc->use_msi) { |
118 | pmc->use_msi = pci_enable_msi(device->pdev) == 0; | 118 | pmc->use_msi = pci_enable_msi(device->pdev) == 0; |
119 | if (pmc->use_msi) { | 119 | if (pmc->use_msi) { |
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c index 05ff315e8e9e..9b6950d9b3c0 100644 --- a/drivers/gpu/drm/radeon/btc_dpm.c +++ b/drivers/gpu/drm/radeon/btc_dpm.c | |||
@@ -1168,6 +1168,23 @@ static const struct radeon_blacklist_clocks btc_blacklist_clocks[] = | |||
1168 | { 25000, 30000, RADEON_SCLK_UP } | 1168 | { 25000, 30000, RADEON_SCLK_UP } |
1169 | }; | 1169 | }; |
1170 | 1170 | ||
1171 | void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table, | ||
1172 | u32 *max_clock) | ||
1173 | { | ||
1174 | u32 i, clock = 0; | ||
1175 | |||
1176 | if ((table == NULL) || (table->count == 0)) { | ||
1177 | *max_clock = clock; | ||
1178 | return; | ||
1179 | } | ||
1180 | |||
1181 | for (i = 0; i < table->count; i++) { | ||
1182 | if (clock < table->entries[i].clk) | ||
1183 | clock = table->entries[i].clk; | ||
1184 | } | ||
1185 | *max_clock = clock; | ||
1186 | } | ||
1187 | |||
1171 | void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table, | 1188 | void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table, |
1172 | u32 clock, u16 max_voltage, u16 *voltage) | 1189 | u32 clock, u16 max_voltage, u16 *voltage) |
1173 | { | 1190 | { |
@@ -1913,7 +1930,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev, | |||
1913 | } | 1930 | } |
1914 | j++; | 1931 | j++; |
1915 | 1932 | ||
1916 | if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE) | 1933 | if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE) |
1917 | return -EINVAL; | 1934 | return -EINVAL; |
1918 | 1935 | ||
1919 | tmp = RREG32(MC_PMG_CMD_MRS); | 1936 | tmp = RREG32(MC_PMG_CMD_MRS); |
@@ -1928,7 +1945,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev, | |||
1928 | } | 1945 | } |
1929 | j++; | 1946 | j++; |
1930 | 1947 | ||
1931 | if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE) | 1948 | if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE) |
1932 | return -EINVAL; | 1949 | return -EINVAL; |
1933 | break; | 1950 | break; |
1934 | case MC_SEQ_RESERVE_M >> 2: | 1951 | case MC_SEQ_RESERVE_M >> 2: |
@@ -1942,7 +1959,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev, | |||
1942 | } | 1959 | } |
1943 | j++; | 1960 | j++; |
1944 | 1961 | ||
1945 | if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE) | 1962 | if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE) |
1946 | return -EINVAL; | 1963 | return -EINVAL; |
1947 | break; | 1964 | break; |
1948 | default: | 1965 | default: |
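Editor's note: each of the three corrected checks used to let j reach SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE before bailing out, which allows one store past the end of the table; writing entry j is only valid while j < size, so the guard must trip at j >= size. The invariant in isolation (table size is a stand-in value):

    #include <linux/types.h>
    #include <linux/errno.h>

    #define EXAMPLE_TABLE_SIZE 16           /* stand-in for the SMC array size */

    static int example_append(u32 *table, unsigned int j, u32 val)
    {
        if (j >= EXAMPLE_TABLE_SIZE)        /* 'j > SIZE' would allow table[SIZE] */
            return -EINVAL;

        table[j] = val;                     /* valid indices are 0 .. SIZE-1 */
        return 0;
    }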
@@ -2080,6 +2097,7 @@ static void btc_apply_state_adjust_rules(struct radeon_device *rdev, | |||
2080 | bool disable_mclk_switching; | 2097 | bool disable_mclk_switching; |
2081 | u32 mclk, sclk; | 2098 | u32 mclk, sclk; |
2082 | u16 vddc, vddci; | 2099 | u16 vddc, vddci; |
2100 | u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; | ||
2083 | 2101 | ||
2084 | if ((rdev->pm.dpm.new_active_crtc_count > 1) || | 2102 | if ((rdev->pm.dpm.new_active_crtc_count > 1) || |
2085 | btc_dpm_vblank_too_short(rdev)) | 2103 | btc_dpm_vblank_too_short(rdev)) |
@@ -2121,6 +2139,39 @@ static void btc_apply_state_adjust_rules(struct radeon_device *rdev, | |||
2121 | ps->low.vddci = max_limits->vddci; | 2139 | ps->low.vddci = max_limits->vddci; |
2122 | } | 2140 | } |
2123 | 2141 | ||
2142 | /* limit clocks to max supported clocks based on voltage dependency tables */ | ||
2143 | btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk, | ||
2144 | &max_sclk_vddc); | ||
2145 | btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, | ||
2146 | &max_mclk_vddci); | ||
2147 | btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, | ||
2148 | &max_mclk_vddc); | ||
2149 | |||
2150 | if (max_sclk_vddc) { | ||
2151 | if (ps->low.sclk > max_sclk_vddc) | ||
2152 | ps->low.sclk = max_sclk_vddc; | ||
2153 | if (ps->medium.sclk > max_sclk_vddc) | ||
2154 | ps->medium.sclk = max_sclk_vddc; | ||
2155 | if (ps->high.sclk > max_sclk_vddc) | ||
2156 | ps->high.sclk = max_sclk_vddc; | ||
2157 | } | ||
2158 | if (max_mclk_vddci) { | ||
2159 | if (ps->low.mclk > max_mclk_vddci) | ||
2160 | ps->low.mclk = max_mclk_vddci; | ||
2161 | if (ps->medium.mclk > max_mclk_vddci) | ||
2162 | ps->medium.mclk = max_mclk_vddci; | ||
2163 | if (ps->high.mclk > max_mclk_vddci) | ||
2164 | ps->high.mclk = max_mclk_vddci; | ||
2165 | } | ||
2166 | if (max_mclk_vddc) { | ||
2167 | if (ps->low.mclk > max_mclk_vddc) | ||
2168 | ps->low.mclk = max_mclk_vddc; | ||
2169 | if (ps->medium.mclk > max_mclk_vddc) | ||
2170 | ps->medium.mclk = max_mclk_vddc; | ||
2171 | if (ps->high.mclk > max_mclk_vddc) | ||
2172 | ps->high.mclk = max_mclk_vddc; | ||
2173 | } | ||
2174 | |||
2124 | /* XXX validate the min clocks required for display */ | 2175 | /* XXX validate the min clocks required for display */ |
2125 | 2176 | ||
2126 | if (disable_mclk_switching) { | 2177 | if (disable_mclk_switching) { |
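Editor's note: the added block caps every performance level at the highest clock the voltage dependency tables can support, so a state never requests a clock its voltage cannot sustain. The repeated "if (x > max) x = max" statements could equally be written with the kernel's min() helper; a condensed sketch of the same clamping (field names as in the hunk, shown only for shape):

    #include <linux/kernel.h>   /* min() */

    if (max_sclk_vddc) {
        ps->low.sclk    = min(ps->low.sclk,    max_sclk_vddc);
        ps->medium.sclk = min(ps->medium.sclk, max_sclk_vddc);
        ps->high.sclk   = min(ps->high.sclk,   max_sclk_vddc);
    }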
diff --git a/drivers/gpu/drm/radeon/btc_dpm.h b/drivers/gpu/drm/radeon/btc_dpm.h index 1a15e0e41950..3b6f12b7760b 100644 --- a/drivers/gpu/drm/radeon/btc_dpm.h +++ b/drivers/gpu/drm/radeon/btc_dpm.h | |||
@@ -46,6 +46,8 @@ void btc_adjust_clock_combinations(struct radeon_device *rdev, | |||
46 | struct rv7xx_pl *pl); | 46 | struct rv7xx_pl *pl); |
47 | void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table, | 47 | void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table, |
48 | u32 clock, u16 max_voltage, u16 *voltage); | 48 | u32 clock, u16 max_voltage, u16 *voltage); |
49 | void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table, | ||
50 | u32 *max_clock); | ||
49 | void btc_apply_voltage_delta_rules(struct radeon_device *rdev, | 51 | void btc_apply_voltage_delta_rules(struct radeon_device *rdev, |
50 | u16 max_vddc, u16 max_vddci, | 52 | u16 max_vddc, u16 max_vddci, |
51 | u16 *vddc, u16 *vddci); | 53 | u16 *vddc, u16 *vddci); |
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index 899627443030..51e947a97edf 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c | |||
@@ -146,6 +146,8 @@ static const struct ci_pt_config_reg didt_config_ci[] = | |||
146 | }; | 146 | }; |
147 | 147 | ||
148 | extern u8 rv770_get_memory_module_index(struct radeon_device *rdev); | 148 | extern u8 rv770_get_memory_module_index(struct radeon_device *rdev); |
149 | extern void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table, | ||
150 | u32 *max_clock); | ||
149 | extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev, | 151 | extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev, |
150 | u32 arb_freq_src, u32 arb_freq_dest); | 152 | u32 arb_freq_src, u32 arb_freq_dest); |
151 | extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock); | 153 | extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock); |
@@ -712,6 +714,7 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev, | |||
712 | struct radeon_clock_and_voltage_limits *max_limits; | 714 | struct radeon_clock_and_voltage_limits *max_limits; |
713 | bool disable_mclk_switching; | 715 | bool disable_mclk_switching; |
714 | u32 sclk, mclk; | 716 | u32 sclk, mclk; |
717 | u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; | ||
715 | int i; | 718 | int i; |
716 | 719 | ||
717 | if ((rdev->pm.dpm.new_active_crtc_count > 1) || | 720 | if ((rdev->pm.dpm.new_active_crtc_count > 1) || |
@@ -739,6 +742,29 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev, | |||
739 | } | 742 | } |
740 | } | 743 | } |
741 | 744 | ||
745 | /* limit clocks to max supported clocks based on voltage dependency tables */ | ||
746 | btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk, | ||
747 | &max_sclk_vddc); | ||
748 | btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, | ||
749 | &max_mclk_vddci); | ||
750 | btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, | ||
751 | &max_mclk_vddc); | ||
752 | |||
753 | for (i = 0; i < ps->performance_level_count; i++) { | ||
754 | if (max_sclk_vddc) { | ||
755 | if (ps->performance_levels[i].sclk > max_sclk_vddc) | ||
756 | ps->performance_levels[i].sclk = max_sclk_vddc; | ||
757 | } | ||
758 | if (max_mclk_vddci) { | ||
759 | if (ps->performance_levels[i].mclk > max_mclk_vddci) | ||
760 | ps->performance_levels[i].mclk = max_mclk_vddci; | ||
761 | } | ||
762 | if (max_mclk_vddc) { | ||
763 | if (ps->performance_levels[i].mclk > max_mclk_vddc) | ||
764 | ps->performance_levels[i].mclk = max_mclk_vddc; | ||
765 | } | ||
766 | } | ||
767 | |||
742 | /* XXX validate the min clocks required for display */ | 768 | /* XXX validate the min clocks required for display */ |
743 | 769 | ||
744 | if (disable_mclk_switching) { | 770 | if (disable_mclk_switching) { |
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index adbdb6503b05..b874ccdf52f7 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c | |||
@@ -77,6 +77,8 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev); | |||
77 | static void cik_program_aspm(struct radeon_device *rdev); | 77 | static void cik_program_aspm(struct radeon_device *rdev); |
78 | static void cik_init_pg(struct radeon_device *rdev); | 78 | static void cik_init_pg(struct radeon_device *rdev); |
79 | static void cik_init_cg(struct radeon_device *rdev); | 79 | static void cik_init_cg(struct radeon_device *rdev); |
80 | static void cik_fini_pg(struct radeon_device *rdev); | ||
81 | static void cik_fini_cg(struct radeon_device *rdev); | ||
80 | static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev, | 82 | static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev, |
81 | bool enable); | 83 | bool enable); |
82 | 84 | ||
@@ -2845,10 +2847,8 @@ static void cik_gpu_init(struct radeon_device *rdev) | |||
2845 | rdev->config.cik.tile_config |= (3 << 0); | 2847 | rdev->config.cik.tile_config |= (3 << 0); |
2846 | break; | 2848 | break; |
2847 | } | 2849 | } |
2848 | if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) | 2850 | rdev->config.cik.tile_config |= |
2849 | rdev->config.cik.tile_config |= 1 << 4; | 2851 | ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; |
2850 | else | ||
2851 | rdev->config.cik.tile_config |= 0 << 4; | ||
2852 | rdev->config.cik.tile_config |= | 2852 | rdev->config.cik.tile_config |= |
2853 | ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; | 2853 | ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; |
2854 | rdev->config.cik.tile_config |= | 2854 | rdev->config.cik.tile_config |= |
@@ -4187,6 +4187,10 @@ static void cik_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) | |||
4187 | dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", | 4187 | dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", |
4188 | RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); | 4188 | RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); |
4189 | 4189 | ||
4190 | /* disable CG/PG */ | ||
4191 | cik_fini_pg(rdev); | ||
4192 | cik_fini_cg(rdev); | ||
4193 | |||
4190 | /* stop the rlc */ | 4194 | /* stop the rlc */ |
4191 | cik_rlc_stop(rdev); | 4195 | cik_rlc_stop(rdev); |
4192 | 4196 | ||
@@ -4456,8 +4460,8 @@ static int cik_mc_init(struct radeon_device *rdev) | |||
4456 | rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); | 4460 | rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); |
4457 | rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); | 4461 | rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); |
4458 | /* size in MB on si */ | 4462 | /* size in MB on si */ |
4459 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; | 4463 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL; |
4460 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; | 4464 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL; |
4461 | rdev->mc.visible_vram_size = rdev->mc.aper_size; | 4465 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
4462 | si_vram_gtt_location(rdev, &rdev->mc); | 4466 | si_vram_gtt_location(rdev, &rdev->mc); |
4463 | radeon_update_bandwidth_info(rdev); | 4467 | radeon_update_bandwidth_info(rdev); |
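Editor's note: CONFIG_MEMSIZE reports VRAM in megabytes; multiplying that 32-bit value by 1024 * 1024 in 32-bit arithmetic wraps for boards with 4 GB or more, so the constants are promoted to unsigned long long before the multiply. The overflow in miniature (helper name is made up):

    #include <linux/types.h>

    static u64 example_vram_bytes(void)
    {
        u32 size_mb = 4096;                         /* a 4 GB board */
        u64 wrong = size_mb * 1024 * 1024;          /* 32-bit multiply wraps to 0 */
        u64 right = size_mb * 1024ULL * 1024ULL;    /* promoted: 0x100000000 */

        return right - wrong;                       /* 4 GiB lost by the old code */
    }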
@@ -4735,12 +4739,13 @@ static void cik_vm_decode_fault(struct radeon_device *rdev, | |||
4735 | u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT; | 4739 | u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT; |
4736 | u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT; | 4740 | u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT; |
4737 | u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT; | 4741 | u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT; |
4738 | char *block = (char *)&mc_client; | 4742 | char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff, |
4743 | (mc_client >> 8) & 0xff, mc_client & 0xff, 0 }; | ||
4739 | 4744 | ||
4740 | printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n", | 4745 | printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n", |
4741 | protections, vmid, addr, | 4746 | protections, vmid, addr, |
4742 | (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read", | 4747 | (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read", |
4743 | block, mc_id); | 4748 | block, mc_client, mc_id); |
4744 | } | 4749 | } |
4745 | 4750 | ||
4746 | /** | 4751 | /** |
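Editor's note: casting the address of a host-order u32 to char * only yields the intended four characters on big-endian machines, and it was never NUL-terminated; unpacking the bytes explicitly, as the hunk now does, is portable. The same trick in isolation (helper name is made up):

    #include <linux/types.h>

    /* Unpack a register holding four ASCII characters, most significant byte first. */
    static void example_decode_client(u32 mc_client, char out[5])
    {
        out[0] = mc_client >> 24;
        out[1] = (mc_client >> 16) & 0xff;
        out[2] = (mc_client >> 8) & 0xff;
        out[3] = mc_client & 0xff;
        out[4] = '\0';          /* the old cast had no terminator */
    }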
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 555164e270a7..b5c67a99dda9 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -3131,7 +3131,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
3131 | rdev->config.evergreen.sx_max_export_size = 256; | 3131 | rdev->config.evergreen.sx_max_export_size = 256; |
3132 | rdev->config.evergreen.sx_max_export_pos_size = 64; | 3132 | rdev->config.evergreen.sx_max_export_pos_size = 64; |
3133 | rdev->config.evergreen.sx_max_export_smx_size = 192; | 3133 | rdev->config.evergreen.sx_max_export_smx_size = 192; |
3134 | rdev->config.evergreen.max_hw_contexts = 8; | 3134 | rdev->config.evergreen.max_hw_contexts = 4; |
3135 | rdev->config.evergreen.sq_num_cf_insts = 2; | 3135 | rdev->config.evergreen.sq_num_cf_insts = 2; |
3136 | 3136 | ||
3137 | rdev->config.evergreen.sc_prim_fifo_size = 0x40; | 3137 | rdev->config.evergreen.sc_prim_fifo_size = 0x40; |
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index f71ce390aebe..f815c20640bd 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c | |||
@@ -288,8 +288,7 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode | |||
288 | /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */ | 288 | /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */ |
289 | 289 | ||
290 | WREG32(HDMI_ACR_PACKET_CONTROL + offset, | 290 | WREG32(HDMI_ACR_PACKET_CONTROL + offset, |
291 | HDMI_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */ | 291 | HDMI_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */ |
292 | HDMI_ACR_SOURCE); /* select SW CTS value */ | ||
293 | 292 | ||
294 | evergreen_hdmi_update_ACR(encoder, mode->clock); | 293 | evergreen_hdmi_update_ACR(encoder, mode->clock); |
295 | 294 | ||
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 8768fd6a1e27..4f6d2962767d 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h | |||
@@ -1501,7 +1501,7 @@ | |||
1501 | * 6. COMMAND [29:22] | BYTE_COUNT [20:0] | 1501 | * 6. COMMAND [29:22] | BYTE_COUNT [20:0] |
1502 | */ | 1502 | */ |
1503 | # define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20) | 1503 | # define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20) |
1504 | /* 0 - SRC_ADDR | 1504 | /* 0 - DST_ADDR |
1505 | * 1 - GDS | 1505 | * 1 - GDS |
1506 | */ | 1506 | */ |
1507 | # define PACKET3_CP_DMA_ENGINE(x) ((x) << 27) | 1507 | # define PACKET3_CP_DMA_ENGINE(x) ((x) << 27) |
@@ -1516,7 +1516,7 @@ | |||
1516 | # define PACKET3_CP_DMA_CP_SYNC (1 << 31) | 1516 | # define PACKET3_CP_DMA_CP_SYNC (1 << 31) |
1517 | /* COMMAND */ | 1517 | /* COMMAND */ |
1518 | # define PACKET3_CP_DMA_DIS_WC (1 << 21) | 1518 | # define PACKET3_CP_DMA_DIS_WC (1 << 21) |
1519 | # define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23) | 1519 | # define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22) |
1520 | /* 0 - none | 1520 | /* 0 - none |
1521 | * 1 - 8 in 16 | 1521 | * 1 - 8 in 16 |
1522 | * 2 - 8 in 32 | 1522 | * 2 - 8 in 32 |
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index 6c398a456d78..f26339028154 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c | |||
@@ -787,6 +787,7 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev, | |||
787 | bool disable_mclk_switching; | 787 | bool disable_mclk_switching; |
788 | u32 mclk, sclk; | 788 | u32 mclk, sclk; |
789 | u16 vddc, vddci; | 789 | u16 vddc, vddci; |
790 | u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; | ||
790 | int i; | 791 | int i; |
791 | 792 | ||
792 | if ((rdev->pm.dpm.new_active_crtc_count > 1) || | 793 | if ((rdev->pm.dpm.new_active_crtc_count > 1) || |
@@ -813,6 +814,29 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev, | |||
813 | } | 814 | } |
814 | } | 815 | } |
815 | 816 | ||
817 | /* limit clocks to max supported clocks based on voltage dependency tables */ | ||
818 | btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk, | ||
819 | &max_sclk_vddc); | ||
820 | btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, | ||
821 | &max_mclk_vddci); | ||
822 | btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, | ||
823 | &max_mclk_vddc); | ||
824 | |||
825 | for (i = 0; i < ps->performance_level_count; i++) { | ||
826 | if (max_sclk_vddc) { | ||
827 | if (ps->performance_levels[i].sclk > max_sclk_vddc) | ||
828 | ps->performance_levels[i].sclk = max_sclk_vddc; | ||
829 | } | ||
830 | if (max_mclk_vddci) { | ||
831 | if (ps->performance_levels[i].mclk > max_mclk_vddci) | ||
832 | ps->performance_levels[i].mclk = max_mclk_vddci; | ||
833 | } | ||
834 | if (max_mclk_vddc) { | ||
835 | if (ps->performance_levels[i].mclk > max_mclk_vddc) | ||
836 | ps->performance_levels[i].mclk = max_mclk_vddc; | ||
837 | } | ||
838 | } | ||
839 | |||
816 | /* XXX validate the min clocks required for display */ | 840 | /* XXX validate the min clocks required for display */ |
817 | 841 | ||
818 | if (disable_mclk_switching) { | 842 | if (disable_mclk_switching) { |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 24175717307b..d71333033b2b 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -2933,9 +2933,11 @@ static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data) | |||
2933 | seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp); | 2933 | seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp); |
2934 | seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); | 2934 | seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); |
2935 | seq_printf(m, "%u dwords in ring\n", count); | 2935 | seq_printf(m, "%u dwords in ring\n", count); |
2936 | for (j = 0; j <= count; j++) { | 2936 | if (ring->ready) { |
2937 | i = (rdp + j) & ring->ptr_mask; | 2937 | for (j = 0; j <= count; j++) { |
2938 | seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]); | 2938 | i = (rdp + j) & ring->ptr_mask; |
2939 | seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]); | ||
2940 | } | ||
2939 | } | 2941 | } |
2940 | return 0; | 2942 | return 0; |
2941 | } | 2943 | } |
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index e65f211a7be0..5513d8f06252 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c | |||
@@ -1084,7 +1084,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) | |||
1084 | rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk = | 1084 | rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk = |
1085 | le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16); | 1085 | le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16); |
1086 | rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v = | 1086 | rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v = |
1087 | le16_to_cpu(limits->entries[i].usVoltage); | 1087 | le16_to_cpu(entry->usVoltage); |
1088 | entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *) | 1088 | entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *) |
1089 | ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record)); | 1089 | ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record)); |
1090 | } | 1090 | } |
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index f443010ce90b..5b729319f27b 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c | |||
@@ -57,15 +57,15 @@ enum r600_hdmi_iec_status_bits { | |||
57 | static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = { | 57 | static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = { |
58 | /* 32kHz 44.1kHz 48kHz */ | 58 | /* 32kHz 44.1kHz 48kHz */ |
59 | /* Clock N CTS N CTS N CTS */ | 59 | /* Clock N CTS N CTS N CTS */ |
60 | { 25174, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25,20/1.001 MHz */ | 60 | { 25175, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25,20/1.001 MHz */ |
61 | { 25200, 4096, 25200, 6272, 28000, 6144, 25200 }, /* 25.20 MHz */ | 61 | { 25200, 4096, 25200, 6272, 28000, 6144, 25200 }, /* 25.20 MHz */ |
62 | { 27000, 4096, 27000, 6272, 30000, 6144, 27000 }, /* 27.00 MHz */ | 62 | { 27000, 4096, 27000, 6272, 30000, 6144, 27000 }, /* 27.00 MHz */ |
63 | { 27027, 4096, 27027, 6272, 30030, 6144, 27027 }, /* 27.00*1.001 MHz */ | 63 | { 27027, 4096, 27027, 6272, 30030, 6144, 27027 }, /* 27.00*1.001 MHz */ |
64 | { 54000, 4096, 54000, 6272, 60000, 6144, 54000 }, /* 54.00 MHz */ | 64 | { 54000, 4096, 54000, 6272, 60000, 6144, 54000 }, /* 54.00 MHz */ |
65 | { 54054, 4096, 54054, 6272, 60060, 6144, 54054 }, /* 54.00*1.001 MHz */ | 65 | { 54054, 4096, 54054, 6272, 60060, 6144, 54054 }, /* 54.00*1.001 MHz */ |
66 | { 74175, 11648, 210937, 17836, 234375, 11648, 140625 }, /* 74.25/1.001 MHz */ | 66 | { 74176, 11648, 210937, 17836, 234375, 11648, 140625 }, /* 74.25/1.001 MHz */ |
67 | { 74250, 4096, 74250, 6272, 82500, 6144, 74250 }, /* 74.25 MHz */ | 67 | { 74250, 4096, 74250, 6272, 82500, 6144, 74250 }, /* 74.25 MHz */ |
68 | { 148351, 11648, 421875, 8918, 234375, 5824, 140625 }, /* 148.50/1.001 MHz */ | 68 | { 148352, 11648, 421875, 8918, 234375, 5824, 140625 }, /* 148.50/1.001 MHz */ |
69 | { 148500, 4096, 148500, 6272, 165000, 6144, 148500 }, /* 148.50 MHz */ | 69 | { 148500, 4096, 148500, 6272, 165000, 6144, 148500 }, /* 148.50 MHz */ |
70 | { 0, 4096, 0, 6272, 0, 6144, 0 } /* Other */ | 70 | { 0, 4096, 0, 6272, 0, 6144, 0 } /* Other */ |
71 | }; | 71 | }; |
@@ -75,8 +75,15 @@ static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = { | |||
75 | */ | 75 | */ |
76 | static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int N, int freq) | 76 | static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int N, int freq) |
77 | { | 77 | { |
78 | if (*CTS == 0) | 78 | u64 n; |
79 | *CTS = clock * N / (128 * freq) * 1000; | 79 | u32 d; |
80 | |||
81 | if (*CTS == 0) { | ||
82 | n = (u64)clock * (u64)N * 1000ULL; | ||
83 | d = 128 * freq; | ||
84 | do_div(n, d); | ||
85 | *CTS = n; | ||
86 | } | ||
80 | DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n", | 87 | DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n", |
81 | N, *CTS, freq); | 88 | N, *CTS, freq); |
82 | } | 89 | } |
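Editor's note: the CTS value is clock * N * 1000 / (128 * freq); for HDMI pixel clocks the intermediate product no longer fits in 32 bits, and a plain 64-bit "/" is not available to 32-bit kernels anyway, hence do_div(). do_div(n, base) divides the u64 n in place by a 32-bit divisor and evaluates to the remainder. A sketch of the calculation (helper name is made up):

    #include <linux/types.h>
    #include <asm/div64.h>

    static u32 example_calc_cts(u32 clock, u32 n_val, u32 freq)
    {
        u64 n = (u64)clock * n_val * 1000ULL;   /* would overflow 32-bit math */
        u32 d = 128 * freq;

        do_div(n, d);                           /* n becomes the quotient */
        return n;
    }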
@@ -257,10 +264,7 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) | |||
257 | * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE | 264 | * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE |
258 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator | 265 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator |
259 | */ | 266 | */ |
260 | if (ASIC_IS_DCE3(rdev)) { | 267 | if (ASIC_IS_DCE32(rdev)) { |
261 | /* according to the reg specs, this should DCE3.2 only, but in | ||
262 | * practice it seems to cover DCE3.0 as well. | ||
263 | */ | ||
264 | if (dig->dig_encoder == 0) { | 268 | if (dig->dig_encoder == 0) { |
265 | dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; | 269 | dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; |
266 | dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio); | 270 | dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio); |
@@ -276,8 +280,21 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) | |||
276 | WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo); | 280 | WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo); |
277 | WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */ | 281 | WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */ |
278 | } | 282 | } |
283 | } else if (ASIC_IS_DCE3(rdev)) { | ||
284 | /* according to the reg specs, this should DCE3.2 only, but in | ||
285 | * practice it seems to cover DCE3.0/3.1 as well. | ||
286 | */ | ||
287 | if (dig->dig_encoder == 0) { | ||
288 | WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100); | ||
289 | WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100); | ||
290 | WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */ | ||
291 | } else { | ||
292 | WREG32(DCCG_AUDIO_DTO1_PHASE, base_rate * 100); | ||
293 | WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100); | ||
294 | WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */ | ||
295 | } | ||
279 | } else { | 296 | } else { |
280 | /* according to the reg specs, this should be DCE2.0 and DCE3.0 */ | 297 | /* according to the reg specs, this should be DCE2.0 and DCE3.0/3.1 */ |
281 | WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) | | 298 | WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) | |
282 | AUDIO_DTO_MODULE(clock / 10)); | 299 | AUDIO_DTO_MODULE(clock / 10)); |
283 | } | 300 | } |
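
Per the comment at the top of this hunk, the audio DTO is programmed as a rational number with DCCG_AUDIO_DTOx_PHASE as numerator and DCCG_AUDIO_DTOx_MODULE as denominator. The new DCE3.0/3.1 branch writes base_rate*100 and clock*100, while the legacy path writes base_rate/10 and clock/10; both encode the same ratio base_rate/clock, but the *100 form never truncates. A small illustration; the rates below are made up for the example, not taken from the driver.

#include <stdio.h>

int main(void)
{
	unsigned int base_rate = 24000, clock = 74250; /* illustrative values only */

	/* Both register encodings target the same ratio base_rate/clock;
	 * dividing by 10 can truncate, multiplying by 100 cannot. */
	printf("*100 encoding: %u / %u = %f\n", base_rate * 100, clock * 100,
	       (double)(base_rate * 100) / (double)(clock * 100));
	printf("/10  encoding: %u / %u = %f\n", base_rate / 10, clock / 10,
	       (double)(base_rate / 10) / (double)(clock / 10));
	return 0;
}
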
@@ -434,8 +451,8 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod | |||
434 | } | 451 | } |
435 | 452 | ||
436 | WREG32(HDMI0_ACR_PACKET_CONTROL + offset, | 453 | WREG32(HDMI0_ACR_PACKET_CONTROL + offset, |
437 | HDMI0_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */ | 454 | HDMI0_ACR_SOURCE | /* select SW CTS value - XXX verify that hw CTS works on all families */ |
438 | HDMI0_ACR_SOURCE); /* select SW CTS value */ | 455 | HDMI0_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */ |
439 | 456 | ||
440 | WREG32(HDMI0_VBI_PACKET_CONTROL + offset, | 457 | WREG32(HDMI0_VBI_PACKET_CONTROL + offset, |
441 | HDMI0_NULL_SEND | /* send null packets when required */ | 458 | HDMI0_NULL_SEND | /* send null packets when required */ |
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index e673fe26ea84..7b3c7b5932c5 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h | |||
@@ -1523,7 +1523,7 @@ | |||
1523 | */ | 1523 | */ |
1524 | # define PACKET3_CP_DMA_CP_SYNC (1 << 31) | 1524 | # define PACKET3_CP_DMA_CP_SYNC (1 << 31) |
1525 | /* COMMAND */ | 1525 | /* COMMAND */ |
1526 | # define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23) | 1526 | # define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22) |
1527 | /* 0 - none | 1527 | /* 0 - none |
1528 | * 1 - 8 in 16 | 1528 | * 1 - 8 in 16 |
1529 | * 2 - 8 in 32 | 1529 | * 2 - 8 in 32 |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 5003385a7512..8f7e04538fd6 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
@@ -1004,6 +1004,8 @@ static struct radeon_asic rv6xx_asic = { | |||
1004 | .wait_for_vblank = &avivo_wait_for_vblank, | 1004 | .wait_for_vblank = &avivo_wait_for_vblank, |
1005 | .set_backlight_level = &atombios_set_backlight_level, | 1005 | .set_backlight_level = &atombios_set_backlight_level, |
1006 | .get_backlight_level = &atombios_get_backlight_level, | 1006 | .get_backlight_level = &atombios_get_backlight_level, |
1007 | .hdmi_enable = &r600_hdmi_enable, | ||
1008 | .hdmi_setmode = &r600_hdmi_setmode, | ||
1007 | }, | 1009 | }, |
1008 | .copy = { | 1010 | .copy = { |
1009 | .blit = &r600_copy_cpdma, | 1011 | .blit = &r600_copy_cpdma, |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 404e25d285ba..f79ee184ffd5 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -1367,6 +1367,7 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev, | |||
1367 | int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info); | 1367 | int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info); |
1368 | uint16_t data_offset, size; | 1368 | uint16_t data_offset, size; |
1369 | struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info; | 1369 | struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info; |
1370 | struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT *ss_assign; | ||
1370 | uint8_t frev, crev; | 1371 | uint8_t frev, crev; |
1371 | int i, num_indices; | 1372 | int i, num_indices; |
1372 | 1373 | ||
@@ -1378,18 +1379,21 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev, | |||
1378 | 1379 | ||
1379 | num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / | 1380 | num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / |
1380 | sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT); | 1381 | sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT); |
1381 | 1382 | ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*) | |
1383 | ((u8 *)&ss_info->asSS_Info[0]); | ||
1382 | for (i = 0; i < num_indices; i++) { | 1384 | for (i = 0; i < num_indices; i++) { |
1383 | if (ss_info->asSS_Info[i].ucSS_Id == id) { | 1385 | if (ss_assign->ucSS_Id == id) { |
1384 | ss->percentage = | 1386 | ss->percentage = |
1385 | le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage); | 1387 | le16_to_cpu(ss_assign->usSpreadSpectrumPercentage); |
1386 | ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType; | 1388 | ss->type = ss_assign->ucSpreadSpectrumType; |
1387 | ss->step = ss_info->asSS_Info[i].ucSS_Step; | 1389 | ss->step = ss_assign->ucSS_Step; |
1388 | ss->delay = ss_info->asSS_Info[i].ucSS_Delay; | 1390 | ss->delay = ss_assign->ucSS_Delay; |
1389 | ss->range = ss_info->asSS_Info[i].ucSS_Range; | 1391 | ss->range = ss_assign->ucSS_Range; |
1390 | ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div; | 1392 | ss->refdiv = ss_assign->ucRecommendedRef_Div; |
1391 | return true; | 1393 | return true; |
1392 | } | 1394 | } |
1395 | ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*) | ||
1396 | ((u8 *)ss_assign + sizeof(struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT)); | ||
1393 | } | 1397 | } |
1394 | } | 1398 | } |
1395 | return false; | 1399 | return false; |
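
This hunk stops indexing asSS_Info[] directly and instead walks the table with a byte pointer advanced by the entry size on every iteration. That idiom matters for the ASIC SS tables further down, where the real entry size depends on the table revision and may not match the declared array element type. A hedged, self-contained sketch of the same pattern; entry_v1/entry_v2 and find_entry() are invented for illustration and are not ATOM structures.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical packed table entries whose size depends on the table
 * revision, so the table cannot be indexed as an array of one fixed type. */
struct entry_v1 { uint8_t id; uint16_t value; } __attribute__((packed));
struct entry_v2 { uint8_t id; uint32_t value; } __attribute__((packed));

static const void *find_entry(const void *table, size_t entry_size,
			      int count, uint8_t id)
{
	const uint8_t *p = table;
	int i;

	for (i = 0; i < count; i++) {
		if (*p == id)		/* id is the first byte in every revision */
			return p;
		p += entry_size;	/* advance by the revision's real stride */
	}
	return NULL;
}

int main(void)
{
	struct entry_v2 tbl[3] = { {1, 100}, {2, 200}, {3, 300} };
	const struct entry_v2 *e = find_entry(tbl, sizeof(tbl[0]), 3, 2);

	printf("found value %u\n", e ? e->value : 0);
	return 0;
}
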
@@ -1477,6 +1481,12 @@ union asic_ss_info { | |||
1477 | struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3; | 1481 | struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3; |
1478 | }; | 1482 | }; |
1479 | 1483 | ||
1484 | union asic_ss_assignment { | ||
1485 | struct _ATOM_ASIC_SS_ASSIGNMENT v1; | ||
1486 | struct _ATOM_ASIC_SS_ASSIGNMENT_V2 v2; | ||
1487 | struct _ATOM_ASIC_SS_ASSIGNMENT_V3 v3; | ||
1488 | }; | ||
1489 | |||
1480 | bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, | 1490 | bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, |
1481 | struct radeon_atom_ss *ss, | 1491 | struct radeon_atom_ss *ss, |
1482 | int id, u32 clock) | 1492 | int id, u32 clock) |
@@ -1485,6 +1495,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, | |||
1485 | int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); | 1495 | int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); |
1486 | uint16_t data_offset, size; | 1496 | uint16_t data_offset, size; |
1487 | union asic_ss_info *ss_info; | 1497 | union asic_ss_info *ss_info; |
1498 | union asic_ss_assignment *ss_assign; | ||
1488 | uint8_t frev, crev; | 1499 | uint8_t frev, crev; |
1489 | int i, num_indices; | 1500 | int i, num_indices; |
1490 | 1501 | ||
@@ -1509,45 +1520,52 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, | |||
1509 | num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / | 1520 | num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / |
1510 | sizeof(ATOM_ASIC_SS_ASSIGNMENT); | 1521 | sizeof(ATOM_ASIC_SS_ASSIGNMENT); |
1511 | 1522 | ||
1523 | ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info.asSpreadSpectrum[0]); | ||
1512 | for (i = 0; i < num_indices; i++) { | 1524 | for (i = 0; i < num_indices; i++) { |
1513 | if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) && | 1525 | if ((ss_assign->v1.ucClockIndication == id) && |
1514 | (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) { | 1526 | (clock <= le32_to_cpu(ss_assign->v1.ulTargetClockRange))) { |
1515 | ss->percentage = | 1527 | ss->percentage = |
1516 | le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage); | 1528 | le16_to_cpu(ss_assign->v1.usSpreadSpectrumPercentage); |
1517 | ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode; | 1529 | ss->type = ss_assign->v1.ucSpreadSpectrumMode; |
1518 | ss->rate = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadRateInKhz); | 1530 | ss->rate = le16_to_cpu(ss_assign->v1.usSpreadRateInKhz); |
1519 | return true; | 1531 | return true; |
1520 | } | 1532 | } |
1533 | ss_assign = (union asic_ss_assignment *) | ||
1534 | ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT)); | ||
1521 | } | 1535 | } |
1522 | break; | 1536 | break; |
1523 | case 2: | 1537 | case 2: |
1524 | num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / | 1538 | num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / |
1525 | sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2); | 1539 | sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2); |
1540 | ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_2.asSpreadSpectrum[0]); | ||
1526 | for (i = 0; i < num_indices; i++) { | 1541 | for (i = 0; i < num_indices; i++) { |
1527 | if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) && | 1542 | if ((ss_assign->v2.ucClockIndication == id) && |
1528 | (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) { | 1543 | (clock <= le32_to_cpu(ss_assign->v2.ulTargetClockRange))) { |
1529 | ss->percentage = | 1544 | ss->percentage = |
1530 | le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage); | 1545 | le16_to_cpu(ss_assign->v2.usSpreadSpectrumPercentage); |
1531 | ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode; | 1546 | ss->type = ss_assign->v2.ucSpreadSpectrumMode; |
1532 | ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz); | 1547 | ss->rate = le16_to_cpu(ss_assign->v2.usSpreadRateIn10Hz); |
1533 | if ((crev == 2) && | 1548 | if ((crev == 2) && |
1534 | ((id == ASIC_INTERNAL_ENGINE_SS) || | 1549 | ((id == ASIC_INTERNAL_ENGINE_SS) || |
1535 | (id == ASIC_INTERNAL_MEMORY_SS))) | 1550 | (id == ASIC_INTERNAL_MEMORY_SS))) |
1536 | ss->rate /= 100; | 1551 | ss->rate /= 100; |
1537 | return true; | 1552 | return true; |
1538 | } | 1553 | } |
1554 | ss_assign = (union asic_ss_assignment *) | ||
1555 | ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2)); | ||
1539 | } | 1556 | } |
1540 | break; | 1557 | break; |
1541 | case 3: | 1558 | case 3: |
1542 | num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / | 1559 | num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / |
1543 | sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3); | 1560 | sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3); |
1561 | ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_3.asSpreadSpectrum[0]); | ||
1544 | for (i = 0; i < num_indices; i++) { | 1562 | for (i = 0; i < num_indices; i++) { |
1545 | if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) && | 1563 | if ((ss_assign->v3.ucClockIndication == id) && |
1546 | (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) { | 1564 | (clock <= le32_to_cpu(ss_assign->v3.ulTargetClockRange))) { |
1547 | ss->percentage = | 1565 | ss->percentage = |
1548 | le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage); | 1566 | le16_to_cpu(ss_assign->v3.usSpreadSpectrumPercentage); |
1549 | ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode; | 1567 | ss->type = ss_assign->v3.ucSpreadSpectrumMode; |
1550 | ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz); | 1568 | ss->rate = le16_to_cpu(ss_assign->v3.usSpreadRateIn10Hz); |
1551 | if ((id == ASIC_INTERNAL_ENGINE_SS) || | 1569 | if ((id == ASIC_INTERNAL_ENGINE_SS) || |
1552 | (id == ASIC_INTERNAL_MEMORY_SS)) | 1570 | (id == ASIC_INTERNAL_MEMORY_SS)) |
1553 | ss->rate /= 100; | 1571 | ss->rate /= 100; |
@@ -1555,6 +1573,8 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, | |||
1555 | radeon_atombios_get_igp_ss_overrides(rdev, ss, id); | 1573 | radeon_atombios_get_igp_ss_overrides(rdev, ss, id); |
1556 | return true; | 1574 | return true; |
1557 | } | 1575 | } |
1576 | ss_assign = (union asic_ss_assignment *) | ||
1577 | ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3)); | ||
1558 | } | 1578 | } |
1559 | break; | 1579 | break; |
1560 | default: | 1580 | default: |
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index ac6ece61a476..66c222836631 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -85,8 +85,9 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) | |||
85 | VRAM, also put everything into VRAM on AGP cards to avoid | 85 | VRAM, also put everything into VRAM on AGP cards to avoid |
86 | image corruptions */ | 86 | image corruptions */ |
87 | if (p->ring == R600_RING_TYPE_UVD_INDEX && | 87 | if (p->ring == R600_RING_TYPE_UVD_INDEX && |
88 | (i == 0 || p->rdev->flags & RADEON_IS_AGP)) { | 88 | p->rdev->family < CHIP_PALM && |
89 | /* TODO: is this still needed for NI+ ? */ | 89 | (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) { |
90 | |||
90 | p->relocs[i].lobj.domain = | 91 | p->relocs[i].lobj.domain = |
91 | RADEON_GEM_DOMAIN_VRAM; | 92 | RADEON_GEM_DOMAIN_VRAM; |
92 | 93 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index e29faa73b574..841d0e09be3e 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -1320,13 +1320,22 @@ int radeon_device_init(struct radeon_device *rdev, | |||
1320 | return r; | 1320 | return r; |
1321 | } | 1321 | } |
1322 | if ((radeon_testing & 1)) { | 1322 | if ((radeon_testing & 1)) { |
1323 | radeon_test_moves(rdev); | 1323 | if (rdev->accel_working) |
1324 | radeon_test_moves(rdev); | ||
1325 | else | ||
1326 | DRM_INFO("radeon: acceleration disabled, skipping move tests\n"); | ||
1324 | } | 1327 | } |
1325 | if ((radeon_testing & 2)) { | 1328 | if ((radeon_testing & 2)) { |
1326 | radeon_test_syncing(rdev); | 1329 | if (rdev->accel_working) |
1330 | radeon_test_syncing(rdev); | ||
1331 | else | ||
1332 | DRM_INFO("radeon: acceleration disabled, skipping sync tests\n"); | ||
1327 | } | 1333 | } |
1328 | if (radeon_benchmarking) { | 1334 | if (radeon_benchmarking) { |
1329 | radeon_benchmark(rdev, radeon_benchmarking); | 1335 | if (rdev->accel_working) |
1336 | radeon_benchmark(rdev, radeon_benchmarking); | ||
1337 | else | ||
1338 | DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n"); | ||
1330 | } | 1339 | } |
1331 | return 0; | 1340 | return 0; |
1332 | } | 1341 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 87e1d69e8fdb..4f6b7fc7ad3c 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -945,6 +945,8 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable) | |||
945 | if (enable) { | 945 | if (enable) { |
946 | mutex_lock(&rdev->pm.mutex); | 946 | mutex_lock(&rdev->pm.mutex); |
947 | rdev->pm.dpm.uvd_active = true; | 947 | rdev->pm.dpm.uvd_active = true; |
948 | /* disable this for now */ | ||
949 | #if 0 | ||
948 | if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0)) | 950 | if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0)) |
949 | dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD; | 951 | dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD; |
950 | else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0)) | 952 | else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0)) |
@@ -954,6 +956,7 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable) | |||
954 | else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2)) | 956 | else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2)) |
955 | dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2; | 957 | dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2; |
956 | else | 958 | else |
959 | #endif | ||
957 | dpm_state = POWER_STATE_TYPE_INTERNAL_UVD; | 960 | dpm_state = POWER_STATE_TYPE_INTERNAL_UVD; |
958 | rdev->pm.dpm.state = dpm_state; | 961 | rdev->pm.dpm.state = dpm_state; |
959 | mutex_unlock(&rdev->pm.mutex); | 962 | mutex_unlock(&rdev->pm.mutex); |
@@ -1002,7 +1005,7 @@ static void radeon_pm_resume_old(struct radeon_device *rdev) | |||
1002 | { | 1005 | { |
1003 | /* set up the default clocks if the MC ucode is loaded */ | 1006 | /* set up the default clocks if the MC ucode is loaded */ |
1004 | if ((rdev->family >= CHIP_BARTS) && | 1007 | if ((rdev->family >= CHIP_BARTS) && |
1005 | (rdev->family <= CHIP_HAINAN) && | 1008 | (rdev->family <= CHIP_CAYMAN) && |
1006 | rdev->mc_fw) { | 1009 | rdev->mc_fw) { |
1007 | if (rdev->pm.default_vddc) | 1010 | if (rdev->pm.default_vddc) |
1008 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, | 1011 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, |
@@ -1046,7 +1049,7 @@ static void radeon_pm_resume_dpm(struct radeon_device *rdev) | |||
1046 | if (ret) { | 1049 | if (ret) { |
1047 | DRM_ERROR("radeon: dpm resume failed\n"); | 1050 | DRM_ERROR("radeon: dpm resume failed\n"); |
1048 | if ((rdev->family >= CHIP_BARTS) && | 1051 | if ((rdev->family >= CHIP_BARTS) && |
1049 | (rdev->family <= CHIP_HAINAN) && | 1052 | (rdev->family <= CHIP_CAYMAN) && |
1050 | rdev->mc_fw) { | 1053 | rdev->mc_fw) { |
1051 | if (rdev->pm.default_vddc) | 1054 | if (rdev->pm.default_vddc) |
1052 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, | 1055 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, |
@@ -1097,7 +1100,7 @@ static int radeon_pm_init_old(struct radeon_device *rdev) | |||
1097 | radeon_pm_init_profile(rdev); | 1100 | radeon_pm_init_profile(rdev); |
1098 | /* set up the default clocks if the MC ucode is loaded */ | 1101 | /* set up the default clocks if the MC ucode is loaded */ |
1099 | if ((rdev->family >= CHIP_BARTS) && | 1102 | if ((rdev->family >= CHIP_BARTS) && |
1100 | (rdev->family <= CHIP_HAINAN) && | 1103 | (rdev->family <= CHIP_CAYMAN) && |
1101 | rdev->mc_fw) { | 1104 | rdev->mc_fw) { |
1102 | if (rdev->pm.default_vddc) | 1105 | if (rdev->pm.default_vddc) |
1103 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, | 1106 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, |
@@ -1183,7 +1186,7 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev) | |||
1183 | if (ret) { | 1186 | if (ret) { |
1184 | rdev->pm.dpm_enabled = false; | 1187 | rdev->pm.dpm_enabled = false; |
1185 | if ((rdev->family >= CHIP_BARTS) && | 1188 | if ((rdev->family >= CHIP_BARTS) && |
1186 | (rdev->family <= CHIP_HAINAN) && | 1189 | (rdev->family <= CHIP_CAYMAN) && |
1187 | rdev->mc_fw) { | 1190 | rdev->mc_fw) { |
1188 | if (rdev->pm.default_vddc) | 1191 | if (rdev->pm.default_vddc) |
1189 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, | 1192 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, |
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 46a25f037b84..18254e1c3e71 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c | |||
@@ -839,9 +839,11 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data) | |||
839 | * packet that is the root issue | 839 | * packet that is the root issue |
840 | */ | 840 | */ |
841 | i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask; | 841 | i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask; |
842 | for (j = 0; j <= (count + 32); j++) { | 842 | if (ring->ready) { |
843 | seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]); | 843 | for (j = 0; j <= (count + 32); j++) { |
844 | i = (i + 1) & ring->ptr_mask; | 844 | seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]); |
845 | i = (i + 1) & ring->ptr_mask; | ||
846 | } | ||
845 | } | 847 | } |
846 | return 0; | 848 | return 0; |
847 | } | 849 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index f4d6bcee9006..12e8099a0823 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c | |||
@@ -36,8 +36,8 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag) | |||
36 | struct radeon_bo *vram_obj = NULL; | 36 | struct radeon_bo *vram_obj = NULL; |
37 | struct radeon_bo **gtt_obj = NULL; | 37 | struct radeon_bo **gtt_obj = NULL; |
38 | uint64_t gtt_addr, vram_addr; | 38 | uint64_t gtt_addr, vram_addr; |
39 | unsigned i, n, size; | 39 | unsigned n, size; |
40 | int r, ring; | 40 | int i, r, ring; |
41 | 41 | ||
42 | switch (flag) { | 42 | switch (flag) { |
43 | case RADEON_TEST_COPY_DMA: | 43 | case RADEON_TEST_COPY_DMA: |
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 1a01bbff9bfa..4f2e73f79638 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c | |||
@@ -476,8 +476,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, | |||
476 | return -EINVAL; | 476 | return -EINVAL; |
477 | } | 477 | } |
478 | 478 | ||
479 | /* TODO: is this still necessary on NI+ ? */ | 479 | if (p->rdev->family < CHIP_PALM && (cmd == 0 || cmd == 0x3) && |
480 | if ((cmd == 0 || cmd == 0x3) && | ||
481 | (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) { | 480 | (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) { |
482 | DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n", | 481 | DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n", |
483 | start, end); | 482 | start, end); |
@@ -799,7 +798,8 @@ void radeon_uvd_note_usage(struct radeon_device *rdev) | |||
799 | (rdev->pm.dpm.hd != hd)) { | 798 | (rdev->pm.dpm.hd != hd)) { |
800 | rdev->pm.dpm.sd = sd; | 799 | rdev->pm.dpm.sd = sd; |
801 | rdev->pm.dpm.hd = hd; | 800 | rdev->pm.dpm.hd = hd; |
802 | streams_changed = true; | 801 | /* disable this for now */ |
802 | /*streams_changed = true;*/ | ||
803 | } | 803 | } |
804 | } | 804 | } |
805 | 805 | ||
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index c354c1094967..d4652af425b8 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
@@ -85,6 +85,9 @@ extern void si_dma_vm_set_page(struct radeon_device *rdev, | |||
85 | uint32_t incr, uint32_t flags); | 85 | uint32_t incr, uint32_t flags); |
86 | static void si_enable_gui_idle_interrupt(struct radeon_device *rdev, | 86 | static void si_enable_gui_idle_interrupt(struct radeon_device *rdev, |
87 | bool enable); | 87 | bool enable); |
88 | static void si_fini_pg(struct radeon_device *rdev); | ||
89 | static void si_fini_cg(struct radeon_device *rdev); | ||
90 | static void si_rlc_stop(struct radeon_device *rdev); | ||
88 | 91 | ||
89 | static const u32 verde_rlc_save_restore_register_list[] = | 92 | static const u32 verde_rlc_save_restore_register_list[] = |
90 | { | 93 | { |
@@ -3608,6 +3611,13 @@ static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) | |||
3608 | dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", | 3611 | dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", |
3609 | RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); | 3612 | RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); |
3610 | 3613 | ||
3614 | /* disable PG/CG */ | ||
3615 | si_fini_pg(rdev); | ||
3616 | si_fini_cg(rdev); | ||
3617 | |||
3618 | /* stop the rlc */ | ||
3619 | si_rlc_stop(rdev); | ||
3620 | |||
3611 | /* Disable CP parsing/prefetching */ | 3621 | /* Disable CP parsing/prefetching */ |
3612 | WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT); | 3622 | WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT); |
3613 | 3623 | ||
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index cfe5d4d28915..2332aa1bf93c 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c | |||
@@ -2910,6 +2910,7 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, | |||
2910 | bool disable_sclk_switching = false; | 2910 | bool disable_sclk_switching = false; |
2911 | u32 mclk, sclk; | 2911 | u32 mclk, sclk; |
2912 | u16 vddc, vddci; | 2912 | u16 vddc, vddci; |
2913 | u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; | ||
2913 | int i; | 2914 | int i; |
2914 | 2915 | ||
2915 | if ((rdev->pm.dpm.new_active_crtc_count > 1) || | 2916 | if ((rdev->pm.dpm.new_active_crtc_count > 1) || |
@@ -2943,6 +2944,29 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, | |||
2943 | } | 2944 | } |
2944 | } | 2945 | } |
2945 | 2946 | ||
2947 | /* limit clocks to max supported clocks based on voltage dependency tables */ | ||
2948 | btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk, | ||
2949 | &max_sclk_vddc); | ||
2950 | btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, | ||
2951 | &max_mclk_vddci); | ||
2952 | btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, | ||
2953 | &max_mclk_vddc); | ||
2954 | |||
2955 | for (i = 0; i < ps->performance_level_count; i++) { | ||
2956 | if (max_sclk_vddc) { | ||
2957 | if (ps->performance_levels[i].sclk > max_sclk_vddc) | ||
2958 | ps->performance_levels[i].sclk = max_sclk_vddc; | ||
2959 | } | ||
2960 | if (max_mclk_vddci) { | ||
2961 | if (ps->performance_levels[i].mclk > max_mclk_vddci) | ||
2962 | ps->performance_levels[i].mclk = max_mclk_vddci; | ||
2963 | } | ||
2964 | if (max_mclk_vddc) { | ||
2965 | if (ps->performance_levels[i].mclk > max_mclk_vddc) | ||
2966 | ps->performance_levels[i].mclk = max_mclk_vddc; | ||
2967 | } | ||
2968 | } | ||
2969 | |||
2946 | /* XXX validate the min clocks required for display */ | 2970 | /* XXX validate the min clocks required for display */ |
2947 | 2971 | ||
2948 | if (disable_mclk_switching) { | 2972 | if (disable_mclk_switching) { |
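
The block added above clamps every performance level's engine and memory clock to the highest clock that appears in the corresponding voltage dependency table, since a clock above the last table entry has no voltage defined for it. A hedged sketch of that clamping step; the dep_entry table and its ordering are assumptions made for the example, not the radeon dpm structures.

#include <stdio.h>

struct dep_entry { unsigned int clk; unsigned int voltage; };

static unsigned int max_supported_clk(const struct dep_entry *table, int count)
{
	/* this illustrative table is sorted by clock, so the last entry
	 * holds the highest clock a voltage is defined for */
	return count ? table[count - 1].clk : 0;
}

int main(void)
{
	struct dep_entry vddc_sclk[] = { {30000, 900}, {60000, 1000}, {80000, 1100} };
	unsigned int requested = 92500;
	unsigned int limit = max_supported_clk(vddc_sclk, 3);

	if (limit && requested > limit)
		requested = limit;	/* clamp, as the new dpm code does */
	printf("clamped sclk: %u (limit %u)\n", requested, limit);
	return 0;
}
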
@@ -5184,7 +5208,7 @@ static int si_set_mc_special_registers(struct radeon_device *rdev, | |||
5184 | table->mc_reg_table_entry[k].mc_data[j] |= 0x100; | 5208 | table->mc_reg_table_entry[k].mc_data[j] |= 0x100; |
5185 | } | 5209 | } |
5186 | j++; | 5210 | j++; |
5187 | if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) | 5211 | if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) |
5188 | return -EINVAL; | 5212 | return -EINVAL; |
5189 | 5213 | ||
5190 | if (!pi->mem_gddr5) { | 5214 | if (!pi->mem_gddr5) { |
@@ -5194,7 +5218,7 @@ static int si_set_mc_special_registers(struct radeon_device *rdev, | |||
5194 | table->mc_reg_table_entry[k].mc_data[j] = | 5218 | table->mc_reg_table_entry[k].mc_data[j] = |
5195 | (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16; | 5219 | (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16; |
5196 | j++; | 5220 | j++; |
5197 | if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) | 5221 | if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) |
5198 | return -EINVAL; | 5222 | return -EINVAL; |
5199 | } | 5223 | } |
5200 | break; | 5224 | break; |
@@ -5207,7 +5231,7 @@ static int si_set_mc_special_registers(struct radeon_device *rdev, | |||
5207 | (temp_reg & 0xffff0000) | | 5231 | (temp_reg & 0xffff0000) | |
5208 | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); | 5232 | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); |
5209 | j++; | 5233 | j++; |
5210 | if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) | 5234 | if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) |
5211 | return -EINVAL; | 5235 | return -EINVAL; |
5212 | break; | 5236 | break; |
5213 | default: | 5237 | default: |
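
The three si_set_mc_special_registers() hunks tighten the same bounds check from > to >=: j is used as an index into mc_data[], and assuming that array holds SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE elements, the old test still lets j equal to the array size through, one element past the end. A minimal illustration of the off-by-one (ARRAY_SIZE stands in for the SMC constant).

#include <stdio.h>

#define ARRAY_SIZE 16

int main(void)
{
	int j = ARRAY_SIZE;	/* first invalid index into a 16-element array */

	/* old check: false for j == ARRAY_SIZE, so element 16 of a
	 * 16-element array would be written, one past the end */
	if (j > ARRAY_SIZE)
		printf("old check rejects j=%d\n", j);
	else
		printf("old check lets j=%d through (out of bounds)\n", j);

	/* fixed check: valid indices are 0 .. ARRAY_SIZE-1 */
	if (j >= ARRAY_SIZE)
		printf("new check rejects j=%d\n", j);
	return 0;
}
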
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index 52d2ab6b67a0..7e2e0ea66a00 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h | |||
@@ -1553,7 +1553,7 @@ | |||
1553 | * 6. COMMAND [30:21] | BYTE_COUNT [20:0] | 1553 | * 6. COMMAND [30:21] | BYTE_COUNT [20:0] |
1554 | */ | 1554 | */ |
1555 | # define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20) | 1555 | # define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20) |
1556 | /* 0 - SRC_ADDR | 1556 | /* 0 - DST_ADDR |
1557 | * 1 - GDS | 1557 | * 1 - GDS |
1558 | */ | 1558 | */ |
1559 | # define PACKET3_CP_DMA_ENGINE(x) ((x) << 27) | 1559 | # define PACKET3_CP_DMA_ENGINE(x) ((x) << 27) |
@@ -1568,7 +1568,7 @@ | |||
1568 | # define PACKET3_CP_DMA_CP_SYNC (1 << 31) | 1568 | # define PACKET3_CP_DMA_CP_SYNC (1 << 31) |
1569 | /* COMMAND */ | 1569 | /* COMMAND */ |
1570 | # define PACKET3_CP_DMA_DIS_WC (1 << 21) | 1570 | # define PACKET3_CP_DMA_DIS_WC (1 << 21) |
1571 | # define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23) | 1571 | # define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22) |
1572 | /* 0 - none | 1572 | /* 0 - none |
1573 | * 1 - 8 in 16 | 1573 | * 1 - 8 in 16 |
1574 | * 2 - 8 in 32 | 1574 | * 2 - 8 in 32 |
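
Both r600d.h and sid.h move the PACKET3_CP_DMA_CMD_SRC_SWAP shift from 23 to 22. The COMMAND word documented above ([30:21]) packs several small fields; the positions used below (SRC_SWAP at bits 23:22 next to DST_SWAP at bits 25:24) are an assumption for illustration, the diff itself only shows the corrected shift. With the wrong shift a two-bit value spills into the neighbouring field:

#include <stdio.h>
#include <stdint.h>

#define SRC_SWAP_OK(x)    ((uint32_t)(x) << 22)	/* corrected shift */
#define SRC_SWAP_WRONG(x) ((uint32_t)(x) << 23)	/* old, off-by-one shift */
#define DST_SWAP(x)       ((uint32_t)(x) << 24)	/* assumed neighbouring field */

int main(void)
{
	uint32_t good = SRC_SWAP_OK(2) | DST_SWAP(1);
	uint32_t bad  = SRC_SWAP_WRONG(2) | DST_SWAP(1);

	/* with the wrong shift, the high bit of SRC_SWAP lands on DST_SWAP's
	 * low bit, so the word decodes as SRC_SWAP=0, DST_SWAP=1 */
	printf("correct shift: 0x%08x\n", good);	/* 0x01800000 */
	printf("wrong shift  : 0x%08x\n", bad);		/* 0x01000000 */
	return 0;
}
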
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c index 7f998bf1cc9d..9364129ba292 100644 --- a/drivers/gpu/drm/radeon/trinity_dpm.c +++ b/drivers/gpu/drm/radeon/trinity_dpm.c | |||
@@ -1868,7 +1868,7 @@ int trinity_dpm_init(struct radeon_device *rdev) | |||
1868 | for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) | 1868 | for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) |
1869 | pi->at[i] = TRINITY_AT_DFLT; | 1869 | pi->at[i] = TRINITY_AT_DFLT; |
1870 | 1870 | ||
1871 | pi->enable_bapm = true; | 1871 | pi->enable_bapm = false; |
1872 | pi->enable_nbps_policy = true; | 1872 | pi->enable_nbps_policy = true; |
1873 | pi->enable_sclk_ds = true; | 1873 | pi->enable_sclk_ds = true; |
1874 | pi->enable_gfx_power_gating = true; | 1874 | pi->enable_gfx_power_gating = true; |
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c index 7266805d9786..3100fa9cb52f 100644 --- a/drivers/gpu/drm/radeon/uvd_v1_0.c +++ b/drivers/gpu/drm/radeon/uvd_v1_0.c | |||
@@ -212,8 +212,8 @@ int uvd_v1_0_start(struct radeon_device *rdev) | |||
212 | /* enable VCPU clock */ | 212 | /* enable VCPU clock */ |
213 | WREG32(UVD_VCPU_CNTL, 1 << 9); | 213 | WREG32(UVD_VCPU_CNTL, 1 << 9); |
214 | 214 | ||
215 | /* enable UMC */ | 215 | /* enable UMC and NC0 */ |
216 | WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8)); | 216 | WREG32_P(UVD_LMI_CTRL2, 1 << 13, ~((1 << 8) | (1 << 13))); |
217 | 217 | ||
218 | /* boot up the VCPU */ | 218 | /* boot up the VCPU */ |
219 | WREG32(UVD_SOFT_RESET, 0); | 219 | WREG32(UVD_SOFT_RESET, 0); |
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 71b70e3a7a71..c91d547191dd 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig | |||
@@ -241,6 +241,7 @@ config HID_HOLTEK | |||
241 | - Sharkoon Drakonia / Perixx MX-2000 gaming mice | 241 | - Sharkoon Drakonia / Perixx MX-2000 gaming mice |
242 | - Tracer Sniper TRM-503 / NOVA Gaming Slider X200 / | 242 | - Tracer Sniper TRM-503 / NOVA Gaming Slider X200 / |
243 | Zalman ZM-GM1 | 243 | Zalman ZM-GM1 |
244 | - SHARKOON DarkGlider Gaming mouse | ||
244 | 245 | ||
245 | config HOLTEK_FF | 246 | config HOLTEK_FF |
246 | bool "Holtek On Line Grip force feedback support" | 247 | bool "Holtek On Line Grip force feedback support" |
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index b8470b1a10fe..5a8c01112a23 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
@@ -1715,6 +1715,7 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
1715 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) }, | 1715 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) }, |
1716 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) }, | 1716 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) }, |
1717 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) }, | 1717 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) }, |
1718 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) }, | ||
1718 | { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_580) }, | 1719 | { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_580) }, |
1719 | { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) }, | 1720 | { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) }, |
1720 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) }, | 1721 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) }, |
diff --git a/drivers/hid/hid-holtek-mouse.c b/drivers/hid/hid-holtek-mouse.c index 7e6db3cf46f9..e696566cde46 100644 --- a/drivers/hid/hid-holtek-mouse.c +++ b/drivers/hid/hid-holtek-mouse.c | |||
@@ -27,6 +27,7 @@ | |||
27 | * - USB ID 04d9:a067, sold as Sharkoon Drakonia and Perixx MX-2000 | 27 | * - USB ID 04d9:a067, sold as Sharkoon Drakonia and Perixx MX-2000 |
28 | * - USB ID 04d9:a04a, sold as Tracer Sniper TRM-503, NOVA Gaming Slider X200 | 28 | * - USB ID 04d9:a04a, sold as Tracer Sniper TRM-503, NOVA Gaming Slider X200 |
29 | * and Zalman ZM-GM1 | 29 | * and Zalman ZM-GM1 |
30 | * - USB ID 04d9:a081, sold as SHARKOON DarkGlider Gaming mouse | ||
30 | */ | 31 | */ |
31 | 32 | ||
32 | static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, | 33 | static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, |
@@ -46,6 +47,7 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, | |||
46 | } | 47 | } |
47 | break; | 48 | break; |
48 | case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A: | 49 | case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A: |
50 | case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081: | ||
49 | if (*rsize >= 113 && rdesc[106] == 0xff && rdesc[107] == 0x7f | 51 | if (*rsize >= 113 && rdesc[106] == 0xff && rdesc[107] == 0x7f |
50 | && rdesc[111] == 0xff && rdesc[112] == 0x7f) { | 52 | && rdesc[111] == 0xff && rdesc[112] == 0x7f) { |
51 | hid_info(hdev, "Fixing up report descriptor\n"); | 53 | hid_info(hdev, "Fixing up report descriptor\n"); |
@@ -63,6 +65,8 @@ static const struct hid_device_id holtek_mouse_devices[] = { | |||
63 | USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) }, | 65 | USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) }, |
64 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, | 66 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, |
65 | USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) }, | 67 | USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) }, |
68 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, | ||
69 | USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) }, | ||
66 | { } | 70 | { } |
67 | }; | 71 | }; |
68 | MODULE_DEVICE_TABLE(hid, holtek_mouse_devices); | 72 | MODULE_DEVICE_TABLE(hid, holtek_mouse_devices); |
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index e60e8d530697..9cbc7ab07dfa 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
@@ -450,6 +450,7 @@ | |||
450 | #define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD 0xa055 | 450 | #define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD 0xa055 |
451 | #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067 0xa067 | 451 | #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067 0xa067 |
452 | #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A 0xa04a | 452 | #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A 0xa04a |
453 | #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081 0xa081 | ||
453 | 454 | ||
454 | #define USB_VENDOR_ID_IMATION 0x0718 | 455 | #define USB_VENDOR_ID_IMATION 0x0718 |
455 | #define USB_DEVICE_ID_DISC_STAKKA 0xd000 | 456 | #define USB_DEVICE_ID_DISC_STAKKA 0xd000 |
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c index 602c188e9d86..6101816a7ddd 100644 --- a/drivers/hid/hid-roccat-kone.c +++ b/drivers/hid/hid-roccat-kone.c | |||
@@ -382,7 +382,7 @@ static ssize_t kone_sysfs_write_profilex(struct file *fp, | |||
382 | } | 382 | } |
383 | #define PROFILE_ATTR(number) \ | 383 | #define PROFILE_ATTR(number) \ |
384 | static struct bin_attribute bin_attr_profile##number = { \ | 384 | static struct bin_attribute bin_attr_profile##number = { \ |
385 | .attr = { .name = "profile##number", .mode = 0660 }, \ | 385 | .attr = { .name = "profile" #number, .mode = 0660 }, \ |
386 | .size = sizeof(struct kone_profile), \ | 386 | .size = sizeof(struct kone_profile), \ |
387 | .read = kone_sysfs_read_profilex, \ | 387 | .read = kone_sysfs_read_profilex, \ |
388 | .write = kone_sysfs_write_profilex, \ | 388 | .write = kone_sysfs_write_profilex, \ |
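
The roccat fix above (repeated in the koneplus, kovaplus and pyra hunks below) is a classic preprocessor trap: ## inside a string literal is not token pasting, so the sysfs attribute was literally named "profile##number". The corrected form uses the stringize operator and relies on the compiler concatenating adjacent string literals. A standalone demonstration:

#include <stdio.h>

/* broken: ## inside a string literal is never expanded, the name stays
 * literally "profile##number" */
#define NAME_BROKEN(number) "profile##number"

/* fixed: #number stringizes the argument, and adjacent string literals
 * are concatenated by the compiler into "profile2" */
#define NAME_FIXED(number) "profile" #number

int main(void)
{
	printf("broken: %s\n", NAME_BROKEN(2));	/* profile##number */
	printf("fixed : %s\n", NAME_FIXED(2));	/* profile2 */
	return 0;
}
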
diff --git a/drivers/hid/hid-roccat-koneplus.c b/drivers/hid/hid-roccat-koneplus.c index 5ddf605b6b89..5e99fcdc71b9 100644 --- a/drivers/hid/hid-roccat-koneplus.c +++ b/drivers/hid/hid-roccat-koneplus.c | |||
@@ -229,13 +229,13 @@ static ssize_t koneplus_sysfs_read_profilex_buttons(struct file *fp, | |||
229 | 229 | ||
230 | #define PROFILE_ATTR(number) \ | 230 | #define PROFILE_ATTR(number) \ |
231 | static struct bin_attribute bin_attr_profile##number##_settings = { \ | 231 | static struct bin_attribute bin_attr_profile##number##_settings = { \ |
232 | .attr = { .name = "profile##number##_settings", .mode = 0440 }, \ | 232 | .attr = { .name = "profile" #number "_settings", .mode = 0440 }, \ |
233 | .size = KONEPLUS_SIZE_PROFILE_SETTINGS, \ | 233 | .size = KONEPLUS_SIZE_PROFILE_SETTINGS, \ |
234 | .read = koneplus_sysfs_read_profilex_settings, \ | 234 | .read = koneplus_sysfs_read_profilex_settings, \ |
235 | .private = &profile_numbers[number-1], \ | 235 | .private = &profile_numbers[number-1], \ |
236 | }; \ | 236 | }; \ |
237 | static struct bin_attribute bin_attr_profile##number##_buttons = { \ | 237 | static struct bin_attribute bin_attr_profile##number##_buttons = { \ |
238 | .attr = { .name = "profile##number##_buttons", .mode = 0440 }, \ | 238 | .attr = { .name = "profile" #number "_buttons", .mode = 0440 }, \ |
239 | .size = KONEPLUS_SIZE_PROFILE_BUTTONS, \ | 239 | .size = KONEPLUS_SIZE_PROFILE_BUTTONS, \ |
240 | .read = koneplus_sysfs_read_profilex_buttons, \ | 240 | .read = koneplus_sysfs_read_profilex_buttons, \ |
241 | .private = &profile_numbers[number-1], \ | 241 | .private = &profile_numbers[number-1], \ |
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c index 515bc03136c0..0c8e1ef0b67d 100644 --- a/drivers/hid/hid-roccat-kovaplus.c +++ b/drivers/hid/hid-roccat-kovaplus.c | |||
@@ -257,13 +257,13 @@ static ssize_t kovaplus_sysfs_read_profilex_buttons(struct file *fp, | |||
257 | 257 | ||
258 | #define PROFILE_ATTR(number) \ | 258 | #define PROFILE_ATTR(number) \ |
259 | static struct bin_attribute bin_attr_profile##number##_settings = { \ | 259 | static struct bin_attribute bin_attr_profile##number##_settings = { \ |
260 | .attr = { .name = "profile##number##_settings", .mode = 0440 }, \ | 260 | .attr = { .name = "profile" #number "_settings", .mode = 0440 }, \ |
261 | .size = KOVAPLUS_SIZE_PROFILE_SETTINGS, \ | 261 | .size = KOVAPLUS_SIZE_PROFILE_SETTINGS, \ |
262 | .read = kovaplus_sysfs_read_profilex_settings, \ | 262 | .read = kovaplus_sysfs_read_profilex_settings, \ |
263 | .private = &profile_numbers[number-1], \ | 263 | .private = &profile_numbers[number-1], \ |
264 | }; \ | 264 | }; \ |
265 | static struct bin_attribute bin_attr_profile##number##_buttons = { \ | 265 | static struct bin_attribute bin_attr_profile##number##_buttons = { \ |
266 | .attr = { .name = "profile##number##_buttons", .mode = 0440 }, \ | 266 | .attr = { .name = "profile" #number "_buttons", .mode = 0440 }, \ |
267 | .size = KOVAPLUS_SIZE_PROFILE_BUTTONS, \ | 267 | .size = KOVAPLUS_SIZE_PROFILE_BUTTONS, \ |
268 | .read = kovaplus_sysfs_read_profilex_buttons, \ | 268 | .read = kovaplus_sysfs_read_profilex_buttons, \ |
269 | .private = &profile_numbers[number-1], \ | 269 | .private = &profile_numbers[number-1], \ |
diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c index 5a6dbbeee790..1a07e07d99a0 100644 --- a/drivers/hid/hid-roccat-pyra.c +++ b/drivers/hid/hid-roccat-pyra.c | |||
@@ -225,13 +225,13 @@ static ssize_t pyra_sysfs_read_profilex_buttons(struct file *fp, | |||
225 | 225 | ||
226 | #define PROFILE_ATTR(number) \ | 226 | #define PROFILE_ATTR(number) \ |
227 | static struct bin_attribute bin_attr_profile##number##_settings = { \ | 227 | static struct bin_attribute bin_attr_profile##number##_settings = { \ |
228 | .attr = { .name = "profile##number##_settings", .mode = 0440 }, \ | 228 | .attr = { .name = "profile" #number "_settings", .mode = 0440 }, \ |
229 | .size = PYRA_SIZE_PROFILE_SETTINGS, \ | 229 | .size = PYRA_SIZE_PROFILE_SETTINGS, \ |
230 | .read = pyra_sysfs_read_profilex_settings, \ | 230 | .read = pyra_sysfs_read_profilex_settings, \ |
231 | .private = &profile_numbers[number-1], \ | 231 | .private = &profile_numbers[number-1], \ |
232 | }; \ | 232 | }; \ |
233 | static struct bin_attribute bin_attr_profile##number##_buttons = { \ | 233 | static struct bin_attribute bin_attr_profile##number##_buttons = { \ |
234 | .attr = { .name = "profile##number##_buttons", .mode = 0440 }, \ | 234 | .attr = { .name = "profile" #number "_buttons", .mode = 0440 }, \ |
235 | .size = PYRA_SIZE_PROFILE_BUTTONS, \ | 235 | .size = PYRA_SIZE_PROFILE_BUTTONS, \ |
236 | .read = pyra_sysfs_read_profilex_buttons, \ | 236 | .read = pyra_sysfs_read_profilex_buttons, \ |
237 | .private = &profile_numbers[number-1], \ | 237 | .private = &profile_numbers[number-1], \ |
diff --git a/drivers/hid/hid-wiimote-modules.c b/drivers/hid/hid-wiimote-modules.c index 2e7d644dba18..71adf9e60b13 100644 --- a/drivers/hid/hid-wiimote-modules.c +++ b/drivers/hid/hid-wiimote-modules.c | |||
@@ -119,12 +119,22 @@ static const struct wiimod_ops wiimod_keys = { | |||
119 | * the rumble motor, this flag shouldn't be set. | 119 | * the rumble motor, this flag shouldn't be set. |
120 | */ | 120 | */ |
121 | 121 | ||
122 | /* used by wiimod_rumble and wiipro_rumble */ | ||
123 | static void wiimod_rumble_worker(struct work_struct *work) | ||
124 | { | ||
125 | struct wiimote_data *wdata = container_of(work, struct wiimote_data, | ||
126 | rumble_worker); | ||
127 | |||
128 | spin_lock_irq(&wdata->state.lock); | ||
129 | wiiproto_req_rumble(wdata, wdata->state.cache_rumble); | ||
130 | spin_unlock_irq(&wdata->state.lock); | ||
131 | } | ||
132 | |||
122 | static int wiimod_rumble_play(struct input_dev *dev, void *data, | 133 | static int wiimod_rumble_play(struct input_dev *dev, void *data, |
123 | struct ff_effect *eff) | 134 | struct ff_effect *eff) |
124 | { | 135 | { |
125 | struct wiimote_data *wdata = input_get_drvdata(dev); | 136 | struct wiimote_data *wdata = input_get_drvdata(dev); |
126 | __u8 value; | 137 | __u8 value; |
127 | unsigned long flags; | ||
128 | 138 | ||
129 | /* | 139 | /* |
130 | * The wiimote supports only a single rumble motor so if any magnitude | 140 | * The wiimote supports only a single rumble motor so if any magnitude |
@@ -137,9 +147,10 @@ static int wiimod_rumble_play(struct input_dev *dev, void *data, | |||
137 | else | 147 | else |
138 | value = 0; | 148 | value = 0; |
139 | 149 | ||
140 | spin_lock_irqsave(&wdata->state.lock, flags); | 150 | /* Locking state.lock here might deadlock with input_event() calls. |
141 | wiiproto_req_rumble(wdata, value); | 151 | * schedule_work acts as barrier. Merging multiple changes is fine. */ |
142 | spin_unlock_irqrestore(&wdata->state.lock, flags); | 152 | wdata->state.cache_rumble = value; |
153 | schedule_work(&wdata->rumble_worker); | ||
143 | 154 | ||
144 | return 0; | 155 | return 0; |
145 | } | 156 | } |
@@ -147,6 +158,8 @@ static int wiimod_rumble_play(struct input_dev *dev, void *data, | |||
147 | static int wiimod_rumble_probe(const struct wiimod_ops *ops, | 158 | static int wiimod_rumble_probe(const struct wiimod_ops *ops, |
148 | struct wiimote_data *wdata) | 159 | struct wiimote_data *wdata) |
149 | { | 160 | { |
161 | INIT_WORK(&wdata->rumble_worker, wiimod_rumble_worker); | ||
162 | |||
150 | set_bit(FF_RUMBLE, wdata->input->ffbit); | 163 | set_bit(FF_RUMBLE, wdata->input->ffbit); |
151 | if (input_ff_create_memless(wdata->input, NULL, wiimod_rumble_play)) | 164 | if (input_ff_create_memless(wdata->input, NULL, wiimod_rumble_play)) |
152 | return -ENOMEM; | 165 | return -ENOMEM; |
@@ -159,6 +172,8 @@ static void wiimod_rumble_remove(const struct wiimod_ops *ops, | |||
159 | { | 172 | { |
160 | unsigned long flags; | 173 | unsigned long flags; |
161 | 174 | ||
175 | cancel_work_sync(&wdata->rumble_worker); | ||
176 | |||
162 | spin_lock_irqsave(&wdata->state.lock, flags); | 177 | spin_lock_irqsave(&wdata->state.lock, flags); |
163 | wiiproto_req_rumble(wdata, 0); | 178 | wiiproto_req_rumble(wdata, 0); |
164 | spin_unlock_irqrestore(&wdata->state.lock, flags); | 179 | spin_unlock_irqrestore(&wdata->state.lock, flags); |
@@ -1731,7 +1746,6 @@ static int wiimod_pro_play(struct input_dev *dev, void *data, | |||
1731 | { | 1746 | { |
1732 | struct wiimote_data *wdata = input_get_drvdata(dev); | 1747 | struct wiimote_data *wdata = input_get_drvdata(dev); |
1733 | __u8 value; | 1748 | __u8 value; |
1734 | unsigned long flags; | ||
1735 | 1749 | ||
1736 | /* | 1750 | /* |
1737 | * The wiimote supports only a single rumble motor so if any magnitude | 1751 | * The wiimote supports only a single rumble motor so if any magnitude |
@@ -1744,9 +1758,10 @@ static int wiimod_pro_play(struct input_dev *dev, void *data, | |||
1744 | else | 1758 | else |
1745 | value = 0; | 1759 | value = 0; |
1746 | 1760 | ||
1747 | spin_lock_irqsave(&wdata->state.lock, flags); | 1761 | /* Locking state.lock here might deadlock with input_event() calls. |
1748 | wiiproto_req_rumble(wdata, value); | 1762 | * schedule_work acts as barrier. Merging multiple changes is fine. */ |
1749 | spin_unlock_irqrestore(&wdata->state.lock, flags); | 1763 | wdata->state.cache_rumble = value; |
1764 | schedule_work(&wdata->rumble_worker); | ||
1750 | 1765 | ||
1751 | return 0; | 1766 | return 0; |
1752 | } | 1767 | } |
@@ -1756,6 +1771,8 @@ static int wiimod_pro_probe(const struct wiimod_ops *ops, | |||
1756 | { | 1771 | { |
1757 | int ret, i; | 1772 | int ret, i; |
1758 | 1773 | ||
1774 | INIT_WORK(&wdata->rumble_worker, wiimod_rumble_worker); | ||
1775 | |||
1759 | wdata->extension.input = input_allocate_device(); | 1776 | wdata->extension.input = input_allocate_device(); |
1760 | if (!wdata->extension.input) | 1777 | if (!wdata->extension.input) |
1761 | return -ENOMEM; | 1778 | return -ENOMEM; |
@@ -1817,12 +1834,13 @@ static void wiimod_pro_remove(const struct wiimod_ops *ops, | |||
1817 | if (!wdata->extension.input) | 1834 | if (!wdata->extension.input) |
1818 | return; | 1835 | return; |
1819 | 1836 | ||
1837 | input_unregister_device(wdata->extension.input); | ||
1838 | wdata->extension.input = NULL; | ||
1839 | cancel_work_sync(&wdata->rumble_worker); | ||
1840 | |||
1820 | spin_lock_irqsave(&wdata->state.lock, flags); | 1841 | spin_lock_irqsave(&wdata->state.lock, flags); |
1821 | wiiproto_req_rumble(wdata, 0); | 1842 | wiiproto_req_rumble(wdata, 0); |
1822 | spin_unlock_irqrestore(&wdata->state.lock, flags); | 1843 | spin_unlock_irqrestore(&wdata->state.lock, flags); |
1823 | |||
1824 | input_unregister_device(wdata->extension.input); | ||
1825 | wdata->extension.input = NULL; | ||
1826 | } | 1844 | } |
1827 | 1845 | ||
1828 | static const struct wiimod_ops wiimod_pro = { | 1846 | static const struct wiimod_ops wiimod_pro = { |
diff --git a/drivers/hid/hid-wiimote.h b/drivers/hid/hid-wiimote.h index f1474f372c0b..75db0c400037 100644 --- a/drivers/hid/hid-wiimote.h +++ b/drivers/hid/hid-wiimote.h | |||
@@ -133,13 +133,15 @@ struct wiimote_state { | |||
133 | __u8 *cmd_read_buf; | 133 | __u8 *cmd_read_buf; |
134 | __u8 cmd_read_size; | 134 | __u8 cmd_read_size; |
135 | 135 | ||
136 | /* calibration data */ | 136 | /* calibration/cache data */ |
137 | __u16 calib_bboard[4][3]; | 137 | __u16 calib_bboard[4][3]; |
138 | __u8 cache_rumble; | ||
138 | }; | 139 | }; |
139 | 140 | ||
140 | struct wiimote_data { | 141 | struct wiimote_data { |
141 | struct hid_device *hdev; | 142 | struct hid_device *hdev; |
142 | struct input_dev *input; | 143 | struct input_dev *input; |
144 | struct work_struct rumble_worker; | ||
143 | struct led_classdev *leds[4]; | 145 | struct led_classdev *leds[4]; |
144 | struct input_dev *accel; | 146 | struct input_dev *accel; |
145 | struct input_dev *ir; | 147 | struct input_dev *ir; |
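
The rumble hunks above stop taking state.lock directly in the force-feedback play callback, which the in-code comment says can deadlock against input_event(); instead the value is cached in the new state.cache_rumble field and the locked wiiproto_req_rumble() call is deferred to the rumble_worker work item, which the remove paths cancel before sending the final rumble-off. A rough userspace model of that cache-and-defer pattern, with pthreads standing in for the kernel workqueue and spinlock; this is not the driver code.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char cache_rumble;

static void *rumble_worker(void *arg)
{
	(void)arg;
	/* the worker, not the event callback, takes the state lock */
	pthread_mutex_lock(&state_lock);
	printf("apply rumble %u under state lock\n", cache_rumble);
	pthread_mutex_unlock(&state_lock);
	return NULL;
}

/* called from the input event path: must not take state_lock itself */
static pthread_t rumble_play(unsigned char value)
{
	pthread_t worker;

	cache_rumble = value;	/* merging several quick changes is fine */
	pthread_create(&worker, NULL, rumble_worker, NULL);
	return worker;
}

int main(void)
{
	pthread_t w = rumble_play(1);

	pthread_join(w, NULL);	/* plays the role of cancel_work_sync() on remove */
	return 0;
}
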
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index 8918dd12bb69..6a6dd5cd7833 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c | |||
@@ -308,18 +308,25 @@ static int hidraw_fasync(int fd, struct file *file, int on) | |||
308 | static void drop_ref(struct hidraw *hidraw, int exists_bit) | 308 | static void drop_ref(struct hidraw *hidraw, int exists_bit) |
309 | { | 309 | { |
310 | if (exists_bit) { | 310 | if (exists_bit) { |
311 | hid_hw_close(hidraw->hid); | ||
312 | hidraw->exist = 0; | 311 | hidraw->exist = 0; |
313 | if (hidraw->open) | 312 | if (hidraw->open) { |
313 | hid_hw_close(hidraw->hid); | ||
314 | wake_up_interruptible(&hidraw->wait); | 314 | wake_up_interruptible(&hidraw->wait); |
315 | } | ||
315 | } else { | 316 | } else { |
316 | --hidraw->open; | 317 | --hidraw->open; |
317 | } | 318 | } |
318 | 319 | if (!hidraw->open) { | |
319 | if (!hidraw->open && !hidraw->exist) { | 320 | if (!hidraw->exist) { |
320 | device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor)); | 321 | device_destroy(hidraw_class, |
321 | hidraw_table[hidraw->minor] = NULL; | 322 | MKDEV(hidraw_major, hidraw->minor)); |
322 | kfree(hidraw); | 323 | hidraw_table[hidraw->minor] = NULL; |
324 | kfree(hidraw); | ||
325 | } else { | ||
326 | /* close device for last reader */ | ||
327 | hid_hw_power(hidraw->hid, PM_HINT_NORMAL); | ||
328 | hid_hw_close(hidraw->hid); | ||
329 | } | ||
323 | } | 330 | } |
324 | } | 331 | } |
325 | 332 | ||
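
The drop_ref() rework separates "device unplugged" from "last reader closed": on removal the transport is only closed if a reader actually had it open, and when the last reader leaves a still-present device the code now restores normal power and closes the transport instead of waiting until the device is also gone. A simplified, hedged model of that decision tree; the struct and helpers below are stand-ins, not hidraw code.

#include <stdio.h>

struct dev { int exist; int open; };

static void hw_close(struct dev *d)        { (void)d; printf("close transport\n"); }
static void hw_power_normal(struct dev *d) { (void)d; printf("power to normal\n"); }
static void destroy_node(struct dev *d)    { (void)d; printf("destroy device node\n"); }

static void drop_ref(struct dev *d, int device_removed)
{
	if (device_removed) {
		d->exist = 0;
		if (d->open)
			hw_close(d);	/* only if a reader actually had it open */
	} else {
		--d->open;
	}

	if (!d->open) {
		if (!d->exist) {
			destroy_node(d);	/* no readers left and device is gone */
		} else {
			/* last reader left but the device still exists */
			hw_power_normal(d);
			hw_close(d);
		}
	}
}

int main(void)
{
	struct dev d = { 1, 1 };

	drop_ref(&d, 0);	/* last reader closes: power normal + close */
	d.exist = 1; d.open = 0;
	drop_ref(&d, 1);	/* device removed with no readers: destroy node */
	return 0;
}
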
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c index 5bf2fb785844..93b00d76374c 100644 --- a/drivers/hid/uhid.c +++ b/drivers/hid/uhid.c | |||
@@ -615,7 +615,7 @@ static const struct file_operations uhid_fops = { | |||
615 | 615 | ||
616 | static struct miscdevice uhid_misc = { | 616 | static struct miscdevice uhid_misc = { |
617 | .fops = &uhid_fops, | 617 | .fops = &uhid_fops, |
618 | .minor = MISC_DYNAMIC_MINOR, | 618 | .minor = UHID_MINOR, |
619 | .name = UHID_NAME, | 619 | .name = UHID_NAME, |
620 | }; | 620 | }; |
621 | 621 | ||
@@ -634,4 +634,5 @@ module_exit(uhid_exit); | |||
634 | MODULE_LICENSE("GPL"); | 634 | MODULE_LICENSE("GPL"); |
635 | MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>"); | 635 | MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>"); |
636 | MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem"); | 636 | MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem"); |
637 | MODULE_ALIAS_MISCDEV(UHID_MINOR); | ||
637 | MODULE_ALIAS("devname:" UHID_NAME); | 638 | MODULE_ALIAS("devname:" UHID_NAME); |
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c index 8f4743ab5fb2..936093e0271e 100644 --- a/drivers/hv/connection.c +++ b/drivers/hv/connection.c | |||
@@ -195,7 +195,7 @@ int vmbus_connect(void) | |||
195 | 195 | ||
196 | do { | 196 | do { |
197 | ret = vmbus_negotiate_version(msginfo, version); | 197 | ret = vmbus_negotiate_version(msginfo, version); |
198 | if (ret) | 198 | if (ret == -ETIMEDOUT) |
199 | goto cleanup; | 199 | goto cleanup; |
200 | 200 | ||
201 | if (vmbus_connection.conn_state == CONNECTED) | 201 | if (vmbus_connection.conn_state == CONNECTED) |
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c index 28b03325b872..09988b289622 100644 --- a/drivers/hv/hv_kvp.c +++ b/drivers/hv/hv_kvp.c | |||
@@ -32,13 +32,17 @@ | |||
32 | /* | 32 | /* |
33 | * Pre win8 version numbers used in ws2008 and ws 2008 r2 (win7) | 33 | * Pre win8 version numbers used in ws2008 and ws 2008 r2 (win7) |
34 | */ | 34 | */ |
35 | #define WS2008_SRV_MAJOR 1 | ||
36 | #define WS2008_SRV_MINOR 0 | ||
37 | #define WS2008_SRV_VERSION (WS2008_SRV_MAJOR << 16 | WS2008_SRV_MINOR) | ||
38 | |||
35 | #define WIN7_SRV_MAJOR 3 | 39 | #define WIN7_SRV_MAJOR 3 |
36 | #define WIN7_SRV_MINOR 0 | 40 | #define WIN7_SRV_MINOR 0 |
37 | #define WIN7_SRV_MAJOR_MINOR (WIN7_SRV_MAJOR << 16 | WIN7_SRV_MINOR) | 41 | #define WIN7_SRV_VERSION (WIN7_SRV_MAJOR << 16 | WIN7_SRV_MINOR) |
38 | 42 | ||
39 | #define WIN8_SRV_MAJOR 4 | 43 | #define WIN8_SRV_MAJOR 4 |
40 | #define WIN8_SRV_MINOR 0 | 44 | #define WIN8_SRV_MINOR 0 |
41 | #define WIN8_SRV_MAJOR_MINOR (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR) | 45 | #define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR) |
42 | 46 | ||
43 | /* | 47 | /* |
44 | * Global state maintained for transaction that is being processed. | 48 | * Global state maintained for transaction that is being processed. |
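
The renamed *_SRV_VERSION macros keep the same encoding as before: the service major number in the upper 16 bits and the minor in the lower 16, which is the value handed to vmbus_prep_negotiate_resp() further down. A tiny worked example; MAKE_SRV_VERSION is a local illustration, not a Hyper-V header macro.

#include <stdio.h>

#define MAKE_SRV_VERSION(major, minor) ((major) << 16 | (minor))

int main(void)
{
	/* major in bits 31:16, minor in bits 15:0 */
	printf("WS2008: 0x%08x\n", MAKE_SRV_VERSION(1, 0));	/* 0x00010000 */
	printf("WIN7  : 0x%08x\n", MAKE_SRV_VERSION(3, 0));	/* 0x00030000 */
	printf("WIN8  : 0x%08x\n", MAKE_SRV_VERSION(4, 0));	/* 0x00040000 */
	return 0;
}
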
@@ -587,6 +591,8 @@ void hv_kvp_onchannelcallback(void *context) | |||
587 | 591 | ||
588 | struct icmsg_hdr *icmsghdrp; | 592 | struct icmsg_hdr *icmsghdrp; |
589 | struct icmsg_negotiate *negop = NULL; | 593 | struct icmsg_negotiate *negop = NULL; |
594 | int util_fw_version; | ||
595 | int kvp_srv_version; | ||
590 | 596 | ||
591 | if (kvp_transaction.active) { | 597 | if (kvp_transaction.active) { |
592 | /* | 598 | /* |
@@ -606,17 +612,26 @@ void hv_kvp_onchannelcallback(void *context) | |||
606 | 612 | ||
607 | if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { | 613 | if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { |
608 | /* | 614 | /* |
609 | * We start with win8 version and if the host cannot | 615 | * Based on the host, select appropriate |
610 | * support that we use the previous version. | 616 | * framework and service versions we will |
617 | * negotiate. | ||
611 | */ | 618 | */ |
612 | if (vmbus_prep_negotiate_resp(icmsghdrp, negop, | 619 | switch (vmbus_proto_version) { |
613 | recv_buffer, UTIL_FW_MAJOR_MINOR, | 620 | case (VERSION_WS2008): |
614 | WIN8_SRV_MAJOR_MINOR)) | 621 | util_fw_version = UTIL_WS2K8_FW_VERSION; |
615 | goto done; | 622 | kvp_srv_version = WS2008_SRV_VERSION; |
616 | 623 | break; | |
624 | case (VERSION_WIN7): | ||
625 | util_fw_version = UTIL_FW_VERSION; | ||
626 | kvp_srv_version = WIN7_SRV_VERSION; | ||
627 | break; | ||
628 | default: | ||
629 | util_fw_version = UTIL_FW_VERSION; | ||
630 | kvp_srv_version = WIN8_SRV_VERSION; | ||
631 | } | ||
617 | vmbus_prep_negotiate_resp(icmsghdrp, negop, | 632 | vmbus_prep_negotiate_resp(icmsghdrp, negop, |
618 | recv_buffer, UTIL_FW_MAJOR_MINOR, | 633 | recv_buffer, util_fw_version, |
619 | WIN7_SRV_MAJOR_MINOR); | 634 | kvp_srv_version); |
620 | 635 | ||
621 | } else { | 636 | } else { |
622 | kvp_msg = (struct hv_kvp_msg *)&recv_buffer[ | 637 | kvp_msg = (struct hv_kvp_msg *)&recv_buffer[ |
@@ -649,7 +664,6 @@ void hv_kvp_onchannelcallback(void *context) | |||
649 | return; | 664 | return; |
650 | 665 | ||
651 | } | 666 | } |
652 | done: | ||
653 | 667 | ||
654 | icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION | 668 | icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION |
655 | | ICMSGHDRFLAG_RESPONSE; | 669 | | ICMSGHDRFLAG_RESPONSE; |
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c index e4572f3f2834..0c3546224376 100644 --- a/drivers/hv/hv_snapshot.c +++ b/drivers/hv/hv_snapshot.c | |||
@@ -26,7 +26,7 @@ | |||
26 | 26 | ||
27 | #define VSS_MAJOR 5 | 27 | #define VSS_MAJOR 5 |
28 | #define VSS_MINOR 0 | 28 | #define VSS_MINOR 0 |
29 | #define VSS_MAJOR_MINOR (VSS_MAJOR << 16 | VSS_MINOR) | 29 | #define VSS_VERSION (VSS_MAJOR << 16 | VSS_MINOR) |
30 | 30 | ||
31 | 31 | ||
32 | 32 | ||
@@ -190,8 +190,8 @@ void hv_vss_onchannelcallback(void *context) | |||
190 | 190 | ||
191 | if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { | 191 | if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { |
192 | vmbus_prep_negotiate_resp(icmsghdrp, negop, | 192 | vmbus_prep_negotiate_resp(icmsghdrp, negop, |
193 | recv_buffer, UTIL_FW_MAJOR_MINOR, | 193 | recv_buffer, UTIL_FW_VERSION, |
194 | VSS_MAJOR_MINOR); | 194 | VSS_VERSION); |
195 | } else { | 195 | } else { |
196 | vss_msg = (struct hv_vss_msg *)&recv_buffer[ | 196 | vss_msg = (struct hv_vss_msg *)&recv_buffer[ |
197 | sizeof(struct vmbuspipe_hdr) + | 197 | sizeof(struct vmbuspipe_hdr) + |
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c index cb82233541b1..273e3ddb3a20 100644 --- a/drivers/hv/hv_util.c +++ b/drivers/hv/hv_util.c | |||
@@ -28,17 +28,32 @@ | |||
28 | #include <linux/reboot.h> | 28 | #include <linux/reboot.h> |
29 | #include <linux/hyperv.h> | 29 | #include <linux/hyperv.h> |
30 | 30 | ||
31 | #define SHUTDOWN_MAJOR 3 | ||
32 | #define SHUTDOWN_MINOR 0 | ||
33 | #define SHUTDOWN_MAJOR_MINOR (SHUTDOWN_MAJOR << 16 | SHUTDOWN_MINOR) | ||
34 | 31 | ||
35 | #define TIMESYNCH_MAJOR 3 | 32 | #define SD_MAJOR 3 |
36 | #define TIMESYNCH_MINOR 0 | 33 | #define SD_MINOR 0 |
37 | #define TIMESYNCH_MAJOR_MINOR (TIMESYNCH_MAJOR << 16 | TIMESYNCH_MINOR) | 34 | #define SD_VERSION (SD_MAJOR << 16 | SD_MINOR) |
38 | 35 | ||
39 | #define HEARTBEAT_MAJOR 3 | 36 | #define SD_WS2008_MAJOR 1 |
40 | #define HEARTBEAT_MINOR 0 | 37 | #define SD_WS2008_VERSION (SD_WS2008_MAJOR << 16 | SD_MINOR) |
41 | #define HEARTBEAT_MAJOR_MINOR (HEARTBEAT_MAJOR << 16 | HEARTBEAT_MINOR) | 38 | |
39 | #define TS_MAJOR 3 | ||
40 | #define TS_MINOR 0 | ||
41 | #define TS_VERSION (TS_MAJOR << 16 | TS_MINOR) | ||
42 | |||
43 | #define TS_WS2008_MAJOR 1 | ||
44 | #define TS_WS2008_VERSION (TS_WS2008_MAJOR << 16 | TS_MINOR) | ||
45 | |||
46 | #define HB_MAJOR 3 | ||
47 | #define HB_MINOR 0 | ||
48 | #define HB_VERSION (HB_MAJOR << 16 | HB_MINOR) | ||
49 | |||
50 | #define HB_WS2008_MAJOR 1 | ||
51 | #define HB_WS2008_VERSION (HB_WS2008_MAJOR << 16 | HB_MINOR) | ||
52 | |||
53 | static int sd_srv_version; | ||
54 | static int ts_srv_version; | ||
55 | static int hb_srv_version; | ||
56 | static int util_fw_version; | ||
42 | 57 | ||
43 | static void shutdown_onchannelcallback(void *context); | 58 | static void shutdown_onchannelcallback(void *context); |
44 | static struct hv_util_service util_shutdown = { | 59 | static struct hv_util_service util_shutdown = { |
@@ -99,8 +114,8 @@ static void shutdown_onchannelcallback(void *context) | |||
99 | 114 | ||
100 | if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { | 115 | if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { |
101 | vmbus_prep_negotiate_resp(icmsghdrp, negop, | 116 | vmbus_prep_negotiate_resp(icmsghdrp, negop, |
102 | shut_txf_buf, UTIL_FW_MAJOR_MINOR, | 117 | shut_txf_buf, util_fw_version, |
103 | SHUTDOWN_MAJOR_MINOR); | 118 | sd_srv_version); |
104 | } else { | 119 | } else { |
105 | shutdown_msg = | 120 | shutdown_msg = |
106 | (struct shutdown_msg_data *)&shut_txf_buf[ | 121 | (struct shutdown_msg_data *)&shut_txf_buf[ |
@@ -216,6 +231,7 @@ static void timesync_onchannelcallback(void *context) | |||
216 | struct icmsg_hdr *icmsghdrp; | 231 | struct icmsg_hdr *icmsghdrp; |
217 | struct ictimesync_data *timedatap; | 232 | struct ictimesync_data *timedatap; |
218 | u8 *time_txf_buf = util_timesynch.recv_buffer; | 233 | u8 *time_txf_buf = util_timesynch.recv_buffer; |
234 | struct icmsg_negotiate *negop = NULL; | ||
219 | 235 | ||
220 | vmbus_recvpacket(channel, time_txf_buf, | 236 | vmbus_recvpacket(channel, time_txf_buf, |
221 | PAGE_SIZE, &recvlen, &requestid); | 237 | PAGE_SIZE, &recvlen, &requestid); |
@@ -225,9 +241,10 @@ static void timesync_onchannelcallback(void *context) | |||
225 | sizeof(struct vmbuspipe_hdr)]; | 241 | sizeof(struct vmbuspipe_hdr)]; |
226 | 242 | ||
227 | if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { | 243 | if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { |
228 | vmbus_prep_negotiate_resp(icmsghdrp, NULL, time_txf_buf, | 244 | vmbus_prep_negotiate_resp(icmsghdrp, negop, |
229 | UTIL_FW_MAJOR_MINOR, | 245 | time_txf_buf, |
230 | TIMESYNCH_MAJOR_MINOR); | 246 | util_fw_version, |
247 | ts_srv_version); | ||
231 | } else { | 248 | } else { |
232 | timedatap = (struct ictimesync_data *)&time_txf_buf[ | 249 | timedatap = (struct ictimesync_data *)&time_txf_buf[ |
233 | sizeof(struct vmbuspipe_hdr) + | 250 | sizeof(struct vmbuspipe_hdr) + |
@@ -257,6 +274,7 @@ static void heartbeat_onchannelcallback(void *context) | |||
257 | struct icmsg_hdr *icmsghdrp; | 274 | struct icmsg_hdr *icmsghdrp; |
258 | struct heartbeat_msg_data *heartbeat_msg; | 275 | struct heartbeat_msg_data *heartbeat_msg; |
259 | u8 *hbeat_txf_buf = util_heartbeat.recv_buffer; | 276 | u8 *hbeat_txf_buf = util_heartbeat.recv_buffer; |
277 | struct icmsg_negotiate *negop = NULL; | ||
260 | 278 | ||
261 | vmbus_recvpacket(channel, hbeat_txf_buf, | 279 | vmbus_recvpacket(channel, hbeat_txf_buf, |
262 | PAGE_SIZE, &recvlen, &requestid); | 280 | PAGE_SIZE, &recvlen, &requestid); |
@@ -266,9 +284,9 @@ static void heartbeat_onchannelcallback(void *context) | |||
266 | sizeof(struct vmbuspipe_hdr)]; | 284 | sizeof(struct vmbuspipe_hdr)]; |
267 | 285 | ||
268 | if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { | 286 | if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { |
269 | vmbus_prep_negotiate_resp(icmsghdrp, NULL, | 287 | vmbus_prep_negotiate_resp(icmsghdrp, negop, |
270 | hbeat_txf_buf, UTIL_FW_MAJOR_MINOR, | 288 | hbeat_txf_buf, util_fw_version, |
271 | HEARTBEAT_MAJOR_MINOR); | 289 | hb_srv_version); |
272 | } else { | 290 | } else { |
273 | heartbeat_msg = | 291 | heartbeat_msg = |
274 | (struct heartbeat_msg_data *)&hbeat_txf_buf[ | 292 | (struct heartbeat_msg_data *)&hbeat_txf_buf[ |
@@ -321,6 +339,25 @@ static int util_probe(struct hv_device *dev, | |||
321 | goto error; | 339 | goto error; |
322 | 340 | ||
323 | hv_set_drvdata(dev, srv); | 341 | hv_set_drvdata(dev, srv); |
342 | /* | ||
343 | * Based on the host; initialize the framework and | ||
344 | * service version numbers we will negotiate. | ||
345 | */ | ||
346 | switch (vmbus_proto_version) { | ||
347 | case (VERSION_WS2008): | ||
348 | util_fw_version = UTIL_WS2K8_FW_VERSION; | ||
349 | sd_srv_version = SD_WS2008_VERSION; | ||
350 | ts_srv_version = TS_WS2008_VERSION; | ||
351 | hb_srv_version = HB_WS2008_VERSION; | ||
352 | break; | ||
353 | |||
354 | default: | ||
355 | util_fw_version = UTIL_FW_VERSION; | ||
356 | sd_srv_version = SD_VERSION; | ||
357 | ts_srv_version = TS_VERSION; | ||
358 | hb_srv_version = HB_VERSION; | ||
359 | } | ||
360 | |||
324 | return 0; | 361 | return 0; |
325 | 362 | ||
326 | error: | 363 | error: |
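The hv_kvp and hv_util hunks above replace the fixed *_MAJOR_MINOR negotiation constants with per-host values chosen from vmbus_proto_version. A minimal standalone C sketch of that selection logic follows; the (major << 16 | minor) encodings are copied from the hv_util.c hunk, the protocol enum is an illustrative stand-in for the constants defined in the real hyperv headers, and the UTIL_FW_VERSION values are left out because their definitions are not shown in this hunk.

#include <stdio.h>

/* Service version encodings copied from the hv_util.c hunk: major << 16 | minor. */
#define SD_VERSION        (3 << 16 | 0)
#define SD_WS2008_VERSION (1 << 16 | 0)
#define TS_VERSION        (3 << 16 | 0)
#define TS_WS2008_VERSION (1 << 16 | 0)
#define HB_VERSION        (3 << 16 | 0)
#define HB_WS2008_VERSION (1 << 16 | 0)

/* Illustrative stand-in for the vmbus protocol constants defined elsewhere. */
enum vmbus_proto { VERSION_WS2008, VERSION_WIN7, VERSION_WIN8 };

static int sd_srv_version, ts_srv_version, hb_srv_version;

/* Mirrors the switch added to util_probe(): a WS2008 host gets the 1.0
 * service versions, every newer host gets the 3.0 versions. */
static void pick_srv_versions(enum vmbus_proto ver)
{
	switch (ver) {
	case VERSION_WS2008:
		sd_srv_version = SD_WS2008_VERSION;
		ts_srv_version = TS_WS2008_VERSION;
		hb_srv_version = HB_WS2008_VERSION;
		break;
	default:
		sd_srv_version = SD_VERSION;
		ts_srv_version = TS_VERSION;
		hb_srv_version = HB_VERSION;
	}
}

int main(void)
{
	pick_srv_versions(VERSION_WS2008);
	printf("shutdown service version %d.%d\n",
	       sd_srv_version >> 16, sd_srv_version & 0xffff);
	return 0;
}

The selected versions are then handed to vmbus_prep_negotiate_resp() in place of the old compile-time constants, as the shutdown, timesync and heartbeat callbacks in the hunk show.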
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c index 62c2e32e25ef..3288f13d2d87 100644 --- a/drivers/hwmon/applesmc.c +++ b/drivers/hwmon/applesmc.c | |||
@@ -230,6 +230,7 @@ static int send_argument(const char *key) | |||
230 | 230 | ||
231 | static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len) | 231 | static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len) |
232 | { | 232 | { |
233 | u8 status, data = 0; | ||
233 | int i; | 234 | int i; |
234 | 235 | ||
235 | if (send_command(cmd) || send_argument(key)) { | 236 | if (send_command(cmd) || send_argument(key)) { |
@@ -237,6 +238,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len) | |||
237 | return -EIO; | 238 | return -EIO; |
238 | } | 239 | } |
239 | 240 | ||
241 | /* This has no effect on newer (2012) SMCs */ | ||
240 | if (send_byte(len, APPLESMC_DATA_PORT)) { | 242 | if (send_byte(len, APPLESMC_DATA_PORT)) { |
241 | pr_warn("%.4s: read len fail\n", key); | 243 | pr_warn("%.4s: read len fail\n", key); |
242 | return -EIO; | 244 | return -EIO; |
@@ -250,6 +252,17 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len) | |||
250 | buffer[i] = inb(APPLESMC_DATA_PORT); | 252 | buffer[i] = inb(APPLESMC_DATA_PORT); |
251 | } | 253 | } |
252 | 254 | ||
255 | /* Read the data port until bit0 is cleared */ | ||
256 | for (i = 0; i < 16; i++) { | ||
257 | udelay(APPLESMC_MIN_WAIT); | ||
258 | status = inb(APPLESMC_CMD_PORT); | ||
259 | if (!(status & 0x01)) | ||
260 | break; | ||
261 | data = inb(APPLESMC_DATA_PORT); | ||
262 | } | ||
263 | if (i) | ||
264 | pr_warn("flushed %d bytes, last value is: %d\n", i, data); | ||
265 | |||
253 | return 0; | 266 | return 0; |
254 | } | 267 | } |
255 | 268 | ||
@@ -525,16 +538,25 @@ static int applesmc_init_smcreg_try(void) | |||
525 | { | 538 | { |
526 | struct applesmc_registers *s = &smcreg; | 539 | struct applesmc_registers *s = &smcreg; |
527 | bool left_light_sensor, right_light_sensor; | 540 | bool left_light_sensor, right_light_sensor; |
541 | unsigned int count; | ||
528 | u8 tmp[1]; | 542 | u8 tmp[1]; |
529 | int ret; | 543 | int ret; |
530 | 544 | ||
531 | if (s->init_complete) | 545 | if (s->init_complete) |
532 | return 0; | 546 | return 0; |
533 | 547 | ||
534 | ret = read_register_count(&s->key_count); | 548 | ret = read_register_count(&count); |
535 | if (ret) | 549 | if (ret) |
536 | return ret; | 550 | return ret; |
537 | 551 | ||
552 | if (s->cache && s->key_count != count) { | ||
553 | pr_warn("key count changed from %d to %d\n", | ||
554 | s->key_count, count); | ||
555 | kfree(s->cache); | ||
556 | s->cache = NULL; | ||
557 | } | ||
558 | s->key_count = count; | ||
559 | |||
538 | if (!s->cache) | 560 | if (!s->cache) |
539 | s->cache = kcalloc(s->key_count, sizeof(*s->cache), GFP_KERNEL); | 561 | s->cache = kcalloc(s->key_count, sizeof(*s->cache), GFP_KERNEL); |
540 | if (!s->cache) | 562 | if (!s->cache) |
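The applesmc hunk adds two defensive steps: after a read, the data port is drained while bit 0 of the status register stays set (the patch also waits APPLESMC_MIN_WAIT between polls, omitted here), and a changed key count now invalidates the cached key table instead of leaving stale entries behind. A standalone toy of the drain loop, with the port I/O replaced by a stub device holding three leftover bytes, looks like this:

#include <stdio.h>

/* Toy model of the SMC ports so the drain loop added to read_smc() can be
 * exercised standalone; on real hardware these are inb() on the command
 * and data ports. */
static int stale_bytes = 3;

static unsigned char read_status(void)
{
	return stale_bytes ? 0x01 : 0x00;	/* bit 0 = data still pending */
}

static unsigned char read_data(void)
{
	stale_bytes--;
	return 0xAB;				/* arbitrary stale byte */
}

int main(void)
{
	unsigned char status, data = 0;
	int i;

	/* Read the data port until bit 0 of the status register clears. */
	for (i = 0; i < 16; i++) {
		status = read_status();
		if (!(status & 0x01))
			break;
		data = read_data();
	}
	if (i)
		printf("flushed %d bytes, last value is: %d\n", i, data);
	return 0;
}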
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c index dbecf08399f8..5888feef1ac5 100644 --- a/drivers/i2c/busses/i2c-designware-core.c +++ b/drivers/i2c/busses/i2c-designware-core.c | |||
@@ -98,6 +98,8 @@ | |||
98 | 98 | ||
99 | #define DW_IC_ERR_TX_ABRT 0x1 | 99 | #define DW_IC_ERR_TX_ABRT 0x1 |
100 | 100 | ||
101 | #define DW_IC_TAR_10BITADDR_MASTER BIT(12) | ||
102 | |||
101 | /* | 103 | /* |
102 | * status codes | 104 | * status codes |
103 | */ | 105 | */ |
@@ -388,22 +390,34 @@ static int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev) | |||
388 | static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) | 390 | static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) |
389 | { | 391 | { |
390 | struct i2c_msg *msgs = dev->msgs; | 392 | struct i2c_msg *msgs = dev->msgs; |
391 | u32 ic_con; | 393 | u32 ic_con, ic_tar = 0; |
392 | 394 | ||
393 | /* Disable the adapter */ | 395 | /* Disable the adapter */ |
394 | __i2c_dw_enable(dev, false); | 396 | __i2c_dw_enable(dev, false); |
395 | 397 | ||
396 | /* set the slave (target) address */ | ||
397 | dw_writel(dev, msgs[dev->msg_write_idx].addr, DW_IC_TAR); | ||
398 | |||
399 | /* if the slave address is ten bit address, enable 10BITADDR */ | 398 | /* if the slave address is ten bit address, enable 10BITADDR */ |
400 | ic_con = dw_readl(dev, DW_IC_CON); | 399 | ic_con = dw_readl(dev, DW_IC_CON); |
401 | if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) | 400 | if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) { |
402 | ic_con |= DW_IC_CON_10BITADDR_MASTER; | 401 | ic_con |= DW_IC_CON_10BITADDR_MASTER; |
403 | else | 402 | /* |
403 | * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing | ||
404 | * mode has to be enabled via bit 12 of IC_TAR register. | ||
405 | * We set it always as I2C_DYNAMIC_TAR_UPDATE can't be | ||
406 | * detected from registers. | ||
407 | */ | ||
408 | ic_tar = DW_IC_TAR_10BITADDR_MASTER; | ||
409 | } else { | ||
404 | ic_con &= ~DW_IC_CON_10BITADDR_MASTER; | 410 | ic_con &= ~DW_IC_CON_10BITADDR_MASTER; |
411 | } | ||
412 | |||
405 | dw_writel(dev, ic_con, DW_IC_CON); | 413 | dw_writel(dev, ic_con, DW_IC_CON); |
406 | 414 | ||
415 | /* | ||
416 | * Set the slave (target) address and enable 10-bit addressing mode | ||
417 | * if applicable. | ||
418 | */ | ||
419 | dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR); | ||
420 | |||
407 | /* Enable the adapter */ | 421 | /* Enable the adapter */ |
408 | __i2c_dw_enable(dev, true); | 422 | __i2c_dw_enable(dev, true); |
409 | 423 | ||
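The designware change defers the DW_IC_TAR write until after DW_IC_CON has been programmed and, for 10-bit targets, also sets bit 12 of IC_TAR, since a core configured with I2C_DYNAMIC_TAR_UPDATE takes the addressing mode from IC_TAR and that configuration cannot be detected from the registers. A register-level sketch of the resulting sequence is below; the register file is a toy array and the IC_CON bit position is illustrative — only the bit-12 IC_TAR flag is taken from the patch.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Register offsets and bits as used in the sketch. */
#define DW_IC_CON                   0x00
#define DW_IC_TAR                   0x04
#define DW_IC_CON_10BITADDR_MASTER  (1u << 4)	/* illustrative bit position */
#define DW_IC_TAR_10BITADDR_MASTER  (1u << 12)	/* bit 12, per the patch */

static uint32_t regs[2];			/* toy register file */
static uint32_t rd(int off)             { return regs[off / 4]; }
static void     wr(int off, uint32_t v) { regs[off / 4] = v; }

/* Mirrors i2c_dw_xfer_init() after the patch: configure IC_CON first, then
 * write the target address and the 10-bit flag to IC_TAR in a single store. */
static void xfer_init(uint16_t addr, bool ten_bit)
{
	uint32_t ic_con = rd(DW_IC_CON), ic_tar = 0;

	if (ten_bit) {
		ic_con |= DW_IC_CON_10BITADDR_MASTER;
		ic_tar = DW_IC_TAR_10BITADDR_MASTER;	/* needed with DYNAMIC_TAR_UPDATE */
	} else {
		ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
	}
	wr(DW_IC_CON, ic_con);
	wr(DW_IC_TAR, addr | ic_tar);
}

int main(void)
{
	xfer_init(0x2c5, true);
	printf("IC_CON=%#x IC_TAR=%#x\n", rd(DW_IC_CON), rd(DW_IC_TAR));
	return 0;
}

Combining the address and the mode flag in one IC_TAR write keeps the sequence valid whether or not the core was synthesized with dynamic TAR updates.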
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 4c1b60539a25..0aa01136f8d9 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c | |||
@@ -270,7 +270,8 @@ static SIMPLE_DEV_PM_OPS(dw_i2c_dev_pm_ops, dw_i2c_suspend, dw_i2c_resume); | |||
270 | MODULE_ALIAS("platform:i2c_designware"); | 270 | MODULE_ALIAS("platform:i2c_designware"); |
271 | 271 | ||
272 | static struct platform_driver dw_i2c_driver = { | 272 | static struct platform_driver dw_i2c_driver = { |
273 | .remove = dw_i2c_remove, | 273 | .probe = dw_i2c_probe, |
274 | .remove = dw_i2c_remove, | ||
274 | .driver = { | 275 | .driver = { |
275 | .name = "i2c_designware", | 276 | .name = "i2c_designware", |
276 | .owner = THIS_MODULE, | 277 | .owner = THIS_MODULE, |
@@ -282,7 +283,7 @@ static struct platform_driver dw_i2c_driver = { | |||
282 | 283 | ||
283 | static int __init dw_i2c_init_driver(void) | 284 | static int __init dw_i2c_init_driver(void) |
284 | { | 285 | { |
285 | return platform_driver_probe(&dw_i2c_driver, dw_i2c_probe); | 286 | return platform_driver_register(&dw_i2c_driver); |
286 | } | 287 | } |
287 | subsys_initcall(dw_i2c_init_driver); | 288 | subsys_initcall(dw_i2c_init_driver); |
288 | 289 | ||
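This hunk, and the i2c-imx, i2c-mxs and i2c-stu300 hunks further down, all make the same conversion: the probe callback moves into the platform_driver, platform_driver_probe() becomes platform_driver_register(), and the __init/__exit (__exit_p) annotations are dropped so the probe and remove code stays available after boot. A generic sketch of the resulting shape, with placeholder names rather than any one driver's, is:

#include <linux/module.h>
#include <linux/platform_device.h>

/* Placeholder probe/remove callbacks; not taken from any specific driver. */
static int foo_probe(struct platform_device *pdev)  { return 0; }
static int foo_remove(struct platform_device *pdev) { return 0; }

static struct platform_driver foo_driver = {
	.probe	= foo_probe,	/* previously passed to platform_driver_probe() */
	.remove	= foo_remove,	/* no longer wrapped in __exit_p() */
	.driver	= {
		.name	= "foo",
		.owner	= THIS_MODULE,
	},
};

static int __init foo_init(void)
{
	/* platform_driver_register() keeps the probe callback around, so
	 * devices that appear later can still be bound. */
	return platform_driver_register(&foo_driver);
}
subsys_initcall(foo_init);

Keeping the probe callback registered is what allows late-appearing devices, for example ones retried through deferred probing, to be bound after init time.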
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index ccf46656bdad..1d7efa3169cd 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c | |||
@@ -365,7 +365,7 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx) | |||
365 | clk_disable_unprepare(i2c_imx->clk); | 365 | clk_disable_unprepare(i2c_imx->clk); |
366 | } | 366 | } |
367 | 367 | ||
368 | static void __init i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx, | 368 | static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx, |
369 | unsigned int rate) | 369 | unsigned int rate) |
370 | { | 370 | { |
371 | struct imx_i2c_clk_pair *i2c_clk_div = i2c_imx->hwdata->clk_div; | 371 | struct imx_i2c_clk_pair *i2c_clk_div = i2c_imx->hwdata->clk_div; |
@@ -589,7 +589,7 @@ static struct i2c_algorithm i2c_imx_algo = { | |||
589 | .functionality = i2c_imx_func, | 589 | .functionality = i2c_imx_func, |
590 | }; | 590 | }; |
591 | 591 | ||
592 | static int __init i2c_imx_probe(struct platform_device *pdev) | 592 | static int i2c_imx_probe(struct platform_device *pdev) |
593 | { | 593 | { |
594 | const struct of_device_id *of_id = of_match_device(i2c_imx_dt_ids, | 594 | const struct of_device_id *of_id = of_match_device(i2c_imx_dt_ids, |
595 | &pdev->dev); | 595 | &pdev->dev); |
@@ -697,7 +697,7 @@ static int __init i2c_imx_probe(struct platform_device *pdev) | |||
697 | return 0; /* Return OK */ | 697 | return 0; /* Return OK */ |
698 | } | 698 | } |
699 | 699 | ||
700 | static int __exit i2c_imx_remove(struct platform_device *pdev) | 700 | static int i2c_imx_remove(struct platform_device *pdev) |
701 | { | 701 | { |
702 | struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev); | 702 | struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev); |
703 | 703 | ||
@@ -715,7 +715,8 @@ static int __exit i2c_imx_remove(struct platform_device *pdev) | |||
715 | } | 715 | } |
716 | 716 | ||
717 | static struct platform_driver i2c_imx_driver = { | 717 | static struct platform_driver i2c_imx_driver = { |
718 | .remove = __exit_p(i2c_imx_remove), | 718 | .probe = i2c_imx_probe, |
719 | .remove = i2c_imx_remove, | ||
719 | .driver = { | 720 | .driver = { |
720 | .name = DRIVER_NAME, | 721 | .name = DRIVER_NAME, |
721 | .owner = THIS_MODULE, | 722 | .owner = THIS_MODULE, |
@@ -726,7 +727,7 @@ static struct platform_driver i2c_imx_driver = { | |||
726 | 727 | ||
727 | static int __init i2c_adap_imx_init(void) | 728 | static int __init i2c_adap_imx_init(void) |
728 | { | 729 | { |
729 | return platform_driver_probe(&i2c_imx_driver, i2c_imx_probe); | 730 | return platform_driver_register(&i2c_imx_driver); |
730 | } | 731 | } |
731 | subsys_initcall(i2c_adap_imx_init); | 732 | subsys_initcall(i2c_adap_imx_init); |
732 | 733 | ||
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c index 8ed79a086f85..1672effbcebb 100644 --- a/drivers/i2c/busses/i2c-ismt.c +++ b/drivers/i2c/busses/i2c-ismt.c | |||
@@ -393,6 +393,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr, | |||
393 | 393 | ||
394 | desc = &priv->hw[priv->head]; | 394 | desc = &priv->hw[priv->head]; |
395 | 395 | ||
396 | /* Initialize the DMA buffer */ | ||
397 | memset(priv->dma_buffer, 0, sizeof(priv->dma_buffer)); | ||
398 | |||
396 | /* Initialize the descriptor */ | 399 | /* Initialize the descriptor */ |
397 | memset(desc, 0, sizeof(struct ismt_desc)); | 400 | memset(desc, 0, sizeof(struct ismt_desc)); |
398 | desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write); | 401 | desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write); |
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c index 7f3a47443494..d3e9cc3153a9 100644 --- a/drivers/i2c/busses/i2c-mv64xxx.c +++ b/drivers/i2c/busses/i2c-mv64xxx.c | |||
@@ -234,9 +234,9 @@ static int mv64xxx_i2c_offload_msg(struct mv64xxx_i2c_data *drv_data) | |||
234 | ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_WR | | 234 | ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_WR | |
235 | (msg->len - 1) << MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT; | 235 | (msg->len - 1) << MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT; |
236 | 236 | ||
237 | writel_relaxed(data_reg_lo, | 237 | writel(data_reg_lo, |
238 | drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_LO); | 238 | drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_LO); |
239 | writel_relaxed(data_reg_hi, | 239 | writel(data_reg_hi, |
240 | drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_HI); | 240 | drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_HI); |
241 | 241 | ||
242 | } else { | 242 | } else { |
@@ -697,6 +697,7 @@ static const struct of_device_id mv64xxx_i2c_of_match_table[] = { | |||
697 | MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table); | 697 | MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table); |
698 | 698 | ||
699 | #ifdef CONFIG_OF | 699 | #ifdef CONFIG_OF |
700 | #ifdef CONFIG_HAVE_CLK | ||
700 | static int | 701 | static int |
701 | mv64xxx_calc_freq(const int tclk, const int n, const int m) | 702 | mv64xxx_calc_freq(const int tclk, const int n, const int m) |
702 | { | 703 | { |
@@ -726,16 +727,12 @@ mv64xxx_find_baud_factors(const u32 req_freq, const u32 tclk, u32 *best_n, | |||
726 | return false; | 727 | return false; |
727 | return true; | 728 | return true; |
728 | } | 729 | } |
730 | #endif /* CONFIG_HAVE_CLK */ | ||
729 | 731 | ||
730 | static int | 732 | static int |
731 | mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data, | 733 | mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data, |
732 | struct device *dev) | 734 | struct device *dev) |
733 | { | 735 | { |
734 | const struct of_device_id *device; | ||
735 | struct device_node *np = dev->of_node; | ||
736 | u32 bus_freq, tclk; | ||
737 | int rc = 0; | ||
738 | |||
739 | /* CLK is mandatory when using DT to describe the i2c bus. We | 736 | /* CLK is mandatory when using DT to describe the i2c bus. We |
740 | * need to know tclk in order to calculate bus clock | 737 | * need to know tclk in order to calculate bus clock |
741 | * factors. | 738 | * factors. |
@@ -744,6 +741,11 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data, | |||
744 | /* Have OF but no CLK */ | 741 | /* Have OF but no CLK */ |
745 | return -ENODEV; | 742 | return -ENODEV; |
746 | #else | 743 | #else |
744 | const struct of_device_id *device; | ||
745 | struct device_node *np = dev->of_node; | ||
746 | u32 bus_freq, tclk; | ||
747 | int rc = 0; | ||
748 | |||
747 | if (IS_ERR(drv_data->clk)) { | 749 | if (IS_ERR(drv_data->clk)) { |
748 | rc = -ENODEV; | 750 | rc = -ENODEV; |
749 | goto out; | 751 | goto out; |
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c index f4a01675fa71..b7c857774708 100644 --- a/drivers/i2c/busses/i2c-mxs.c +++ b/drivers/i2c/busses/i2c-mxs.c | |||
@@ -780,12 +780,13 @@ static struct platform_driver mxs_i2c_driver = { | |||
780 | .owner = THIS_MODULE, | 780 | .owner = THIS_MODULE, |
781 | .of_match_table = mxs_i2c_dt_ids, | 781 | .of_match_table = mxs_i2c_dt_ids, |
782 | }, | 782 | }, |
783 | .probe = mxs_i2c_probe, | ||
783 | .remove = mxs_i2c_remove, | 784 | .remove = mxs_i2c_remove, |
784 | }; | 785 | }; |
785 | 786 | ||
786 | static int __init mxs_i2c_init(void) | 787 | static int __init mxs_i2c_init(void) |
787 | { | 788 | { |
788 | return platform_driver_probe(&mxs_i2c_driver, mxs_i2c_probe); | 789 | return platform_driver_register(&mxs_i2c_driver); |
789 | } | 790 | } |
790 | subsys_initcall(mxs_i2c_init); | 791 | subsys_initcall(mxs_i2c_init); |
791 | 792 | ||
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 6d8308d5dc4e..9967a6f9c2ff 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c | |||
@@ -939,6 +939,9 @@ omap_i2c_isr_thread(int this_irq, void *dev_id) | |||
939 | /* | 939 | /* |
940 | * ProDB0017052: Clear ARDY bit twice | 940 | * ProDB0017052: Clear ARDY bit twice |
941 | */ | 941 | */ |
942 | if (stat & OMAP_I2C_STAT_ARDY) | ||
943 | omap_i2c_ack_stat(dev, OMAP_I2C_STAT_ARDY); | ||
944 | |||
942 | if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK | | 945 | if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK | |
943 | OMAP_I2C_STAT_AL)) { | 946 | OMAP_I2C_STAT_AL)) { |
944 | omap_i2c_ack_stat(dev, (OMAP_I2C_STAT_RRDY | | 947 | omap_i2c_ack_stat(dev, (OMAP_I2C_STAT_RRDY | |
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 3535f3c0f7b4..3747b9bf67d6 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c | |||
@@ -1178,8 +1178,6 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev) | |||
1178 | 1178 | ||
1179 | i2c_del_adapter(&i2c->adap); | 1179 | i2c_del_adapter(&i2c->adap); |
1180 | 1180 | ||
1181 | clk_disable_unprepare(i2c->clk); | ||
1182 | |||
1183 | if (pdev->dev.of_node && IS_ERR(i2c->pctrl)) | 1181 | if (pdev->dev.of_node && IS_ERR(i2c->pctrl)) |
1184 | s3c24xx_i2c_dt_gpio_free(i2c); | 1182 | s3c24xx_i2c_dt_gpio_free(i2c); |
1185 | 1183 | ||
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c index f8f6f2e552db..04a17b9b38bb 100644 --- a/drivers/i2c/busses/i2c-stu300.c +++ b/drivers/i2c/busses/i2c-stu300.c | |||
@@ -859,8 +859,7 @@ static const struct i2c_algorithm stu300_algo = { | |||
859 | .functionality = stu300_func, | 859 | .functionality = stu300_func, |
860 | }; | 860 | }; |
861 | 861 | ||
862 | static int __init | 862 | static int stu300_probe(struct platform_device *pdev) |
863 | stu300_probe(struct platform_device *pdev) | ||
864 | { | 863 | { |
865 | struct stu300_dev *dev; | 864 | struct stu300_dev *dev; |
866 | struct i2c_adapter *adap; | 865 | struct i2c_adapter *adap; |
@@ -966,8 +965,7 @@ static SIMPLE_DEV_PM_OPS(stu300_pm, stu300_suspend, stu300_resume); | |||
966 | #define STU300_I2C_PM NULL | 965 | #define STU300_I2C_PM NULL |
967 | #endif | 966 | #endif |
968 | 967 | ||
969 | static int __exit | 968 | static int stu300_remove(struct platform_device *pdev) |
970 | stu300_remove(struct platform_device *pdev) | ||
971 | { | 969 | { |
972 | struct stu300_dev *dev = platform_get_drvdata(pdev); | 970 | struct stu300_dev *dev = platform_get_drvdata(pdev); |
973 | 971 | ||
@@ -989,13 +987,14 @@ static struct platform_driver stu300_i2c_driver = { | |||
989 | .pm = STU300_I2C_PM, | 987 | .pm = STU300_I2C_PM, |
990 | .of_match_table = stu300_dt_match, | 988 | .of_match_table = stu300_dt_match, |
991 | }, | 989 | }, |
992 | .remove = __exit_p(stu300_remove), | 990 | .probe = stu300_probe, |
991 | .remove = stu300_remove, | ||
993 | 992 | ||
994 | }; | 993 | }; |
995 | 994 | ||
996 | static int __init stu300_init(void) | 995 | static int __init stu300_init(void) |
997 | { | 996 | { |
998 | return platform_driver_probe(&stu300_i2c_driver, stu300_probe); | 997 | return platform_driver_register(&stu300_i2c_driver); |
999 | } | 998 | } |
1000 | 999 | ||
1001 | static void __exit stu300_exit(void) | 1000 | static void __exit stu300_exit(void) |
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 29d3f045a2bf..3be58f89ac77 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c | |||
@@ -1134,6 +1134,9 @@ static void acpi_i2c_register_devices(struct i2c_adapter *adap) | |||
1134 | acpi_handle handle; | 1134 | acpi_handle handle; |
1135 | acpi_status status; | 1135 | acpi_status status; |
1136 | 1136 | ||
1137 | if (!adap->dev.parent) | ||
1138 | return; | ||
1139 | |||
1137 | handle = ACPI_HANDLE(adap->dev.parent); | 1140 | handle = ACPI_HANDLE(adap->dev.parent); |
1138 | if (!handle) | 1141 | if (!handle) |
1139 | return; | 1142 | return; |
diff --git a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c index 74b41ae690f3..928656e241dd 100644 --- a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c +++ b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c | |||
@@ -200,7 +200,7 @@ static int i2c_arbitrator_probe(struct platform_device *pdev) | |||
200 | arb->parent = of_find_i2c_adapter_by_node(parent_np); | 200 | arb->parent = of_find_i2c_adapter_by_node(parent_np); |
201 | if (!arb->parent) { | 201 | if (!arb->parent) { |
202 | dev_err(dev, "Cannot find parent bus\n"); | 202 | dev_err(dev, "Cannot find parent bus\n"); |
203 | return -EINVAL; | 203 | return -EPROBE_DEFER; |
204 | } | 204 | } |
205 | 205 | ||
206 | /* Actually add the mux adapter */ | 206 | /* Actually add the mux adapter */ |
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c index 5d4a99ba743e..a764da777f08 100644 --- a/drivers/i2c/muxes/i2c-mux-gpio.c +++ b/drivers/i2c/muxes/i2c-mux-gpio.c | |||
@@ -66,7 +66,7 @@ static int i2c_mux_gpio_probe_dt(struct gpiomux *mux, | |||
66 | struct device_node *adapter_np, *child; | 66 | struct device_node *adapter_np, *child; |
67 | struct i2c_adapter *adapter; | 67 | struct i2c_adapter *adapter; |
68 | unsigned *values, *gpios; | 68 | unsigned *values, *gpios; |
69 | int i = 0; | 69 | int i = 0, ret; |
70 | 70 | ||
71 | if (!np) | 71 | if (!np) |
72 | return -ENODEV; | 72 | return -ENODEV; |
@@ -79,7 +79,7 @@ static int i2c_mux_gpio_probe_dt(struct gpiomux *mux, | |||
79 | adapter = of_find_i2c_adapter_by_node(adapter_np); | 79 | adapter = of_find_i2c_adapter_by_node(adapter_np); |
80 | if (!adapter) { | 80 | if (!adapter) { |
81 | dev_err(&pdev->dev, "Cannot find parent bus\n"); | 81 | dev_err(&pdev->dev, "Cannot find parent bus\n"); |
82 | return -ENODEV; | 82 | return -EPROBE_DEFER; |
83 | } | 83 | } |
84 | mux->data.parent = i2c_adapter_id(adapter); | 84 | mux->data.parent = i2c_adapter_id(adapter); |
85 | put_device(&adapter->dev); | 85 | put_device(&adapter->dev); |
@@ -116,8 +116,12 @@ static int i2c_mux_gpio_probe_dt(struct gpiomux *mux, | |||
116 | return -ENOMEM; | 116 | return -ENOMEM; |
117 | } | 117 | } |
118 | 118 | ||
119 | for (i = 0; i < mux->data.n_gpios; i++) | 119 | for (i = 0; i < mux->data.n_gpios; i++) { |
120 | gpios[i] = of_get_named_gpio(np, "mux-gpios", i); | 120 | ret = of_get_named_gpio(np, "mux-gpios", i); |
121 | if (ret < 0) | ||
122 | return ret; | ||
123 | gpios[i] = ret; | ||
124 | } | ||
121 | 125 | ||
122 | mux->data.gpios = gpios; | 126 | mux->data.gpios = gpios; |
123 | 127 | ||
@@ -177,7 +181,7 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev) | |||
177 | if (!parent) { | 181 | if (!parent) { |
178 | dev_err(&pdev->dev, "Parent adapter (%d) not found\n", | 182 | dev_err(&pdev->dev, "Parent adapter (%d) not found\n", |
179 | mux->data.parent); | 183 | mux->data.parent); |
180 | return -ENODEV; | 184 | return -EPROBE_DEFER; |
181 | } | 185 | } |
182 | 186 | ||
183 | mux->parent = parent; | 187 | mux->parent = parent; |
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c index 69a91732ae65..68a37157377d 100644 --- a/drivers/i2c/muxes/i2c-mux-pinctrl.c +++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c | |||
@@ -113,7 +113,7 @@ static int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux, | |||
113 | adapter = of_find_i2c_adapter_by_node(adapter_np); | 113 | adapter = of_find_i2c_adapter_by_node(adapter_np); |
114 | if (!adapter) { | 114 | if (!adapter) { |
115 | dev_err(mux->dev, "Cannot find parent bus\n"); | 115 | dev_err(mux->dev, "Cannot find parent bus\n"); |
116 | return -ENODEV; | 116 | return -EPROBE_DEFER; |
117 | } | 117 | } |
118 | mux->pdata->parent_bus_num = i2c_adapter_id(adapter); | 118 | mux->pdata->parent_bus_num = i2c_adapter_id(adapter); |
119 | put_device(&adapter->dev); | 119 | put_device(&adapter->dev); |
@@ -211,7 +211,7 @@ static int i2c_mux_pinctrl_probe(struct platform_device *pdev) | |||
211 | if (!mux->parent) { | 211 | if (!mux->parent) { |
212 | dev_err(&pdev->dev, "Parent adapter (%d) not found\n", | 212 | dev_err(&pdev->dev, "Parent adapter (%d) not found\n", |
213 | mux->pdata->parent_bus_num); | 213 | mux->pdata->parent_bus_num); |
214 | ret = -ENODEV; | 214 | ret = -EPROBE_DEFER; |
215 | goto err; | 215 | goto err; |
216 | } | 216 | } |
217 | 217 | ||
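The i2c-arb-gpio-challenge, i2c-mux-gpio and i2c-mux-pinctrl hunks all turn "parent adapter not found" from a hard error (-EINVAL or -ENODEV) into -EPROBE_DEFER, and the gpio mux additionally stops ignoring errors from of_get_named_gpio(). A condensed, hypothetical helper showing the deferral pattern (the function name and exact headers are assumptions, not taken from any of the drivers):

#include <linux/i2c.h>
#include <linux/of.h>
#include <linux/platform_device.h>

/* Hypothetical helper: look up the parent adapter from its DT node and, if
 * it has not been registered yet, ask the driver core to retry this probe
 * later rather than failing permanently. */
static int mux_find_parent(struct platform_device *pdev,
			   struct device_node *parent_np,
			   struct i2c_adapter **parent)
{
	*parent = of_find_i2c_adapter_by_node(parent_np);
	if (!*parent) {
		dev_err(&pdev->dev, "Cannot find parent bus\n");
		return -EPROBE_DEFER;	/* parent bus may simply probe later */
	}
	return 0;
}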
diff --git a/drivers/iio/amplifiers/ad8366.c b/drivers/iio/amplifiers/ad8366.c index d0a79a4bce1c..ba6f6a91dfff 100644 --- a/drivers/iio/amplifiers/ad8366.c +++ b/drivers/iio/amplifiers/ad8366.c | |||
@@ -185,10 +185,8 @@ static int ad8366_remove(struct spi_device *spi) | |||
185 | 185 | ||
186 | iio_device_unregister(indio_dev); | 186 | iio_device_unregister(indio_dev); |
187 | 187 | ||
188 | if (!IS_ERR(reg)) { | 188 | if (!IS_ERR(reg)) |
189 | regulator_disable(reg); | 189 | regulator_disable(reg); |
190 | regulator_put(reg); | ||
191 | } | ||
192 | 190 | ||
193 | return 0; | 191 | return 0; |
194 | } | 192 | } |
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 8e84cd522e49..f95c6979efd8 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c | |||
@@ -852,7 +852,6 @@ static void iio_dev_release(struct device *device) | |||
852 | iio_device_unregister_trigger_consumer(indio_dev); | 852 | iio_device_unregister_trigger_consumer(indio_dev); |
853 | iio_device_unregister_eventset(indio_dev); | 853 | iio_device_unregister_eventset(indio_dev); |
854 | iio_device_unregister_sysfs(indio_dev); | 854 | iio_device_unregister_sysfs(indio_dev); |
855 | iio_device_unregister_debugfs(indio_dev); | ||
856 | 855 | ||
857 | ida_simple_remove(&iio_ida, indio_dev->id); | 856 | ida_simple_remove(&iio_ida, indio_dev->id); |
858 | kfree(indio_dev); | 857 | kfree(indio_dev); |
@@ -1087,6 +1086,7 @@ void iio_device_unregister(struct iio_dev *indio_dev) | |||
1087 | 1086 | ||
1088 | if (indio_dev->chrdev.dev) | 1087 | if (indio_dev->chrdev.dev) |
1089 | cdev_del(&indio_dev->chrdev); | 1088 | cdev_del(&indio_dev->chrdev); |
1089 | iio_device_unregister_debugfs(indio_dev); | ||
1090 | 1090 | ||
1091 | iio_disable_all_buffers(indio_dev); | 1091 | iio_disable_all_buffers(indio_dev); |
1092 | 1092 | ||
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c index e8d2849cc81d..cab3bc7494a2 100644 --- a/drivers/iio/magnetometer/st_magn_core.c +++ b/drivers/iio/magnetometer/st_magn_core.c | |||
@@ -29,9 +29,9 @@ | |||
29 | #define ST_MAGN_NUMBER_DATA_CHANNELS 3 | 29 | #define ST_MAGN_NUMBER_DATA_CHANNELS 3 |
30 | 30 | ||
31 | /* DEFAULT VALUE FOR SENSORS */ | 31 | /* DEFAULT VALUE FOR SENSORS */ |
32 | #define ST_MAGN_DEFAULT_OUT_X_L_ADDR 0X04 | 32 | #define ST_MAGN_DEFAULT_OUT_X_H_ADDR 0X03 |
33 | #define ST_MAGN_DEFAULT_OUT_Y_L_ADDR 0X08 | 33 | #define ST_MAGN_DEFAULT_OUT_Y_H_ADDR 0X07 |
34 | #define ST_MAGN_DEFAULT_OUT_Z_L_ADDR 0X06 | 34 | #define ST_MAGN_DEFAULT_OUT_Z_H_ADDR 0X05 |
35 | 35 | ||
36 | /* FULLSCALE */ | 36 | /* FULLSCALE */ |
37 | #define ST_MAGN_FS_AVL_1300MG 1300 | 37 | #define ST_MAGN_FS_AVL_1300MG 1300 |
@@ -117,16 +117,16 @@ | |||
117 | static const struct iio_chan_spec st_magn_16bit_channels[] = { | 117 | static const struct iio_chan_spec st_magn_16bit_channels[] = { |
118 | ST_SENSORS_LSM_CHANNELS(IIO_MAGN, | 118 | ST_SENSORS_LSM_CHANNELS(IIO_MAGN, |
119 | BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), | 119 | BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), |
120 | ST_SENSORS_SCAN_X, 1, IIO_MOD_X, 's', IIO_LE, 16, 16, | 120 | ST_SENSORS_SCAN_X, 1, IIO_MOD_X, 's', IIO_BE, 16, 16, |
121 | ST_MAGN_DEFAULT_OUT_X_L_ADDR), | 121 | ST_MAGN_DEFAULT_OUT_X_H_ADDR), |
122 | ST_SENSORS_LSM_CHANNELS(IIO_MAGN, | 122 | ST_SENSORS_LSM_CHANNELS(IIO_MAGN, |
123 | BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), | 123 | BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), |
124 | ST_SENSORS_SCAN_Y, 1, IIO_MOD_Y, 's', IIO_LE, 16, 16, | 124 | ST_SENSORS_SCAN_Y, 1, IIO_MOD_Y, 's', IIO_BE, 16, 16, |
125 | ST_MAGN_DEFAULT_OUT_Y_L_ADDR), | 125 | ST_MAGN_DEFAULT_OUT_Y_H_ADDR), |
126 | ST_SENSORS_LSM_CHANNELS(IIO_MAGN, | 126 | ST_SENSORS_LSM_CHANNELS(IIO_MAGN, |
127 | BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), | 127 | BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), |
128 | ST_SENSORS_SCAN_Z, 1, IIO_MOD_Z, 's', IIO_LE, 16, 16, | 128 | ST_SENSORS_SCAN_Z, 1, IIO_MOD_Z, 's', IIO_BE, 16, 16, |
129 | ST_MAGN_DEFAULT_OUT_Z_L_ADDR), | 129 | ST_MAGN_DEFAULT_OUT_Z_H_ADDR), |
130 | IIO_CHAN_SOFT_TIMESTAMP(3) | 130 | IIO_CHAN_SOFT_TIMESTAMP(3) |
131 | }; | 131 | }; |
132 | 132 | ||
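The st_magn hunk points the default 16-bit magnetometer channels at the high-byte output registers (0x03, 0x07, 0x05) and marks the samples IIO_BE instead of IIO_LE, i.e. the raw data is now treated as big-endian starting at OUT_*_H. The small standalone snippet below only shows how such a sample is assembled; the two bytes are arbitrary example values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Two bytes as they would be read starting at the high-byte register:
	 * high byte first, then low byte. Values are arbitrary. */
	uint8_t buf[2] = { 0xFE, 0x6C };

	/* Big-endian assembly (what IIO_BE tells the core to do), then the
	 * cast sign-extends the 16-bit field. */
	int16_t raw = (int16_t)((buf[0] << 8) | buf[1]);

	printf("raw = %d\n", raw);
	return 0;
}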
diff --git a/drivers/infiniband/hw/amso1100/c2_ae.c b/drivers/infiniband/hw/amso1100/c2_ae.c index d5d1929753e4..cedda25232be 100644 --- a/drivers/infiniband/hw/amso1100/c2_ae.c +++ b/drivers/infiniband/hw/amso1100/c2_ae.c | |||
@@ -141,7 +141,7 @@ static const char *to_qp_state_str(int state) | |||
141 | return "C2_QP_STATE_ERROR"; | 141 | return "C2_QP_STATE_ERROR"; |
142 | default: | 142 | default: |
143 | return "<invalid QP state>"; | 143 | return "<invalid QP state>"; |
144 | }; | 144 | } |
145 | } | 145 | } |
146 | 146 | ||
147 | void c2_ae_event(struct c2_dev *c2dev, u32 mq_index) | 147 | void c2_ae_event(struct c2_dev *c2dev, u32 mq_index) |
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 3f831de9a4d8..b1a6cb3a2809 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
@@ -164,6 +164,7 @@ int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn) | |||
164 | static int alloc_comp_eqs(struct mlx5_ib_dev *dev) | 164 | static int alloc_comp_eqs(struct mlx5_ib_dev *dev) |
165 | { | 165 | { |
166 | struct mlx5_eq_table *table = &dev->mdev.priv.eq_table; | 166 | struct mlx5_eq_table *table = &dev->mdev.priv.eq_table; |
167 | char name[MLX5_MAX_EQ_NAME]; | ||
167 | struct mlx5_eq *eq, *n; | 168 | struct mlx5_eq *eq, *n; |
168 | int ncomp_vec; | 169 | int ncomp_vec; |
169 | int nent; | 170 | int nent; |
@@ -180,11 +181,10 @@ static int alloc_comp_eqs(struct mlx5_ib_dev *dev) | |||
180 | goto clean; | 181 | goto clean; |
181 | } | 182 | } |
182 | 183 | ||
183 | snprintf(eq->name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i); | 184 | snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i); |
184 | err = mlx5_create_map_eq(&dev->mdev, eq, | 185 | err = mlx5_create_map_eq(&dev->mdev, eq, |
185 | i + MLX5_EQ_VEC_COMP_BASE, nent, 0, | 186 | i + MLX5_EQ_VEC_COMP_BASE, nent, 0, |
186 | eq->name, | 187 | name, &dev->mdev.priv.uuari.uars[0]); |
187 | &dev->mdev.priv.uuari.uars[0]); | ||
188 | if (err) { | 188 | if (err) { |
189 | kfree(eq); | 189 | kfree(eq); |
190 | goto clean; | 190 | goto clean; |
@@ -301,9 +301,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, | |||
301 | props->max_srq_sge = max_rq_sg - 1; | 301 | props->max_srq_sge = max_rq_sg - 1; |
302 | props->max_fast_reg_page_list_len = (unsigned int)-1; | 302 | props->max_fast_reg_page_list_len = (unsigned int)-1; |
303 | props->local_ca_ack_delay = dev->mdev.caps.local_ca_ack_delay; | 303 | props->local_ca_ack_delay = dev->mdev.caps.local_ca_ack_delay; |
304 | props->atomic_cap = dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_ATOMIC ? | 304 | props->atomic_cap = IB_ATOMIC_NONE; |
305 | IB_ATOMIC_HCA : IB_ATOMIC_NONE; | 305 | props->masked_atomic_cap = IB_ATOMIC_NONE; |
306 | props->masked_atomic_cap = IB_ATOMIC_HCA; | ||
307 | props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28)); | 306 | props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28)); |
308 | props->max_mcast_grp = 1 << dev->mdev.caps.log_max_mcg; | 307 | props->max_mcast_grp = 1 << dev->mdev.caps.log_max_mcg; |
309 | props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg; | 308 | props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg; |
@@ -1006,6 +1005,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, | |||
1006 | ibev.device = &ibdev->ib_dev; | 1005 | ibev.device = &ibdev->ib_dev; |
1007 | ibev.element.port_num = port; | 1006 | ibev.element.port_num = port; |
1008 | 1007 | ||
1008 | if (port < 1 || port > ibdev->num_ports) { | ||
1009 | mlx5_ib_warn(ibdev, "warning: event on port %d\n", port); | ||
1010 | return; | ||
1011 | } | ||
1012 | |||
1009 | if (ibdev->ib_active) | 1013 | if (ibdev->ib_active) |
1010 | ib_dispatch_event(&ibev); | 1014 | ib_dispatch_event(&ibev); |
1011 | } | 1015 | } |
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index bd41df95b6f0..3453580b1eb2 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c | |||
@@ -42,6 +42,10 @@ enum { | |||
42 | DEF_CACHE_SIZE = 10, | 42 | DEF_CACHE_SIZE = 10, |
43 | }; | 43 | }; |
44 | 44 | ||
45 | enum { | ||
46 | MLX5_UMR_ALIGN = 2048 | ||
47 | }; | ||
48 | |||
45 | static __be64 *mr_align(__be64 *ptr, int align) | 49 | static __be64 *mr_align(__be64 *ptr, int align) |
46 | { | 50 | { |
47 | unsigned long mask = align - 1; | 51 | unsigned long mask = align - 1; |
@@ -61,13 +65,11 @@ static int order2idx(struct mlx5_ib_dev *dev, int order) | |||
61 | 65 | ||
62 | static int add_keys(struct mlx5_ib_dev *dev, int c, int num) | 66 | static int add_keys(struct mlx5_ib_dev *dev, int c, int num) |
63 | { | 67 | { |
64 | struct device *ddev = dev->ib_dev.dma_device; | ||
65 | struct mlx5_mr_cache *cache = &dev->cache; | 68 | struct mlx5_mr_cache *cache = &dev->cache; |
66 | struct mlx5_cache_ent *ent = &cache->ent[c]; | 69 | struct mlx5_cache_ent *ent = &cache->ent[c]; |
67 | struct mlx5_create_mkey_mbox_in *in; | 70 | struct mlx5_create_mkey_mbox_in *in; |
68 | struct mlx5_ib_mr *mr; | 71 | struct mlx5_ib_mr *mr; |
69 | int npages = 1 << ent->order; | 72 | int npages = 1 << ent->order; |
70 | int size = sizeof(u64) * npages; | ||
71 | int err = 0; | 73 | int err = 0; |
72 | int i; | 74 | int i; |
73 | 75 | ||
@@ -83,21 +85,6 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num) | |||
83 | } | 85 | } |
84 | mr->order = ent->order; | 86 | mr->order = ent->order; |
85 | mr->umred = 1; | 87 | mr->umred = 1; |
86 | mr->pas = kmalloc(size + 0x3f, GFP_KERNEL); | ||
87 | if (!mr->pas) { | ||
88 | kfree(mr); | ||
89 | err = -ENOMEM; | ||
90 | goto out; | ||
91 | } | ||
92 | mr->dma = dma_map_single(ddev, mr_align(mr->pas, 0x40), size, | ||
93 | DMA_TO_DEVICE); | ||
94 | if (dma_mapping_error(ddev, mr->dma)) { | ||
95 | kfree(mr->pas); | ||
96 | kfree(mr); | ||
97 | err = -ENOMEM; | ||
98 | goto out; | ||
99 | } | ||
100 | |||
101 | in->seg.status = 1 << 6; | 88 | in->seg.status = 1 << 6; |
102 | in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2); | 89 | in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2); |
103 | in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); | 90 | in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); |
@@ -108,8 +95,6 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num) | |||
108 | sizeof(*in)); | 95 | sizeof(*in)); |
109 | if (err) { | 96 | if (err) { |
110 | mlx5_ib_warn(dev, "create mkey failed %d\n", err); | 97 | mlx5_ib_warn(dev, "create mkey failed %d\n", err); |
111 | dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE); | ||
112 | kfree(mr->pas); | ||
113 | kfree(mr); | 98 | kfree(mr); |
114 | goto out; | 99 | goto out; |
115 | } | 100 | } |
@@ -129,11 +114,9 @@ out: | |||
129 | 114 | ||
130 | static void remove_keys(struct mlx5_ib_dev *dev, int c, int num) | 115 | static void remove_keys(struct mlx5_ib_dev *dev, int c, int num) |
131 | { | 116 | { |
132 | struct device *ddev = dev->ib_dev.dma_device; | ||
133 | struct mlx5_mr_cache *cache = &dev->cache; | 117 | struct mlx5_mr_cache *cache = &dev->cache; |
134 | struct mlx5_cache_ent *ent = &cache->ent[c]; | 118 | struct mlx5_cache_ent *ent = &cache->ent[c]; |
135 | struct mlx5_ib_mr *mr; | 119 | struct mlx5_ib_mr *mr; |
136 | int size; | ||
137 | int err; | 120 | int err; |
138 | int i; | 121 | int i; |
139 | 122 | ||
@@ -149,14 +132,10 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num) | |||
149 | ent->size--; | 132 | ent->size--; |
150 | spin_unlock(&ent->lock); | 133 | spin_unlock(&ent->lock); |
151 | err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr); | 134 | err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr); |
152 | if (err) { | 135 | if (err) |
153 | mlx5_ib_warn(dev, "failed destroy mkey\n"); | 136 | mlx5_ib_warn(dev, "failed destroy mkey\n"); |
154 | } else { | 137 | else |
155 | size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40); | ||
156 | dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE); | ||
157 | kfree(mr->pas); | ||
158 | kfree(mr); | 138 | kfree(mr); |
159 | } | ||
160 | } | 139 | } |
161 | } | 140 | } |
162 | 141 | ||
@@ -408,13 +387,12 @@ static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) | |||
408 | 387 | ||
409 | static void clean_keys(struct mlx5_ib_dev *dev, int c) | 388 | static void clean_keys(struct mlx5_ib_dev *dev, int c) |
410 | { | 389 | { |
411 | struct device *ddev = dev->ib_dev.dma_device; | ||
412 | struct mlx5_mr_cache *cache = &dev->cache; | 390 | struct mlx5_mr_cache *cache = &dev->cache; |
413 | struct mlx5_cache_ent *ent = &cache->ent[c]; | 391 | struct mlx5_cache_ent *ent = &cache->ent[c]; |
414 | struct mlx5_ib_mr *mr; | 392 | struct mlx5_ib_mr *mr; |
415 | int size; | ||
416 | int err; | 393 | int err; |
417 | 394 | ||
395 | cancel_delayed_work(&ent->dwork); | ||
418 | while (1) { | 396 | while (1) { |
419 | spin_lock(&ent->lock); | 397 | spin_lock(&ent->lock); |
420 | if (list_empty(&ent->head)) { | 398 | if (list_empty(&ent->head)) { |
@@ -427,14 +405,10 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c) | |||
427 | ent->size--; | 405 | ent->size--; |
428 | spin_unlock(&ent->lock); | 406 | spin_unlock(&ent->lock); |
429 | err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr); | 407 | err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr); |
430 | if (err) { | 408 | if (err) |
431 | mlx5_ib_warn(dev, "failed destroy mkey\n"); | 409 | mlx5_ib_warn(dev, "failed destroy mkey\n"); |
432 | } else { | 410 | else |
433 | size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40); | ||
434 | dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE); | ||
435 | kfree(mr->pas); | ||
436 | kfree(mr); | 411 | kfree(mr); |
437 | } | ||
438 | } | 412 | } |
439 | } | 413 | } |
440 | 414 | ||
@@ -540,13 +514,15 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev) | |||
540 | int i; | 514 | int i; |
541 | 515 | ||
542 | dev->cache.stopped = 1; | 516 | dev->cache.stopped = 1; |
543 | destroy_workqueue(dev->cache.wq); | 517 | flush_workqueue(dev->cache.wq); |
544 | 518 | ||
545 | mlx5_mr_cache_debugfs_cleanup(dev); | 519 | mlx5_mr_cache_debugfs_cleanup(dev); |
546 | 520 | ||
547 | for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) | 521 | for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) |
548 | clean_keys(dev, i); | 522 | clean_keys(dev, i); |
549 | 523 | ||
524 | destroy_workqueue(dev->cache.wq); | ||
525 | |||
550 | return 0; | 526 | return 0; |
551 | } | 527 | } |
552 | 528 | ||
@@ -675,10 +651,12 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem, | |||
675 | int page_shift, int order, int access_flags) | 651 | int page_shift, int order, int access_flags) |
676 | { | 652 | { |
677 | struct mlx5_ib_dev *dev = to_mdev(pd->device); | 653 | struct mlx5_ib_dev *dev = to_mdev(pd->device); |
654 | struct device *ddev = dev->ib_dev.dma_device; | ||
678 | struct umr_common *umrc = &dev->umrc; | 655 | struct umr_common *umrc = &dev->umrc; |
679 | struct ib_send_wr wr, *bad; | 656 | struct ib_send_wr wr, *bad; |
680 | struct mlx5_ib_mr *mr; | 657 | struct mlx5_ib_mr *mr; |
681 | struct ib_sge sg; | 658 | struct ib_sge sg; |
659 | int size = sizeof(u64) * npages; | ||
682 | int err; | 660 | int err; |
683 | int i; | 661 | int i; |
684 | 662 | ||
@@ -697,7 +675,22 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem, | |||
697 | if (!mr) | 675 | if (!mr) |
698 | return ERR_PTR(-EAGAIN); | 676 | return ERR_PTR(-EAGAIN); |
699 | 677 | ||
700 | mlx5_ib_populate_pas(dev, umem, page_shift, mr_align(mr->pas, 0x40), 1); | 678 | mr->pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL); |
679 | if (!mr->pas) { | ||
680 | err = -ENOMEM; | ||
681 | goto error; | ||
682 | } | ||
683 | |||
684 | mlx5_ib_populate_pas(dev, umem, page_shift, | ||
685 | mr_align(mr->pas, MLX5_UMR_ALIGN), 1); | ||
686 | |||
687 | mr->dma = dma_map_single(ddev, mr_align(mr->pas, MLX5_UMR_ALIGN), size, | ||
688 | DMA_TO_DEVICE); | ||
689 | if (dma_mapping_error(ddev, mr->dma)) { | ||
690 | kfree(mr->pas); | ||
691 | err = -ENOMEM; | ||
692 | goto error; | ||
693 | } | ||
701 | 694 | ||
702 | memset(&wr, 0, sizeof(wr)); | 695 | memset(&wr, 0, sizeof(wr)); |
703 | wr.wr_id = (u64)(unsigned long)mr; | 696 | wr.wr_id = (u64)(unsigned long)mr; |
@@ -718,6 +711,9 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem, | |||
718 | wait_for_completion(&mr->done); | 711 | wait_for_completion(&mr->done); |
719 | up(&umrc->sem); | 712 | up(&umrc->sem); |
720 | 713 | ||
714 | dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE); | ||
715 | kfree(mr->pas); | ||
716 | |||
721 | if (mr->status != IB_WC_SUCCESS) { | 717 | if (mr->status != IB_WC_SUCCESS) { |
722 | mlx5_ib_warn(dev, "reg umr failed\n"); | 718 | mlx5_ib_warn(dev, "reg umr failed\n"); |
723 | err = -EFAULT; | 719 | err = -EFAULT; |
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 045f8cdbd303..5659ea880741 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
@@ -203,7 +203,7 @@ static int sq_overhead(enum ib_qp_type qp_type) | |||
203 | 203 | ||
204 | switch (qp_type) { | 204 | switch (qp_type) { |
205 | case IB_QPT_XRC_INI: | 205 | case IB_QPT_XRC_INI: |
206 | size = sizeof(struct mlx5_wqe_xrc_seg); | 206 | size += sizeof(struct mlx5_wqe_xrc_seg); |
207 | /* fall through */ | 207 | /* fall through */ |
208 | case IB_QPT_RC: | 208 | case IB_QPT_RC: |
209 | size += sizeof(struct mlx5_wqe_ctrl_seg) + | 209 | size += sizeof(struct mlx5_wqe_ctrl_seg) + |
@@ -211,20 +211,23 @@ static int sq_overhead(enum ib_qp_type qp_type) | |||
211 | sizeof(struct mlx5_wqe_raddr_seg); | 211 | sizeof(struct mlx5_wqe_raddr_seg); |
212 | break; | 212 | break; |
213 | 213 | ||
214 | case IB_QPT_XRC_TGT: | ||
215 | return 0; | ||
216 | |||
214 | case IB_QPT_UC: | 217 | case IB_QPT_UC: |
215 | size = sizeof(struct mlx5_wqe_ctrl_seg) + | 218 | size += sizeof(struct mlx5_wqe_ctrl_seg) + |
216 | sizeof(struct mlx5_wqe_raddr_seg); | 219 | sizeof(struct mlx5_wqe_raddr_seg); |
217 | break; | 220 | break; |
218 | 221 | ||
219 | case IB_QPT_UD: | 222 | case IB_QPT_UD: |
220 | case IB_QPT_SMI: | 223 | case IB_QPT_SMI: |
221 | case IB_QPT_GSI: | 224 | case IB_QPT_GSI: |
222 | size = sizeof(struct mlx5_wqe_ctrl_seg) + | 225 | size += sizeof(struct mlx5_wqe_ctrl_seg) + |
223 | sizeof(struct mlx5_wqe_datagram_seg); | 226 | sizeof(struct mlx5_wqe_datagram_seg); |
224 | break; | 227 | break; |
225 | 228 | ||
226 | case MLX5_IB_QPT_REG_UMR: | 229 | case MLX5_IB_QPT_REG_UMR: |
227 | size = sizeof(struct mlx5_wqe_ctrl_seg) + | 230 | size += sizeof(struct mlx5_wqe_ctrl_seg) + |
228 | sizeof(struct mlx5_wqe_umr_ctrl_seg) + | 231 | sizeof(struct mlx5_wqe_umr_ctrl_seg) + |
229 | sizeof(struct mlx5_mkey_seg); | 232 | sizeof(struct mlx5_mkey_seg); |
230 | break; | 233 | break; |
@@ -270,7 +273,8 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, | |||
270 | return wqe_size; | 273 | return wqe_size; |
271 | 274 | ||
272 | if (wqe_size > dev->mdev.caps.max_sq_desc_sz) { | 275 | if (wqe_size > dev->mdev.caps.max_sq_desc_sz) { |
273 | mlx5_ib_dbg(dev, "\n"); | 276 | mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n", |
277 | wqe_size, dev->mdev.caps.max_sq_desc_sz); | ||
274 | return -EINVAL; | 278 | return -EINVAL; |
275 | } | 279 | } |
276 | 280 | ||
@@ -280,9 +284,15 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, | |||
280 | 284 | ||
281 | wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size); | 285 | wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size); |
282 | qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB; | 286 | qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB; |
287 | if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) { | ||
288 | mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n", | ||
289 | qp->sq.wqe_cnt, dev->mdev.caps.max_wqes); | ||
290 | return -ENOMEM; | ||
291 | } | ||
283 | qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); | 292 | qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); |
284 | qp->sq.max_gs = attr->cap.max_send_sge; | 293 | qp->sq.max_gs = attr->cap.max_send_sge; |
285 | qp->sq.max_post = 1 << ilog2(wq_size / wqe_size); | 294 | qp->sq.max_post = wq_size / wqe_size; |
295 | attr->cap.max_send_wr = qp->sq.max_post; | ||
286 | 296 | ||
287 | return wq_size; | 297 | return wq_size; |
288 | } | 298 | } |
@@ -1280,6 +1290,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q | |||
1280 | MLX5_QP_OPTPAR_Q_KEY, | 1290 | MLX5_QP_OPTPAR_Q_KEY, |
1281 | [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX | | 1291 | [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX | |
1282 | MLX5_QP_OPTPAR_Q_KEY, | 1292 | MLX5_QP_OPTPAR_Q_KEY, |
1293 | [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | | ||
1294 | MLX5_QP_OPTPAR_RRE | | ||
1295 | MLX5_QP_OPTPAR_RAE | | ||
1296 | MLX5_QP_OPTPAR_RWE | | ||
1297 | MLX5_QP_OPTPAR_PKEY_INDEX, | ||
1283 | }, | 1298 | }, |
1284 | }, | 1299 | }, |
1285 | [MLX5_QP_STATE_RTR] = { | 1300 | [MLX5_QP_STATE_RTR] = { |
@@ -1314,6 +1329,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q | |||
1314 | [MLX5_QP_STATE_RTS] = { | 1329 | [MLX5_QP_STATE_RTS] = { |
1315 | [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY, | 1330 | [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY, |
1316 | [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY, | 1331 | [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY, |
1332 | [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE, | ||
1333 | [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RNR_TIMEOUT | | ||
1334 | MLX5_QP_OPTPAR_RWE | | ||
1335 | MLX5_QP_OPTPAR_RAE | | ||
1336 | MLX5_QP_OPTPAR_RRE, | ||
1317 | }, | 1337 | }, |
1318 | }, | 1338 | }, |
1319 | }; | 1339 | }; |
@@ -1651,29 +1671,6 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg, | |||
1651 | rseg->reserved = 0; | 1671 | rseg->reserved = 0; |
1652 | } | 1672 | } |
1653 | 1673 | ||
1654 | static void set_atomic_seg(struct mlx5_wqe_atomic_seg *aseg, struct ib_send_wr *wr) | ||
1655 | { | ||
1656 | if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { | ||
1657 | aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap); | ||
1658 | aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add); | ||
1659 | } else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) { | ||
1660 | aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add); | ||
1661 | aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask); | ||
1662 | } else { | ||
1663 | aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add); | ||
1664 | aseg->compare = 0; | ||
1665 | } | ||
1666 | } | ||
1667 | |||
1668 | static void set_masked_atomic_seg(struct mlx5_wqe_masked_atomic_seg *aseg, | ||
1669 | struct ib_send_wr *wr) | ||
1670 | { | ||
1671 | aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap); | ||
1672 | aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask); | ||
1673 | aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add); | ||
1674 | aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask); | ||
1675 | } | ||
1676 | |||
1677 | static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg, | 1674 | static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg, |
1678 | struct ib_send_wr *wr) | 1675 | struct ib_send_wr *wr) |
1679 | { | 1676 | { |
@@ -2063,28 +2060,11 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
2063 | 2060 | ||
2064 | case IB_WR_ATOMIC_CMP_AND_SWP: | 2061 | case IB_WR_ATOMIC_CMP_AND_SWP: |
2065 | case IB_WR_ATOMIC_FETCH_AND_ADD: | 2062 | case IB_WR_ATOMIC_FETCH_AND_ADD: |
2066 | set_raddr_seg(seg, wr->wr.atomic.remote_addr, | ||
2067 | wr->wr.atomic.rkey); | ||
2068 | seg += sizeof(struct mlx5_wqe_raddr_seg); | ||
2069 | |||
2070 | set_atomic_seg(seg, wr); | ||
2071 | seg += sizeof(struct mlx5_wqe_atomic_seg); | ||
2072 | |||
2073 | size += (sizeof(struct mlx5_wqe_raddr_seg) + | ||
2074 | sizeof(struct mlx5_wqe_atomic_seg)) / 16; | ||
2075 | break; | ||
2076 | |||
2077 | case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: | 2063 | case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: |
2078 | set_raddr_seg(seg, wr->wr.atomic.remote_addr, | 2064 | mlx5_ib_warn(dev, "Atomic operations are not supported yet\n"); |
2079 | wr->wr.atomic.rkey); | 2065 | err = -ENOSYS; |
2080 | seg += sizeof(struct mlx5_wqe_raddr_seg); | 2066 | *bad_wr = wr; |
2081 | 2067 | goto out; | |
2082 | set_masked_atomic_seg(seg, wr); | ||
2083 | seg += sizeof(struct mlx5_wqe_masked_atomic_seg); | ||
2084 | |||
2085 | size += (sizeof(struct mlx5_wqe_raddr_seg) + | ||
2086 | sizeof(struct mlx5_wqe_masked_atomic_seg)) / 16; | ||
2087 | break; | ||
2088 | 2068 | ||
2089 | case IB_WR_LOCAL_INV: | 2069 | case IB_WR_LOCAL_INV: |
2090 | next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; | 2070 | next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; |
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c index 84d297afd6a9..0aa478bc291a 100644 --- a/drivers/infiniband/hw/mlx5/srq.c +++ b/drivers/infiniband/hw/mlx5/srq.c | |||
@@ -295,7 +295,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, | |||
295 | mlx5_vfree(in); | 295 | mlx5_vfree(in); |
296 | if (err) { | 296 | if (err) { |
297 | mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err); | 297 | mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err); |
298 | goto err_srq; | 298 | goto err_usr_kern_srq; |
299 | } | 299 | } |
300 | 300 | ||
301 | mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn); | 301 | mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn); |
@@ -316,6 +316,8 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, | |||
316 | 316 | ||
317 | err_core: | 317 | err_core: |
318 | mlx5_core_destroy_srq(&dev->mdev, &srq->msrq); | 318 | mlx5_core_destroy_srq(&dev->mdev, &srq->msrq); |
319 | |||
320 | err_usr_kern_srq: | ||
319 | if (pd->uobject) | 321 | if (pd->uobject) |
320 | destroy_srq_user(pd, srq); | 322 | destroy_srq_user(pd, srq); |
321 | else | 323 | else |
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c index 7c9d35f39d75..690201738993 100644 --- a/drivers/infiniband/hw/mthca/mthca_eq.c +++ b/drivers/infiniband/hw/mthca/mthca_eq.c | |||
@@ -357,7 +357,7 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq) | |||
357 | mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n", | 357 | mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n", |
358 | eqe->type, eqe->subtype, eq->eqn); | 358 | eqe->type, eqe->subtype, eq->eqn); |
359 | break; | 359 | break; |
360 | }; | 360 | } |
361 | 361 | ||
362 | set_eqe_hw(eqe); | 362 | set_eqe_hw(eqe); |
363 | ++eq->cons_index; | 363 | ++eq->cons_index; |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index 4ed8235d2d36..50219ab2279d 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c | |||
@@ -150,7 +150,7 @@ enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps) | |||
150 | return IB_QPS_SQE; | 150 | return IB_QPS_SQE; |
151 | case OCRDMA_QPS_ERR: | 151 | case OCRDMA_QPS_ERR: |
152 | return IB_QPS_ERR; | 152 | return IB_QPS_ERR; |
153 | }; | 153 | } |
154 | return IB_QPS_ERR; | 154 | return IB_QPS_ERR; |
155 | } | 155 | } |
156 | 156 | ||
@@ -171,7 +171,7 @@ static enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps) | |||
171 | return OCRDMA_QPS_SQE; | 171 | return OCRDMA_QPS_SQE; |
172 | case IB_QPS_ERR: | 172 | case IB_QPS_ERR: |
173 | return OCRDMA_QPS_ERR; | 173 | return OCRDMA_QPS_ERR; |
174 | }; | 174 | } |
175 | return OCRDMA_QPS_ERR; | 175 | return OCRDMA_QPS_ERR; |
176 | } | 176 | } |
177 | 177 | ||
@@ -1982,7 +1982,7 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs, | |||
1982 | break; | 1982 | break; |
1983 | default: | 1983 | default: |
1984 | return -EINVAL; | 1984 | return -EINVAL; |
1985 | }; | 1985 | } |
1986 | 1986 | ||
1987 | cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd)); | 1987 | cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd)); |
1988 | if (!cmd) | 1988 | if (!cmd) |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 56e004940f18..0ce7674621ea 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c | |||
@@ -531,7 +531,7 @@ static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event) | |||
531 | case BE_DEV_DOWN: | 531 | case BE_DEV_DOWN: |
532 | ocrdma_close(dev); | 532 | ocrdma_close(dev); |
533 | break; | 533 | break; |
534 | }; | 534 | } |
535 | } | 535 | } |
536 | 536 | ||
537 | static struct ocrdma_driver ocrdma_drv = { | 537 | static struct ocrdma_driver ocrdma_drv = { |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index 6e982bb43c31..69f1d1221a6b 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | |||
@@ -141,7 +141,7 @@ static inline void get_link_speed_and_width(struct ocrdma_dev *dev, | |||
141 | /* Unsupported */ | 141 | /* Unsupported */ |
142 | *ib_speed = IB_SPEED_SDR; | 142 | *ib_speed = IB_SPEED_SDR; |
143 | *ib_width = IB_WIDTH_1X; | 143 | *ib_width = IB_WIDTH_1X; |
144 | }; | 144 | } |
145 | } | 145 | } |
146 | 146 | ||
147 | 147 | ||
@@ -2331,7 +2331,7 @@ static enum ib_wc_status ocrdma_to_ibwc_err(u16 status) | |||
2331 | default: | 2331 | default: |
2332 | ibwc_status = IB_WC_GENERAL_ERR; | 2332 | ibwc_status = IB_WC_GENERAL_ERR; |
2333 | break; | 2333 | break; |
2334 | }; | 2334 | } |
2335 | return ibwc_status; | 2335 | return ibwc_status; |
2336 | } | 2336 | } |
2337 | 2337 | ||
@@ -2370,7 +2370,7 @@ static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc, | |||
2370 | pr_err("%s() invalid opcode received = 0x%x\n", | 2370 | pr_err("%s() invalid opcode received = 0x%x\n", |
2371 | __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK); | 2371 | __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK); |
2372 | break; | 2372 | break; |
2373 | }; | 2373 | } |
2374 | } | 2374 | } |
2375 | 2375 | ||
2376 | static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp, | 2376 | static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp, |
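
The mthca and ocrdma hunks above all drop a stray semicolon after the closing brace of a switch statement. A brace-closed switch is already a complete statement, so the extra ';' is only a harmless null statement that newer compilers warn about; the same typo after an if (...) condition, fixed in the arm-smmu hunk further below, is far worse because it silently detaches the body. A minimal standalone sketch of both cases (illustrative userspace C, not from the patch):

    #include <stdio.h>

    int main(void)
    {
        int x = 0;

        switch (x) {
        case 0:
            puts("zero");
            break;
        };              /* stray ';' after a switch: just an empty statement */

        if (x != 0);    /* stray ';' after an if: the "body" below always runs */
            puts("this prints even though x == 0");

        return 0;
    }
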
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 653ac6bfc57a..6c923c7039a1 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c | |||
@@ -1588,7 +1588,7 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch, | |||
1588 | int resp_data_len; | 1588 | int resp_data_len; |
1589 | int resp_len; | 1589 | int resp_len; |
1590 | 1590 | ||
1591 | resp_data_len = (rsp_code == SRP_TSK_MGMT_SUCCESS) ? 0 : 4; | 1591 | resp_data_len = 4; |
1592 | resp_len = sizeof(*srp_rsp) + resp_data_len; | 1592 | resp_len = sizeof(*srp_rsp) + resp_data_len; |
1593 | 1593 | ||
1594 | srp_rsp = ioctx->ioctx.buf; | 1594 | srp_rsp = ioctx->ioctx.buf; |
@@ -1600,11 +1600,9 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch, | |||
1600 | + atomic_xchg(&ch->req_lim_delta, 0)); | 1600 | + atomic_xchg(&ch->req_lim_delta, 0)); |
1601 | srp_rsp->tag = tag; | 1601 | srp_rsp->tag = tag; |
1602 | 1602 | ||
1603 | if (rsp_code != SRP_TSK_MGMT_SUCCESS) { | 1603 | srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID; |
1604 | srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID; | 1604 | srp_rsp->resp_data_len = cpu_to_be32(resp_data_len); |
1605 | srp_rsp->resp_data_len = cpu_to_be32(resp_data_len); | 1605 | srp_rsp->data[3] = rsp_code; |
1606 | srp_rsp->data[3] = rsp_code; | ||
1607 | } | ||
1608 | 1606 | ||
1609 | return resp_len; | 1607 | return resp_len; |
1610 | } | 1608 | } |
@@ -2358,6 +2356,8 @@ static void srpt_release_channel_work(struct work_struct *w) | |||
2358 | transport_deregister_session(se_sess); | 2356 | transport_deregister_session(se_sess); |
2359 | ch->sess = NULL; | 2357 | ch->sess = NULL; |
2360 | 2358 | ||
2359 | ib_destroy_cm_id(ch->cm_id); | ||
2360 | |||
2361 | srpt_destroy_ch_ib(ch); | 2361 | srpt_destroy_ch_ib(ch); |
2362 | 2362 | ||
2363 | srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring, | 2363 | srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring, |
@@ -2368,8 +2368,6 @@ static void srpt_release_channel_work(struct work_struct *w) | |||
2368 | list_del(&ch->list); | 2368 | list_del(&ch->list); |
2369 | spin_unlock_irq(&sdev->spinlock); | 2369 | spin_unlock_irq(&sdev->spinlock); |
2370 | 2370 | ||
2371 | ib_destroy_cm_id(ch->cm_id); | ||
2372 | |||
2373 | if (ch->release_done) | 2371 | if (ch->release_done) |
2374 | complete(ch->release_done); | 2372 | complete(ch->release_done); |
2375 | 2373 | ||
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index fe302e33f72e..c880ebaf1553 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig | |||
@@ -52,7 +52,7 @@ config AMD_IOMMU | |||
52 | select PCI_PRI | 52 | select PCI_PRI |
53 | select PCI_PASID | 53 | select PCI_PASID |
54 | select IOMMU_API | 54 | select IOMMU_API |
55 | depends on X86_64 && PCI && ACPI && X86_IO_APIC | 55 | depends on X86_64 && PCI && ACPI |
56 | ---help--- | 56 | ---help--- |
57 | With this option you can enable support for AMD IOMMU hardware in | 57 | With this option you can enable support for AMD IOMMU hardware in |
58 | your system. An IOMMU is a hardware component which provides | 58 | your system. An IOMMU is a hardware component which provides |
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index f417e89e1e7e..181c9ba929cd 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c | |||
@@ -377,6 +377,7 @@ struct arm_smmu_cfg { | |||
377 | u32 cbar; | 377 | u32 cbar; |
378 | pgd_t *pgd; | 378 | pgd_t *pgd; |
379 | }; | 379 | }; |
380 | #define INVALID_IRPTNDX 0xff | ||
380 | 381 | ||
381 | #define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx) | 382 | #define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx) |
382 | #define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1) | 383 | #define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1) |
@@ -840,7 +841,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, | |||
840 | if (IS_ERR_VALUE(ret)) { | 841 | if (IS_ERR_VALUE(ret)) { |
841 | dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n", | 842 | dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n", |
842 | root_cfg->irptndx, irq); | 843 | root_cfg->irptndx, irq); |
843 | root_cfg->irptndx = -1; | 844 | root_cfg->irptndx = INVALID_IRPTNDX; |
844 | goto out_free_context; | 845 | goto out_free_context; |
845 | } | 846 | } |
846 | 847 | ||
@@ -869,7 +870,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) | |||
869 | writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); | 870 | writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); |
870 | arm_smmu_tlb_inv_context(root_cfg); | 871 | arm_smmu_tlb_inv_context(root_cfg); |
871 | 872 | ||
872 | if (root_cfg->irptndx != -1) { | 873 | if (root_cfg->irptndx != INVALID_IRPTNDX) { |
873 | irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx]; | 874 | irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx]; |
874 | free_irq(irq, domain); | 875 | free_irq(irq, domain); |
875 | } | 876 | } |
@@ -1857,8 +1858,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) | |||
1857 | goto out_put_parent; | 1858 | goto out_put_parent; |
1858 | } | 1859 | } |
1859 | 1860 | ||
1860 | arm_smmu_device_reset(smmu); | ||
1861 | |||
1862 | for (i = 0; i < smmu->num_global_irqs; ++i) { | 1861 | for (i = 0; i < smmu->num_global_irqs; ++i) { |
1863 | err = request_irq(smmu->irqs[i], | 1862 | err = request_irq(smmu->irqs[i], |
1864 | arm_smmu_global_fault, | 1863 | arm_smmu_global_fault, |
@@ -1876,6 +1875,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) | |||
1876 | spin_lock(&arm_smmu_devices_lock); | 1875 | spin_lock(&arm_smmu_devices_lock); |
1877 | list_add(&smmu->list, &arm_smmu_devices); | 1876 | list_add(&smmu->list, &arm_smmu_devices); |
1878 | spin_unlock(&arm_smmu_devices_lock); | 1877 | spin_unlock(&arm_smmu_devices_lock); |
1878 | |||
1879 | arm_smmu_device_reset(smmu); | ||
1879 | return 0; | 1880 | return 0; |
1880 | 1881 | ||
1881 | out_free_irqs: | 1882 | out_free_irqs: |
@@ -1966,10 +1967,10 @@ static int __init arm_smmu_init(void) | |||
1966 | return ret; | 1967 | return ret; |
1967 | 1968 | ||
1968 | /* Oh, for a proper bus abstraction */ | 1969 | /* Oh, for a proper bus abstraction */ |
1969 | if (!iommu_present(&platform_bus_type)); | 1970 | if (!iommu_present(&platform_bus_type)) |
1970 | bus_set_iommu(&platform_bus_type, &arm_smmu_ops); | 1971 | bus_set_iommu(&platform_bus_type, &arm_smmu_ops); |
1971 | 1972 | ||
1972 | if (!iommu_present(&amba_bustype)); | 1973 | if (!iommu_present(&amba_bustype)) |
1973 | bus_set_iommu(&amba_bustype, &arm_smmu_ops); | 1974 | bus_set_iommu(&amba_bustype, &arm_smmu_ops); |
1974 | 1975 | ||
1975 | return 0; | 1976 | return 0; |
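
The INVALID_IRPTNDX change above matters because irptndx appears to be a narrow unsigned field (the 0xff sentinel suggests a u8). Assigning -1 to such a field and later comparing it against -1 does not do what it looks like: the field is promoted to int before the comparison, so the test can never match. A hedged userspace illustration of the pitfall, with the field width assumed to be 8 bits:

    #include <stdint.h>
    #include <stdio.h>

    #define INVALID_IRPTNDX 0xff

    struct cfg {
        uint8_t irptndx;    /* narrow unsigned field, like the SMMU context index */
    };

    int main(void)
    {
        struct cfg c;

        c.irptndx = -1;     /* stored as 0xff after the implicit conversion */

        /* The u8 is promoted to int (255), which never equals -1. */
        printf("irptndx != -1   : %d\n", c.irptndx != -1);              /* prints 1 */

        /* Comparing against an unsigned sentinel of the same width works. */
        printf("irptndx == 0xff : %d\n", c.irptndx == INVALID_IRPTNDX); /* prints 1 */

        return 0;
    }
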
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index b39f6f0b45f2..0f12382aa35d 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h | |||
@@ -498,7 +498,7 @@ struct cached_dev { | |||
498 | */ | 498 | */ |
499 | atomic_t has_dirty; | 499 | atomic_t has_dirty; |
500 | 500 | ||
501 | struct ratelimit writeback_rate; | 501 | struct bch_ratelimit writeback_rate; |
502 | struct delayed_work writeback_rate_update; | 502 | struct delayed_work writeback_rate_update; |
503 | 503 | ||
504 | /* | 504 | /* |
@@ -507,10 +507,9 @@ struct cached_dev { | |||
507 | */ | 507 | */ |
508 | sector_t last_read; | 508 | sector_t last_read; |
509 | 509 | ||
510 | /* Number of writeback bios in flight */ | 510 | /* Limit number of writeback bios in flight */ |
511 | atomic_t in_flight; | 511 | struct semaphore in_flight; |
512 | struct closure_with_timer writeback; | 512 | struct closure_with_timer writeback; |
513 | struct closure_waitlist writeback_wait; | ||
514 | 513 | ||
515 | struct keybuf writeback_keys; | 514 | struct keybuf writeback_keys; |
516 | 515 | ||
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index 8010eed06a51..22d1ae72c282 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c | |||
@@ -926,28 +926,45 @@ struct bkey *bch_next_recurse_key(struct btree *b, struct bkey *search) | |||
926 | 926 | ||
927 | /* Mergesort */ | 927 | /* Mergesort */ |
928 | 928 | ||
929 | static void sort_key_next(struct btree_iter *iter, | ||
930 | struct btree_iter_set *i) | ||
931 | { | ||
932 | i->k = bkey_next(i->k); | ||
933 | |||
934 | if (i->k == i->end) | ||
935 | *i = iter->data[--iter->used]; | ||
936 | } | ||
937 | |||
929 | static void btree_sort_fixup(struct btree_iter *iter) | 938 | static void btree_sort_fixup(struct btree_iter *iter) |
930 | { | 939 | { |
931 | while (iter->used > 1) { | 940 | while (iter->used > 1) { |
932 | struct btree_iter_set *top = iter->data, *i = top + 1; | 941 | struct btree_iter_set *top = iter->data, *i = top + 1; |
933 | struct bkey *k; | ||
934 | 942 | ||
935 | if (iter->used > 2 && | 943 | if (iter->used > 2 && |
936 | btree_iter_cmp(i[0], i[1])) | 944 | btree_iter_cmp(i[0], i[1])) |
937 | i++; | 945 | i++; |
938 | 946 | ||
939 | for (k = i->k; | 947 | if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0) |
940 | k != i->end && bkey_cmp(top->k, &START_KEY(k)) > 0; | ||
941 | k = bkey_next(k)) | ||
942 | if (top->k > i->k) | ||
943 | __bch_cut_front(top->k, k); | ||
944 | else if (KEY_SIZE(k)) | ||
945 | bch_cut_back(&START_KEY(k), top->k); | ||
946 | |||
947 | if (top->k < i->k || k == i->k) | ||
948 | break; | 948 | break; |
949 | 949 | ||
950 | heap_sift(iter, i - top, btree_iter_cmp); | 950 | if (!KEY_SIZE(i->k)) { |
951 | sort_key_next(iter, i); | ||
952 | heap_sift(iter, i - top, btree_iter_cmp); | ||
953 | continue; | ||
954 | } | ||
955 | |||
956 | if (top->k > i->k) { | ||
957 | if (bkey_cmp(top->k, i->k) >= 0) | ||
958 | sort_key_next(iter, i); | ||
959 | else | ||
960 | bch_cut_front(top->k, i->k); | ||
961 | |||
962 | heap_sift(iter, i - top, btree_iter_cmp); | ||
963 | } else { | ||
964 | /* can't happen because of comparison func */ | ||
965 | BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k))); | ||
966 | bch_cut_back(&START_KEY(i->k), top->k); | ||
967 | } | ||
951 | } | 968 | } |
952 | } | 969 | } |
953 | 970 | ||
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index f9764e61978b..f42fc7ed9cd6 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c | |||
@@ -255,7 +255,7 @@ void bch_btree_node_read(struct btree *b) | |||
255 | 255 | ||
256 | return; | 256 | return; |
257 | err: | 257 | err: |
258 | bch_cache_set_error(b->c, "io error reading bucket %lu", | 258 | bch_cache_set_error(b->c, "io error reading bucket %zu", |
259 | PTR_BUCKET_NR(b->c, &b->key, 0)); | 259 | PTR_BUCKET_NR(b->c, &b->key, 0)); |
260 | } | 260 | } |
261 | 261 | ||
@@ -612,7 +612,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink, | |||
612 | return SHRINK_STOP; | 612 | return SHRINK_STOP; |
613 | 613 | ||
614 | /* Return -1 if we can't do anything right now */ | 614 | /* Return -1 if we can't do anything right now */ |
615 | if (sc->gfp_mask & __GFP_WAIT) | 615 | if (sc->gfp_mask & __GFP_IO) |
616 | mutex_lock(&c->bucket_lock); | 616 | mutex_lock(&c->bucket_lock); |
617 | else if (!mutex_trylock(&c->bucket_lock)) | 617 | else if (!mutex_trylock(&c->bucket_lock)) |
618 | return -1; | 618 | return -1; |
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index ba95ab84b2be..8435f81e5d85 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c | |||
@@ -153,7 +153,8 @@ int bch_journal_read(struct cache_set *c, struct list_head *list, | |||
153 | bitmap_zero(bitmap, SB_JOURNAL_BUCKETS); | 153 | bitmap_zero(bitmap, SB_JOURNAL_BUCKETS); |
154 | pr_debug("%u journal buckets", ca->sb.njournal_buckets); | 154 | pr_debug("%u journal buckets", ca->sb.njournal_buckets); |
155 | 155 | ||
156 | /* Read journal buckets ordered by golden ratio hash to quickly | 156 | /* |
157 | * Read journal buckets ordered by golden ratio hash to quickly | ||
157 | * find a sequence of buckets with valid journal entries | 158 | * find a sequence of buckets with valid journal entries |
158 | */ | 159 | */ |
159 | for (i = 0; i < ca->sb.njournal_buckets; i++) { | 160 | for (i = 0; i < ca->sb.njournal_buckets; i++) { |
@@ -166,18 +167,20 @@ int bch_journal_read(struct cache_set *c, struct list_head *list, | |||
166 | goto bsearch; | 167 | goto bsearch; |
167 | } | 168 | } |
168 | 169 | ||
169 | /* If that fails, check all the buckets we haven't checked | 170 | /* |
171 | * If that fails, check all the buckets we haven't checked | ||
170 | * already | 172 | * already |
171 | */ | 173 | */ |
172 | pr_debug("falling back to linear search"); | 174 | pr_debug("falling back to linear search"); |
173 | 175 | ||
174 | for (l = 0; l < ca->sb.njournal_buckets; l++) { | 176 | for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets); |
175 | if (test_bit(l, bitmap)) | 177 | l < ca->sb.njournal_buckets; |
176 | continue; | 178 | l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1)) |
177 | |||
178 | if (read_bucket(l)) | 179 | if (read_bucket(l)) |
179 | goto bsearch; | 180 | goto bsearch; |
180 | } | 181 | |
182 | if (list_empty(list)) | ||
183 | continue; | ||
181 | bsearch: | 184 | bsearch: |
182 | /* Binary search */ | 185 | /* Binary search */ |
183 | m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1); | 186 | m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1); |
@@ -197,10 +200,12 @@ bsearch: | |||
197 | r = m; | 200 | r = m; |
198 | } | 201 | } |
199 | 202 | ||
200 | /* Read buckets in reverse order until we stop finding more | 203 | /* |
204 | * Read buckets in reverse order until we stop finding more | ||
201 | * journal entries | 205 | * journal entries |
202 | */ | 206 | */ |
203 | pr_debug("finishing up"); | 207 | pr_debug("finishing up: m %u njournal_buckets %u", |
208 | m, ca->sb.njournal_buckets); | ||
204 | l = m; | 209 | l = m; |
205 | 210 | ||
206 | while (1) { | 211 | while (1) { |
@@ -228,9 +233,10 @@ bsearch: | |||
228 | } | 233 | } |
229 | } | 234 | } |
230 | 235 | ||
231 | c->journal.seq = list_entry(list->prev, | 236 | if (!list_empty(list)) |
232 | struct journal_replay, | 237 | c->journal.seq = list_entry(list->prev, |
233 | list)->j.seq; | 238 | struct journal_replay, |
239 | list)->j.seq; | ||
234 | 240 | ||
235 | return 0; | 241 | return 0; |
236 | #undef read_bucket | 242 | #undef read_bucket |
@@ -428,7 +434,7 @@ static void do_journal_discard(struct cache *ca) | |||
428 | return; | 434 | return; |
429 | } | 435 | } |
430 | 436 | ||
431 | switch (atomic_read(&ja->discard_in_flight) == DISCARD_IN_FLIGHT) { | 437 | switch (atomic_read(&ja->discard_in_flight)) { |
432 | case DISCARD_IN_FLIGHT: | 438 | case DISCARD_IN_FLIGHT: |
433 | return; | 439 | return; |
434 | 440 | ||
@@ -689,6 +695,7 @@ void bch_journal_meta(struct cache_set *c, struct closure *cl) | |||
689 | if (cl) | 695 | if (cl) |
690 | BUG_ON(!closure_wait(&w->wait, cl)); | 696 | BUG_ON(!closure_wait(&w->wait, cl)); |
691 | 697 | ||
698 | closure_flush(&c->journal.io); | ||
692 | __journal_try_write(c, true); | 699 | __journal_try_write(c, true); |
693 | } | 700 | } |
694 | } | 701 | } |
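
The journal.c hunk above replaces "loop over every bucket and skip the ones already marked in the bitmap" with find_first_zero_bit()/find_next_zero_bit(), and only falls through to the binary search once something has actually landed on the list. A hedged userspace sketch of that iteration pattern over a small bitmap; find_next_zero_bit() here is a toy stand-in written for the example, not the kernel helper:

    #include <stdio.h>

    #define NBITS 16

    /* Minimal stand-in for the kernel's find_next_zero_bit(): returns the
     * index of the first clear bit at or after 'start', or 'nbits' if none. */
    static unsigned find_next_zero_bit(unsigned long map, unsigned nbits,
                                       unsigned start)
    {
        unsigned i;

        for (i = start; i < nbits; i++)
            if (!(map & (1UL << i)))
                return i;
        return nbits;
    }

    int main(void)
    {
        unsigned long seen = 0xF0F3;   /* buckets already read during the hash pass */
        unsigned l;

        /* Visit only the buckets that have not been read yet. */
        for (l = find_next_zero_bit(seen, NBITS, 0);
             l < NBITS;
             l = find_next_zero_bit(seen, NBITS, l + 1))
            printf("reading bucket %u\n", l);

        return 0;
    }
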
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 786a1a4f74d8..b6a74bcbb08f 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c | |||
@@ -996,17 +996,19 @@ static void request_write(struct cached_dev *dc, struct search *s) | |||
996 | closure_bio_submit(bio, cl, s->d); | 996 | closure_bio_submit(bio, cl, s->d); |
997 | } else { | 997 | } else { |
998 | bch_writeback_add(dc); | 998 | bch_writeback_add(dc); |
999 | s->op.cache_bio = bio; | ||
999 | 1000 | ||
1000 | if (s->op.flush_journal) { | 1001 | if (bio->bi_rw & REQ_FLUSH) { |
1001 | /* Also need to send a flush to the backing device */ | 1002 | /* Also need to send a flush to the backing device */ |
1002 | s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO, | 1003 | struct bio *flush = bio_alloc_bioset(0, GFP_NOIO, |
1003 | dc->disk.bio_split); | 1004 | dc->disk.bio_split); |
1004 | 1005 | ||
1005 | bio->bi_size = 0; | 1006 | flush->bi_rw = WRITE_FLUSH; |
1006 | bio->bi_vcnt = 0; | 1007 | flush->bi_bdev = bio->bi_bdev; |
1007 | closure_bio_submit(bio, cl, s->d); | 1008 | flush->bi_end_io = request_endio; |
1008 | } else { | 1009 | flush->bi_private = cl; |
1009 | s->op.cache_bio = bio; | 1010 | |
1011 | closure_bio_submit(flush, cl, s->d); | ||
1010 | } | 1012 | } |
1011 | } | 1013 | } |
1012 | out: | 1014 | out: |
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 4fe6ab2fbe2e..924dcfdae111 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c | |||
@@ -223,8 +223,13 @@ STORE(__cached_dev) | |||
223 | } | 223 | } |
224 | 224 | ||
225 | if (attr == &sysfs_label) { | 225 | if (attr == &sysfs_label) { |
226 | /* note: endlines are preserved */ | 226 | if (size > SB_LABEL_SIZE) |
227 | memcpy(dc->sb.label, buf, SB_LABEL_SIZE); | 227 | return -EINVAL; |
228 | memcpy(dc->sb.label, buf, size); | ||
229 | if (size < SB_LABEL_SIZE) | ||
230 | dc->sb.label[size] = '\0'; | ||
231 | if (size && dc->sb.label[size - 1] == '\n') | ||
232 | dc->sb.label[size - 1] = '\0'; | ||
228 | bch_write_bdev_super(dc, NULL); | 233 | bch_write_bdev_super(dc, NULL); |
229 | if (dc->disk.c) { | 234 | if (dc->disk.c) { |
230 | memcpy(dc->disk.c->uuids[dc->disk.id].label, | 235 | memcpy(dc->disk.c->uuids[dc->disk.id].label, |
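
The sysfs hunk above swaps an unconditional SB_LABEL_SIZE memcpy for a size-checked copy that NUL-terminates short writes and trims the trailing newline that sysfs writes usually carry. A hedged sketch of the same bounded-copy-and-trim pattern (store_label() and the buffer size are illustrative, not the driver's names):

    #include <stdio.h>
    #include <string.h>

    #define SB_LABEL_SIZE 32

    /* Copy a user-supplied buffer into a fixed-size label field: reject
     * oversized input, terminate short input, drop a trailing newline.
     * A full-width label fills the field exactly, like the on-disk field. */
    static int store_label(char label[SB_LABEL_SIZE], const char *buf, size_t size)
    {
        if (size > SB_LABEL_SIZE)
            return -1;                         /* -EINVAL in the kernel version */
        memcpy(label, buf, size);
        if (size < SB_LABEL_SIZE)
            label[size] = '\0';
        if (size && label[size - 1] == '\n')
            label[size - 1] = '\0';
        return 0;
    }

    int main(void)
    {
        char label[SB_LABEL_SIZE];

        if (store_label(label, "backup-disk\n", strlen("backup-disk\n")) == 0)
            printf("label = \"%s\"\n", label); /* prints: label = "backup-disk" */
        return 0;
    }
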
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c index 98eb81159a22..420dad545c7d 100644 --- a/drivers/md/bcache/util.c +++ b/drivers/md/bcache/util.c | |||
@@ -190,7 +190,16 @@ void bch_time_stats_update(struct time_stats *stats, uint64_t start_time) | |||
190 | stats->last = now ?: 1; | 190 | stats->last = now ?: 1; |
191 | } | 191 | } |
192 | 192 | ||
193 | unsigned bch_next_delay(struct ratelimit *d, uint64_t done) | 193 | /** |
194 | * bch_next_delay() - increment @d by the amount of work done, and return how | ||
195 | * long to delay until the next time to do some work. | ||
196 | * | ||
197 | * @d - the struct bch_ratelimit to update | ||
198 | * @done - the amount of work done, in arbitrary units | ||
199 | * | ||
200 | * Returns the amount of time to delay by, in jiffies | ||
201 | */ | ||
202 | uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done) | ||
194 | { | 203 | { |
195 | uint64_t now = local_clock(); | 204 | uint64_t now = local_clock(); |
196 | 205 | ||
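
The new kernel-doc on bch_next_delay() describes a pacing interface: the caller reports how much work it just did, the limiter advances a "next time to do work" timestamp in proportion to that work, and the return value is how long to sleep. A hedged userspace sketch of that style of pacing follows; the arithmetic and field names here are a toy of my own, not the bcache implementation, and a real caller would clamp the result the way writeback_delay() clamps to HZ below:

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    struct ratelimit {
        uint64_t next_ns;    /* next time we want to do work, in nanoseconds */
        uint64_t rate;       /* allowed units of work per second */
    };

    static uint64_t now_ns(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
    }

    /* Charge 'done' units of work and return how many nanoseconds to wait
     * before doing more; 0 means we are still under the configured rate. */
    static uint64_t next_delay(struct ratelimit *d, uint64_t done)
    {
        uint64_t now = now_ns();

        if (d->next_ns < now)
            d->next_ns = now;     /* fell behind: start charging from now */
        d->next_ns += done * 1000000000ull / d->rate;

        return d->next_ns > now ? d->next_ns - now : 0;
    }

    int main(void)
    {
        struct ratelimit rl = { .next_ns = 0, .rate = 1000 };  /* 1000 units/sec */

        printf("delay after 500 units: %llu ns\n",
               (unsigned long long)next_delay(&rl, 500));      /* about 0.5 s */
        return 0;
    }
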
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h index 1ae2a73ad85f..ea345c6896f4 100644 --- a/drivers/md/bcache/util.h +++ b/drivers/md/bcache/util.h | |||
@@ -450,17 +450,23 @@ read_attribute(name ## _last_ ## frequency_units) | |||
450 | (ewma) >> factor; \ | 450 | (ewma) >> factor; \ |
451 | }) | 451 | }) |
452 | 452 | ||
453 | struct ratelimit { | 453 | struct bch_ratelimit { |
454 | /* Next time we want to do some work, in nanoseconds */ | ||
454 | uint64_t next; | 455 | uint64_t next; |
456 | |||
457 | /* | ||
458 | * Rate at which we want to do work, in units per nanosecond | ||
459 | * The units here correspond to the units passed to bch_next_delay() | ||
460 | */ | ||
455 | unsigned rate; | 461 | unsigned rate; |
456 | }; | 462 | }; |
457 | 463 | ||
458 | static inline void ratelimit_reset(struct ratelimit *d) | 464 | static inline void bch_ratelimit_reset(struct bch_ratelimit *d) |
459 | { | 465 | { |
460 | d->next = local_clock(); | 466 | d->next = local_clock(); |
461 | } | 467 | } |
462 | 468 | ||
463 | unsigned bch_next_delay(struct ratelimit *d, uint64_t done); | 469 | uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done); |
464 | 470 | ||
465 | #define __DIV_SAFE(n, d, zero) \ | 471 | #define __DIV_SAFE(n, d, zero) \ |
466 | ({ \ | 472 | ({ \ |
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 22cbff551628..ba3ee48320f2 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c | |||
@@ -94,11 +94,15 @@ static void update_writeback_rate(struct work_struct *work) | |||
94 | 94 | ||
95 | static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors) | 95 | static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors) |
96 | { | 96 | { |
97 | uint64_t ret; | ||
98 | |||
97 | if (atomic_read(&dc->disk.detaching) || | 99 | if (atomic_read(&dc->disk.detaching) || |
98 | !dc->writeback_percent) | 100 | !dc->writeback_percent) |
99 | return 0; | 101 | return 0; |
100 | 102 | ||
101 | return bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL); | 103 | ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL); |
104 | |||
105 | return min_t(uint64_t, ret, HZ); | ||
102 | } | 106 | } |
103 | 107 | ||
104 | /* Background writeback */ | 108 | /* Background writeback */ |
@@ -208,7 +212,7 @@ normal_refill: | |||
208 | 212 | ||
209 | up_write(&dc->writeback_lock); | 213 | up_write(&dc->writeback_lock); |
210 | 214 | ||
211 | ratelimit_reset(&dc->writeback_rate); | 215 | bch_ratelimit_reset(&dc->writeback_rate); |
212 | 216 | ||
213 | /* Punt to workqueue only so we don't recurse and blow the stack */ | 217 | /* Punt to workqueue only so we don't recurse and blow the stack */ |
214 | continue_at(cl, read_dirty, dirty_wq); | 218 | continue_at(cl, read_dirty, dirty_wq); |
@@ -318,9 +322,7 @@ static void write_dirty_finish(struct closure *cl) | |||
318 | } | 322 | } |
319 | 323 | ||
320 | bch_keybuf_del(&dc->writeback_keys, w); | 324 | bch_keybuf_del(&dc->writeback_keys, w); |
321 | atomic_dec_bug(&dc->in_flight); | 325 | up(&dc->in_flight); |
322 | |||
323 | closure_wake_up(&dc->writeback_wait); | ||
324 | 326 | ||
325 | closure_return_with_destructor(cl, dirty_io_destructor); | 327 | closure_return_with_destructor(cl, dirty_io_destructor); |
326 | } | 328 | } |
@@ -349,7 +351,7 @@ static void write_dirty(struct closure *cl) | |||
349 | 351 | ||
350 | closure_bio_submit(&io->bio, cl, &io->dc->disk); | 352 | closure_bio_submit(&io->bio, cl, &io->dc->disk); |
351 | 353 | ||
352 | continue_at(cl, write_dirty_finish, dirty_wq); | 354 | continue_at(cl, write_dirty_finish, system_wq); |
353 | } | 355 | } |
354 | 356 | ||
355 | static void read_dirty_endio(struct bio *bio, int error) | 357 | static void read_dirty_endio(struct bio *bio, int error) |
@@ -369,7 +371,7 @@ static void read_dirty_submit(struct closure *cl) | |||
369 | 371 | ||
370 | closure_bio_submit(&io->bio, cl, &io->dc->disk); | 372 | closure_bio_submit(&io->bio, cl, &io->dc->disk); |
371 | 373 | ||
372 | continue_at(cl, write_dirty, dirty_wq); | 374 | continue_at(cl, write_dirty, system_wq); |
373 | } | 375 | } |
374 | 376 | ||
375 | static void read_dirty(struct closure *cl) | 377 | static void read_dirty(struct closure *cl) |
@@ -394,12 +396,8 @@ static void read_dirty(struct closure *cl) | |||
394 | 396 | ||
395 | if (delay > 0 && | 397 | if (delay > 0 && |
396 | (KEY_START(&w->key) != dc->last_read || | 398 | (KEY_START(&w->key) != dc->last_read || |
397 | jiffies_to_msecs(delay) > 50)) { | 399 | jiffies_to_msecs(delay) > 50)) |
398 | w->private = NULL; | 400 | delay = schedule_timeout_uninterruptible(delay); |
399 | |||
400 | closure_delay(&dc->writeback, delay); | ||
401 | continue_at(cl, read_dirty, dirty_wq); | ||
402 | } | ||
403 | 401 | ||
404 | dc->last_read = KEY_OFFSET(&w->key); | 402 | dc->last_read = KEY_OFFSET(&w->key); |
405 | 403 | ||
@@ -424,15 +422,10 @@ static void read_dirty(struct closure *cl) | |||
424 | 422 | ||
425 | trace_bcache_writeback(&w->key); | 423 | trace_bcache_writeback(&w->key); |
426 | 424 | ||
427 | closure_call(&io->cl, read_dirty_submit, NULL, &dc->disk.cl); | 425 | down(&dc->in_flight); |
426 | closure_call(&io->cl, read_dirty_submit, NULL, cl); | ||
428 | 427 | ||
429 | delay = writeback_delay(dc, KEY_SIZE(&w->key)); | 428 | delay = writeback_delay(dc, KEY_SIZE(&w->key)); |
430 | |||
431 | atomic_inc(&dc->in_flight); | ||
432 | |||
433 | if (!closure_wait_event(&dc->writeback_wait, cl, | ||
434 | atomic_read(&dc->in_flight) < 64)) | ||
435 | continue_at(cl, read_dirty, dirty_wq); | ||
436 | } | 429 | } |
437 | 430 | ||
438 | if (0) { | 431 | if (0) { |
@@ -442,7 +435,11 @@ err: | |||
442 | bch_keybuf_del(&dc->writeback_keys, w); | 435 | bch_keybuf_del(&dc->writeback_keys, w); |
443 | } | 436 | } |
444 | 437 | ||
445 | refill_dirty(cl); | 438 | /* |
439 | * Wait for outstanding writeback IOs to finish (and keybuf slots to be | ||
440 | * freed) before refilling again | ||
441 | */ | ||
442 | continue_at(cl, refill_dirty, dirty_wq); | ||
446 | } | 443 | } |
447 | 444 | ||
448 | /* Init */ | 445 | /* Init */ |
@@ -484,6 +481,7 @@ void bch_sectors_dirty_init(struct cached_dev *dc) | |||
484 | 481 | ||
485 | void bch_cached_dev_writeback_init(struct cached_dev *dc) | 482 | void bch_cached_dev_writeback_init(struct cached_dev *dc) |
486 | { | 483 | { |
484 | sema_init(&dc->in_flight, 64); | ||
487 | closure_init_unlocked(&dc->writeback); | 485 | closure_init_unlocked(&dc->writeback); |
488 | init_rwsem(&dc->writeback_lock); | 486 | init_rwsem(&dc->writeback_lock); |
489 | 487 | ||
@@ -513,7 +511,7 @@ void bch_writeback_exit(void) | |||
513 | 511 | ||
514 | int __init bch_writeback_init(void) | 512 | int __init bch_writeback_init(void) |
515 | { | 513 | { |
516 | dirty_wq = create_singlethread_workqueue("bcache_writeback"); | 514 | dirty_wq = create_workqueue("bcache_writeback"); |
517 | if (!dirty_wq) | 515 | if (!dirty_wq) |
518 | return -ENOMEM; | 516 | return -ENOMEM; |
519 | 517 | ||
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index ea49834377c8..2a20986a2fec 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c | |||
@@ -19,8 +19,6 @@ | |||
19 | #define DM_MSG_PREFIX "io" | 19 | #define DM_MSG_PREFIX "io" |
20 | 20 | ||
21 | #define DM_IO_MAX_REGIONS BITS_PER_LONG | 21 | #define DM_IO_MAX_REGIONS BITS_PER_LONG |
22 | #define MIN_IOS 16 | ||
23 | #define MIN_BIOS 16 | ||
24 | 22 | ||
25 | struct dm_io_client { | 23 | struct dm_io_client { |
26 | mempool_t *pool; | 24 | mempool_t *pool; |
@@ -50,16 +48,17 @@ static struct kmem_cache *_dm_io_cache; | |||
50 | struct dm_io_client *dm_io_client_create(void) | 48 | struct dm_io_client *dm_io_client_create(void) |
51 | { | 49 | { |
52 | struct dm_io_client *client; | 50 | struct dm_io_client *client; |
51 | unsigned min_ios = dm_get_reserved_bio_based_ios(); | ||
53 | 52 | ||
54 | client = kmalloc(sizeof(*client), GFP_KERNEL); | 53 | client = kmalloc(sizeof(*client), GFP_KERNEL); |
55 | if (!client) | 54 | if (!client) |
56 | return ERR_PTR(-ENOMEM); | 55 | return ERR_PTR(-ENOMEM); |
57 | 56 | ||
58 | client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache); | 57 | client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache); |
59 | if (!client->pool) | 58 | if (!client->pool) |
60 | goto bad; | 59 | goto bad; |
61 | 60 | ||
62 | client->bios = bioset_create(MIN_BIOS, 0); | 61 | client->bios = bioset_create(min_ios, 0); |
63 | if (!client->bios) | 62 | if (!client->bios) |
64 | goto bad; | 63 | goto bad; |
65 | 64 | ||
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index b759a127f9c3..de570a558764 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c | |||
@@ -7,6 +7,7 @@ | |||
7 | 7 | ||
8 | #include <linux/device-mapper.h> | 8 | #include <linux/device-mapper.h> |
9 | 9 | ||
10 | #include "dm.h" | ||
10 | #include "dm-path-selector.h" | 11 | #include "dm-path-selector.h" |
11 | #include "dm-uevent.h" | 12 | #include "dm-uevent.h" |
12 | 13 | ||
@@ -116,8 +117,6 @@ struct dm_mpath_io { | |||
116 | 117 | ||
117 | typedef int (*action_fn) (struct pgpath *pgpath); | 118 | typedef int (*action_fn) (struct pgpath *pgpath); |
118 | 119 | ||
119 | #define MIN_IOS 256 /* Mempool size */ | ||
120 | |||
121 | static struct kmem_cache *_mpio_cache; | 120 | static struct kmem_cache *_mpio_cache; |
122 | 121 | ||
123 | static struct workqueue_struct *kmultipathd, *kmpath_handlerd; | 122 | static struct workqueue_struct *kmultipathd, *kmpath_handlerd; |
@@ -190,6 +189,7 @@ static void free_priority_group(struct priority_group *pg, | |||
190 | static struct multipath *alloc_multipath(struct dm_target *ti) | 189 | static struct multipath *alloc_multipath(struct dm_target *ti) |
191 | { | 190 | { |
192 | struct multipath *m; | 191 | struct multipath *m; |
192 | unsigned min_ios = dm_get_reserved_rq_based_ios(); | ||
193 | 193 | ||
194 | m = kzalloc(sizeof(*m), GFP_KERNEL); | 194 | m = kzalloc(sizeof(*m), GFP_KERNEL); |
195 | if (m) { | 195 | if (m) { |
@@ -202,7 +202,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti) | |||
202 | INIT_WORK(&m->trigger_event, trigger_event); | 202 | INIT_WORK(&m->trigger_event, trigger_event); |
203 | init_waitqueue_head(&m->pg_init_wait); | 203 | init_waitqueue_head(&m->pg_init_wait); |
204 | mutex_init(&m->work_mutex); | 204 | mutex_init(&m->work_mutex); |
205 | m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache); | 205 | m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache); |
206 | if (!m->mpio_pool) { | 206 | if (!m->mpio_pool) { |
207 | kfree(m); | 207 | kfree(m); |
208 | return NULL; | 208 | return NULL; |
@@ -1268,6 +1268,7 @@ static int noretry_error(int error) | |||
1268 | case -EREMOTEIO: | 1268 | case -EREMOTEIO: |
1269 | case -EILSEQ: | 1269 | case -EILSEQ: |
1270 | case -ENODATA: | 1270 | case -ENODATA: |
1271 | case -ENOSPC: | ||
1271 | return 1; | 1272 | return 1; |
1272 | } | 1273 | } |
1273 | 1274 | ||
@@ -1298,8 +1299,17 @@ static int do_end_io(struct multipath *m, struct request *clone, | |||
1298 | if (!error && !clone->errors) | 1299 | if (!error && !clone->errors) |
1299 | return 0; /* I/O complete */ | 1300 | return 0; /* I/O complete */ |
1300 | 1301 | ||
1301 | if (noretry_error(error)) | 1302 | if (noretry_error(error)) { |
1303 | if ((clone->cmd_flags & REQ_WRITE_SAME) && | ||
1304 | !clone->q->limits.max_write_same_sectors) { | ||
1305 | struct queue_limits *limits; | ||
1306 | |||
1307 | /* device doesn't really support WRITE SAME, disable it */ | ||
1308 | limits = dm_get_queue_limits(dm_table_get_md(m->ti->table)); | ||
1309 | limits->max_write_same_sectors = 0; | ||
1310 | } | ||
1302 | return error; | 1311 | return error; |
1312 | } | ||
1303 | 1313 | ||
1304 | if (mpio->pgpath) | 1314 | if (mpio->pgpath) |
1305 | fail_path(mpio->pgpath); | 1315 | fail_path(mpio->pgpath); |
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index 3ac415675b6c..4caa8e6d59d7 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c | |||
@@ -256,7 +256,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw, | |||
256 | */ | 256 | */ |
257 | INIT_WORK_ONSTACK(&req.work, do_metadata); | 257 | INIT_WORK_ONSTACK(&req.work, do_metadata); |
258 | queue_work(ps->metadata_wq, &req.work); | 258 | queue_work(ps->metadata_wq, &req.work); |
259 | flush_work(&req.work); | 259 | flush_workqueue(ps->metadata_wq); |
260 | 260 | ||
261 | return req.result; | 261 | return req.result; |
262 | } | 262 | } |
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index c434e5aab2df..aec57d76db5d 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c | |||
@@ -725,17 +725,16 @@ static int calc_max_buckets(void) | |||
725 | */ | 725 | */ |
726 | static int init_hash_tables(struct dm_snapshot *s) | 726 | static int init_hash_tables(struct dm_snapshot *s) |
727 | { | 727 | { |
728 | sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets; | 728 | sector_t hash_size, cow_dev_size, max_buckets; |
729 | 729 | ||
730 | /* | 730 | /* |
731 | * Calculate based on the size of the original volume or | 731 | * Calculate based on the size of the original volume or |
732 | * the COW volume... | 732 | * the COW volume... |
733 | */ | 733 | */ |
734 | cow_dev_size = get_dev_size(s->cow->bdev); | 734 | cow_dev_size = get_dev_size(s->cow->bdev); |
735 | origin_dev_size = get_dev_size(s->origin->bdev); | ||
736 | max_buckets = calc_max_buckets(); | 735 | max_buckets = calc_max_buckets(); |
737 | 736 | ||
738 | hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift; | 737 | hash_size = cow_dev_size >> s->store->chunk_shift; |
739 | hash_size = min(hash_size, max_buckets); | 738 | hash_size = min(hash_size, max_buckets); |
740 | 739 | ||
741 | if (hash_size < 64) | 740 | if (hash_size < 64) |
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c index 8ae31e8d3d64..3d404c1371ed 100644 --- a/drivers/md/dm-stats.c +++ b/drivers/md/dm-stats.c | |||
@@ -451,19 +451,26 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry, | |||
451 | struct dm_stat_percpu *p; | 451 | struct dm_stat_percpu *p; |
452 | 452 | ||
453 | /* | 453 | /* |
454 | * For strict correctness we should use local_irq_disable/enable | 454 | * For strict correctness we should use local_irq_save/restore |
455 | * instead of preempt_disable/enable. | 455 | * instead of preempt_disable/enable. |
456 | * | 456 | * |
457 | * This is racy if the driver finishes bios from non-interrupt | 457 | * preempt_disable/enable is racy if the driver finishes bios |
458 | * context as well as from interrupt context or from more different | 458 | * from non-interrupt context as well as from interrupt context |
459 | * interrupts. | 459 | * or from more different interrupts. |
460 | * | 460 | * |
461 | * However, the race only results in not counting some events, | 461 | * On 64-bit architectures the race only results in not counting some |
462 | * so it is acceptable. | 462 | * events, so it is acceptable. On 32-bit architectures the race could |
463 | * cause the counter going off by 2^32, so we need to do proper locking | ||
464 | * there. | ||
463 | * | 465 | * |
464 | * part_stat_lock()/part_stat_unlock() have this race too. | 466 | * part_stat_lock()/part_stat_unlock() have this race too. |
465 | */ | 467 | */ |
468 | #if BITS_PER_LONG == 32 | ||
469 | unsigned long flags; | ||
470 | local_irq_save(flags); | ||
471 | #else | ||
466 | preempt_disable(); | 472 | preempt_disable(); |
473 | #endif | ||
467 | p = &s->stat_percpu[smp_processor_id()][entry]; | 474 | p = &s->stat_percpu[smp_processor_id()][entry]; |
468 | 475 | ||
469 | if (!end) { | 476 | if (!end) { |
@@ -478,7 +485,11 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry, | |||
478 | p->ticks[idx] += duration; | 485 | p->ticks[idx] += duration; |
479 | } | 486 | } |
480 | 487 | ||
488 | #if BITS_PER_LONG == 32 | ||
489 | local_irq_restore(flags); | ||
490 | #else | ||
481 | preempt_enable(); | 491 | preempt_enable(); |
492 | #endif | ||
482 | } | 493 | } |
483 | 494 | ||
484 | static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw, | 495 | static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw, |
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index ed063427d676..2c0cf511ec23 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
@@ -2095,6 +2095,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) | |||
2095 | * them down to the data device. The thin device's discard | 2095 | * them down to the data device. The thin device's discard |
2096 | * processing will cause mappings to be removed from the btree. | 2096 | * processing will cause mappings to be removed from the btree. |
2097 | */ | 2097 | */ |
2098 | ti->discard_zeroes_data_unsupported = true; | ||
2098 | if (pf.discard_enabled && pf.discard_passdown) { | 2099 | if (pf.discard_enabled && pf.discard_passdown) { |
2099 | ti->num_discard_bios = 1; | 2100 | ti->num_discard_bios = 1; |
2100 | 2101 | ||
@@ -2104,7 +2105,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) | |||
2104 | * thin devices' discard limits consistent). | 2105 | * thin devices' discard limits consistent). |
2105 | */ | 2106 | */ |
2106 | ti->discards_supported = true; | 2107 | ti->discards_supported = true; |
2107 | ti->discard_zeroes_data_unsupported = true; | ||
2108 | } | 2108 | } |
2109 | ti->private = pt; | 2109 | ti->private = pt; |
2110 | 2110 | ||
@@ -2689,8 +2689,16 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits) | |||
2689 | * They get transferred to the live pool in bind_control_target() | 2689 | * They get transferred to the live pool in bind_control_target() |
2690 | * called from pool_preresume(). | 2690 | * called from pool_preresume(). |
2691 | */ | 2691 | */ |
2692 | if (!pt->adjusted_pf.discard_enabled) | 2692 | if (!pt->adjusted_pf.discard_enabled) { |
2693 | /* | ||
2694 | * Must explicitly disallow stacking discard limits otherwise the | ||
2695 | * block layer will stack them if pool's data device has support. | ||
2696 | * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the | ||
2697 | * user to see that, so make sure to set all discard limits to 0. | ||
2698 | */ | ||
2699 | limits->discard_granularity = 0; | ||
2693 | return; | 2700 | return; |
2701 | } | ||
2694 | 2702 | ||
2695 | disable_passdown_if_not_supported(pt); | 2703 | disable_passdown_if_not_supported(pt); |
2696 | 2704 | ||
@@ -2826,10 +2834,10 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv) | |||
2826 | ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook); | 2834 | ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook); |
2827 | 2835 | ||
2828 | /* In case the pool supports discards, pass them on. */ | 2836 | /* In case the pool supports discards, pass them on. */ |
2837 | ti->discard_zeroes_data_unsupported = true; | ||
2829 | if (tc->pool->pf.discard_enabled) { | 2838 | if (tc->pool->pf.discard_enabled) { |
2830 | ti->discards_supported = true; | 2839 | ti->discards_supported = true; |
2831 | ti->num_discard_bios = 1; | 2840 | ti->num_discard_bios = 1; |
2832 | ti->discard_zeroes_data_unsupported = true; | ||
2833 | /* Discard bios must be split on a block boundary */ | 2841 | /* Discard bios must be split on a block boundary */ |
2834 | ti->split_discard_bios = true; | 2842 | ti->split_discard_bios = true; |
2835 | } | 2843 | } |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 6a5e9ed2fcc3..b3e26c7d1417 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -211,10 +211,55 @@ struct dm_md_mempools { | |||
211 | struct bio_set *bs; | 211 | struct bio_set *bs; |
212 | }; | 212 | }; |
213 | 213 | ||
214 | #define MIN_IOS 256 | 214 | #define RESERVED_BIO_BASED_IOS 16 |
215 | #define RESERVED_REQUEST_BASED_IOS 256 | ||
216 | #define RESERVED_MAX_IOS 1024 | ||
215 | static struct kmem_cache *_io_cache; | 217 | static struct kmem_cache *_io_cache; |
216 | static struct kmem_cache *_rq_tio_cache; | 218 | static struct kmem_cache *_rq_tio_cache; |
217 | 219 | ||
220 | /* | ||
221 | * Bio-based DM's mempools' reserved IOs set by the user. | ||
222 | */ | ||
223 | static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS; | ||
224 | |||
225 | /* | ||
226 | * Request-based DM's mempools' reserved IOs set by the user. | ||
227 | */ | ||
228 | static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS; | ||
229 | |||
230 | static unsigned __dm_get_reserved_ios(unsigned *reserved_ios, | ||
231 | unsigned def, unsigned max) | ||
232 | { | ||
233 | unsigned ios = ACCESS_ONCE(*reserved_ios); | ||
234 | unsigned modified_ios = 0; | ||
235 | |||
236 | if (!ios) | ||
237 | modified_ios = def; | ||
238 | else if (ios > max) | ||
239 | modified_ios = max; | ||
240 | |||
241 | if (modified_ios) { | ||
242 | (void)cmpxchg(reserved_ios, ios, modified_ios); | ||
243 | ios = modified_ios; | ||
244 | } | ||
245 | |||
246 | return ios; | ||
247 | } | ||
248 | |||
249 | unsigned dm_get_reserved_bio_based_ios(void) | ||
250 | { | ||
251 | return __dm_get_reserved_ios(&reserved_bio_based_ios, | ||
252 | RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS); | ||
253 | } | ||
254 | EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios); | ||
255 | |||
256 | unsigned dm_get_reserved_rq_based_ios(void) | ||
257 | { | ||
258 | return __dm_get_reserved_ios(&reserved_rq_based_ios, | ||
259 | RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS); | ||
260 | } | ||
261 | EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios); | ||
262 | |||
218 | static int __init local_init(void) | 263 | static int __init local_init(void) |
219 | { | 264 | { |
220 | int r = -ENOMEM; | 265 | int r = -ENOMEM; |
@@ -2278,6 +2323,17 @@ struct target_type *dm_get_immutable_target_type(struct mapped_device *md) | |||
2278 | } | 2323 | } |
2279 | 2324 | ||
2280 | /* | 2325 | /* |
2326 | * The queue_limits are only valid as long as you have a reference | ||
2327 | * count on 'md'. | ||
2328 | */ | ||
2329 | struct queue_limits *dm_get_queue_limits(struct mapped_device *md) | ||
2330 | { | ||
2331 | BUG_ON(!atomic_read(&md->holders)); | ||
2332 | return &md->queue->limits; | ||
2333 | } | ||
2334 | EXPORT_SYMBOL_GPL(dm_get_queue_limits); | ||
2335 | |||
2336 | /* | ||
2281 | * Fully initialize a request-based queue (->elevator, ->request_fn, etc). | 2337 | * Fully initialize a request-based queue (->elevator, ->request_fn, etc). |
2282 | */ | 2338 | */ |
2283 | static int dm_init_request_based_queue(struct mapped_device *md) | 2339 | static int dm_init_request_based_queue(struct mapped_device *md) |
@@ -2862,18 +2918,18 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, u | |||
2862 | 2918 | ||
2863 | if (type == DM_TYPE_BIO_BASED) { | 2919 | if (type == DM_TYPE_BIO_BASED) { |
2864 | cachep = _io_cache; | 2920 | cachep = _io_cache; |
2865 | pool_size = 16; | 2921 | pool_size = dm_get_reserved_bio_based_ios(); |
2866 | front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); | 2922 | front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); |
2867 | } else if (type == DM_TYPE_REQUEST_BASED) { | 2923 | } else if (type == DM_TYPE_REQUEST_BASED) { |
2868 | cachep = _rq_tio_cache; | 2924 | cachep = _rq_tio_cache; |
2869 | pool_size = MIN_IOS; | 2925 | pool_size = dm_get_reserved_rq_based_ios(); |
2870 | front_pad = offsetof(struct dm_rq_clone_bio_info, clone); | 2926 | front_pad = offsetof(struct dm_rq_clone_bio_info, clone); |
2871 | /* per_bio_data_size is not used. See __bind_mempools(). */ | 2927 | /* per_bio_data_size is not used. See __bind_mempools(). */ |
2872 | WARN_ON(per_bio_data_size != 0); | 2928 | WARN_ON(per_bio_data_size != 0); |
2873 | } else | 2929 | } else |
2874 | goto out; | 2930 | goto out; |
2875 | 2931 | ||
2876 | pools->io_pool = mempool_create_slab_pool(MIN_IOS, cachep); | 2932 | pools->io_pool = mempool_create_slab_pool(pool_size, cachep); |
2877 | if (!pools->io_pool) | 2933 | if (!pools->io_pool) |
2878 | goto out; | 2934 | goto out; |
2879 | 2935 | ||
@@ -2924,6 +2980,13 @@ module_exit(dm_exit); | |||
2924 | 2980 | ||
2925 | module_param(major, uint, 0); | 2981 | module_param(major, uint, 0); |
2926 | MODULE_PARM_DESC(major, "The major number of the device mapper"); | 2982 | MODULE_PARM_DESC(major, "The major number of the device mapper"); |
2983 | |||
2984 | module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR); | ||
2985 | MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools"); | ||
2986 | |||
2987 | module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR); | ||
2988 | MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools"); | ||
2989 | |||
2927 | MODULE_DESCRIPTION(DM_NAME " driver"); | 2990 | MODULE_DESCRIPTION(DM_NAME " driver"); |
2928 | MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); | 2991 | MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); |
2929 | MODULE_LICENSE("GPL"); | 2992 | MODULE_LICENSE("GPL"); |
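
The new __dm_get_reserved_ios() above validates a writable module parameter lazily: it samples the value once, substitutes the default for 0 and clamps anything above RESERVED_MAX_IOS, then publishes the corrected value with cmpxchg so a racing sysfs write is not clobbered (that new value will be re-validated on the next read). A hedged C11-atomics sketch of the same sample-validate-publish pattern, using illustrative names rather than the DM symbols:

    #include <stdatomic.h>
    #include <stdio.h>

    #define RESERVED_BIO_BASED_IOS 16
    #define RESERVED_MAX_IOS       1024

    /* Writable "module parameter": a user can poke any value in here at any time. */
    static _Atomic unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

    static unsigned get_reserved_ios(_Atomic unsigned *param, unsigned def, unsigned max)
    {
        unsigned ios = atomic_load(param);
        unsigned fixed = 0;

        if (ios == 0)
            fixed = def;        /* 0 means "use the default" */
        else if (ios > max)
            fixed = max;        /* clamp absurd values */

        if (fixed) {
            /* Publish the corrected value, but only if nobody raced in and
             * changed the parameter since we sampled it. */
            atomic_compare_exchange_strong(param, &ios, fixed);
            ios = fixed;
        }
        return ios;
    }

    int main(void)
    {
        atomic_store(&reserved_bio_based_ios, 100000);   /* user sets something huge */
        printf("effective reserved ios: %u\n",
               get_reserved_ios(&reserved_bio_based_ios, RESERVED_BIO_BASED_IOS,
                                RESERVED_MAX_IOS));      /* prints 1024 */
        return 0;
    }
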
diff --git a/drivers/md/dm.h b/drivers/md/dm.h index 5e604cc7b4aa..1d1ad7b7e527 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h | |||
@@ -184,6 +184,9 @@ void dm_free_md_mempools(struct dm_md_mempools *pools); | |||
184 | /* | 184 | /* |
185 | * Helpers that are used by DM core | 185 | * Helpers that are used by DM core |
186 | */ | 186 | */ |
187 | unsigned dm_get_reserved_bio_based_ios(void); | ||
188 | unsigned dm_get_reserved_rq_based_ios(void); | ||
189 | |||
187 | static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen) | 190 | static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen) |
188 | { | 191 | { |
189 | return !maxlen || strlen(result) + 1 >= maxlen; | 192 | return !maxlen || strlen(result) + 1 >= maxlen; |
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c index d0fdc134068a..f6ff711aa5bb 100644 --- a/drivers/misc/mei/amthif.c +++ b/drivers/misc/mei/amthif.c | |||
@@ -57,6 +57,7 @@ void mei_amthif_reset_params(struct mei_device *dev) | |||
57 | dev->iamthif_ioctl = false; | 57 | dev->iamthif_ioctl = false; |
58 | dev->iamthif_state = MEI_IAMTHIF_IDLE; | 58 | dev->iamthif_state = MEI_IAMTHIF_IDLE; |
59 | dev->iamthif_timer = 0; | 59 | dev->iamthif_timer = 0; |
60 | dev->iamthif_stall_timer = 0; | ||
60 | } | 61 | } |
61 | 62 | ||
62 | /** | 63 | /** |
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 6d0282c08a06..cd2033cd7120 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c | |||
@@ -297,10 +297,13 @@ int __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length) | |||
297 | 297 | ||
298 | if (cl->reading_state != MEI_READ_COMPLETE && | 298 | if (cl->reading_state != MEI_READ_COMPLETE && |
299 | !waitqueue_active(&cl->rx_wait)) { | 299 | !waitqueue_active(&cl->rx_wait)) { |
300 | |||
300 | mutex_unlock(&dev->device_lock); | 301 | mutex_unlock(&dev->device_lock); |
301 | 302 | ||
302 | if (wait_event_interruptible(cl->rx_wait, | 303 | if (wait_event_interruptible(cl->rx_wait, |
303 | (MEI_READ_COMPLETE == cl->reading_state))) { | 304 | cl->reading_state == MEI_READ_COMPLETE || |
305 | mei_cl_is_transitioning(cl))) { | ||
306 | |||
304 | if (signal_pending(current)) | 307 | if (signal_pending(current)) |
305 | return -EINTR; | 308 | return -EINTR; |
306 | return -ERESTARTSYS; | 309 | return -ERESTARTSYS; |
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h index 9eb031e92070..892cc4207fa2 100644 --- a/drivers/misc/mei/client.h +++ b/drivers/misc/mei/client.h | |||
@@ -90,6 +90,12 @@ static inline bool mei_cl_is_connected(struct mei_cl *cl) | |||
90 | cl->dev->dev_state == MEI_DEV_ENABLED && | 90 | cl->dev->dev_state == MEI_DEV_ENABLED && |
91 | cl->state == MEI_FILE_CONNECTED); | 91 | cl->state == MEI_FILE_CONNECTED); |
92 | } | 92 | } |
93 | static inline bool mei_cl_is_transitioning(struct mei_cl *cl) | ||
94 | { | ||
95 | return (MEI_FILE_INITIALIZING == cl->state || | ||
96 | MEI_FILE_DISCONNECTED == cl->state || | ||
97 | MEI_FILE_DISCONNECTING == cl->state); | ||
98 | } | ||
93 | 99 | ||
94 | bool mei_cl_is_other_connecting(struct mei_cl *cl); | 100 | bool mei_cl_is_other_connecting(struct mei_cl *cl); |
95 | int mei_cl_disconnect(struct mei_cl *cl); | 101 | int mei_cl_disconnect(struct mei_cl *cl); |
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index 6127ab64bb39..0a0448326e9d 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c | |||
@@ -35,11 +35,15 @@ static void mei_hbm_me_cl_allocate(struct mei_device *dev) | |||
35 | struct mei_me_client *clients; | 35 | struct mei_me_client *clients; |
36 | int b; | 36 | int b; |
37 | 37 | ||
38 | dev->me_clients_num = 0; | ||
39 | dev->me_client_presentation_num = 0; | ||
40 | dev->me_client_index = 0; | ||
41 | |||
38 | /* count how many ME clients we have */ | 42 | /* count how many ME clients we have */ |
39 | for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX) | 43 | for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX) |
40 | dev->me_clients_num++; | 44 | dev->me_clients_num++; |
41 | 45 | ||
42 | if (dev->me_clients_num <= 0) | 46 | if (dev->me_clients_num == 0) |
43 | return; | 47 | return; |
44 | 48 | ||
45 | kfree(dev->me_clients); | 49 | kfree(dev->me_clients); |
@@ -221,7 +225,7 @@ static int mei_hbm_prop_req(struct mei_device *dev) | |||
221 | struct hbm_props_request *prop_req; | 225 | struct hbm_props_request *prop_req; |
222 | const size_t len = sizeof(struct hbm_props_request); | 226 | const size_t len = sizeof(struct hbm_props_request); |
223 | unsigned long next_client_index; | 227 | unsigned long next_client_index; |
224 | u8 client_num; | 228 | unsigned long client_num; |
225 | 229 | ||
226 | 230 | ||
227 | client_num = dev->me_client_presentation_num; | 231 | client_num = dev->me_client_presentation_num; |
@@ -677,8 +681,6 @@ void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) | |||
677 | if (dev->dev_state == MEI_DEV_INIT_CLIENTS && | 681 | if (dev->dev_state == MEI_DEV_INIT_CLIENTS && |
678 | dev->hbm_state == MEI_HBM_ENUM_CLIENTS) { | 682 | dev->hbm_state == MEI_HBM_ENUM_CLIENTS) { |
679 | dev->init_clients_timer = 0; | 683 | dev->init_clients_timer = 0; |
680 | dev->me_client_presentation_num = 0; | ||
681 | dev->me_client_index = 0; | ||
682 | mei_hbm_me_cl_allocate(dev); | 684 | mei_hbm_me_cl_allocate(dev); |
683 | dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES; | 685 | dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES; |
684 | 686 | ||
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c index 92c73118b13c..6197018e2f16 100644 --- a/drivers/misc/mei/init.c +++ b/drivers/misc/mei/init.c | |||
@@ -175,6 +175,9 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled) | |||
175 | memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg)); | 175 | memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg)); |
176 | } | 176 | } |
177 | 177 | ||
178 | /* we're already in reset, cancel the init timer */ | ||
179 | dev->init_clients_timer = 0; | ||
180 | |||
178 | dev->me_clients_num = 0; | 181 | dev->me_clients_num = 0; |
179 | dev->rd_msg_hdr = 0; | 182 | dev->rd_msg_hdr = 0; |
180 | dev->wd_pending = false; | 183 | dev->wd_pending = false; |
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 173ff095be0d..cabeddd66c1f 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c | |||
@@ -249,19 +249,16 @@ static ssize_t mei_read(struct file *file, char __user *ubuf, | |||
249 | mutex_unlock(&dev->device_lock); | 249 | mutex_unlock(&dev->device_lock); |
250 | 250 | ||
251 | if (wait_event_interruptible(cl->rx_wait, | 251 | if (wait_event_interruptible(cl->rx_wait, |
252 | (MEI_READ_COMPLETE == cl->reading_state || | 252 | MEI_READ_COMPLETE == cl->reading_state || |
253 | MEI_FILE_INITIALIZING == cl->state || | 253 | mei_cl_is_transitioning(cl))) { |
254 | MEI_FILE_DISCONNECTED == cl->state || | 254 | |
255 | MEI_FILE_DISCONNECTING == cl->state))) { | ||
256 | if (signal_pending(current)) | 255 | if (signal_pending(current)) |
257 | return -EINTR; | 256 | return -EINTR; |
258 | return -ERESTARTSYS; | 257 | return -ERESTARTSYS; |
259 | } | 258 | } |
260 | 259 | ||
261 | mutex_lock(&dev->device_lock); | 260 | mutex_lock(&dev->device_lock); |
262 | if (MEI_FILE_INITIALIZING == cl->state || | 261 | if (mei_cl_is_transitioning(cl)) { |
263 | MEI_FILE_DISCONNECTED == cl->state || | ||
264 | MEI_FILE_DISCONNECTING == cl->state) { | ||
265 | rets = -EBUSY; | 262 | rets = -EBUSY; |
266 | goto out; | 263 | goto out; |
267 | } | 264 | } |
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index 7b918b2fb894..456b322013e2 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h | |||
@@ -396,9 +396,9 @@ struct mei_device { | |||
396 | struct mei_me_client *me_clients; /* Note: memory has to be allocated */ | 396 | struct mei_me_client *me_clients; /* Note: memory has to be allocated */ |
397 | DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX); | 397 | DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX); |
398 | DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX); | 398 | DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX); |
399 | u8 me_clients_num; | 399 | unsigned long me_clients_num; |
400 | u8 me_client_presentation_num; | 400 | unsigned long me_client_presentation_num; |
401 | u8 me_client_index; | 401 | unsigned long me_client_index; |
402 | 402 | ||
403 | struct mei_cl wd_cl; | 403 | struct mei_cl wd_cl; |
404 | enum mei_wd_states wd_state; | 404 | enum mei_wd_states wd_state; |
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c index 87ed3fb5149a..f344659dceac 100644 --- a/drivers/mmc/host/sh_mobile_sdhi.c +++ b/drivers/mmc/host/sh_mobile_sdhi.c | |||
@@ -113,14 +113,14 @@ static const struct sh_mobile_sdhi_ops sdhi_ops = { | |||
113 | }; | 113 | }; |
114 | 114 | ||
115 | static const struct of_device_id sh_mobile_sdhi_of_match[] = { | 115 | static const struct of_device_id sh_mobile_sdhi_of_match[] = { |
116 | { .compatible = "renesas,shmobile-sdhi" }, | 116 | { .compatible = "renesas,sdhi-shmobile" }, |
117 | { .compatible = "renesas,sh7372-sdhi" }, | 117 | { .compatible = "renesas,sdhi-sh7372" }, |
118 | { .compatible = "renesas,sh73a0-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, | 118 | { .compatible = "renesas,sdhi-sh73a0", .data = &sh_mobile_sdhi_of_cfg[0], }, |
119 | { .compatible = "renesas,r8a73a4-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, | 119 | { .compatible = "renesas,sdhi-r8a73a4", .data = &sh_mobile_sdhi_of_cfg[0], }, |
120 | { .compatible = "renesas,r8a7740-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, | 120 | { .compatible = "renesas,sdhi-r8a7740", .data = &sh_mobile_sdhi_of_cfg[0], }, |
121 | { .compatible = "renesas,r8a7778-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, | 121 | { .compatible = "renesas,sdhi-r8a7778", .data = &sh_mobile_sdhi_of_cfg[0], }, |
122 | { .compatible = "renesas,r8a7779-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, | 122 | { .compatible = "renesas,sdhi-r8a7779", .data = &sh_mobile_sdhi_of_cfg[0], }, |
123 | { .compatible = "renesas,r8a7790-sdhi", .data = &sh_mobile_sdhi_of_cfg[0], }, | 123 | { .compatible = "renesas,sdhi-r8a7790", .data = &sh_mobile_sdhi_of_cfg[0], }, |
124 | {}, | 124 | {}, |
125 | }; | 125 | }; |
126 | MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match); | 126 | MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match); |
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index 26b14f9fcac6..6bc9618af094 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c | |||
@@ -168,12 +168,25 @@ static inline int write_disable(struct m25p *flash) | |||
168 | */ | 168 | */ |
169 | static inline int set_4byte(struct m25p *flash, u32 jedec_id, int enable) | 169 | static inline int set_4byte(struct m25p *flash, u32 jedec_id, int enable) |
170 | { | 170 | { |
171 | int status; | ||
172 | bool need_wren = false; | ||
173 | |||
171 | switch (JEDEC_MFR(jedec_id)) { | 174 | switch (JEDEC_MFR(jedec_id)) { |
172 | case CFI_MFR_MACRONIX: | ||
173 | case CFI_MFR_ST: /* Micron, actually */ | 175 | case CFI_MFR_ST: /* Micron, actually */ |
176 | /* Some Micron need WREN command; all will accept it */ | ||
177 | need_wren = true; | ||
178 | case CFI_MFR_MACRONIX: | ||
174 | case 0xEF /* winbond */: | 179 | case 0xEF /* winbond */: |
180 | if (need_wren) | ||
181 | write_enable(flash); | ||
182 | |||
175 | flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B; | 183 | flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B; |
176 | return spi_write(flash->spi, flash->command, 1); | 184 | status = spi_write(flash->spi, flash->command, 1); |
185 | |||
186 | if (need_wren) | ||
187 | write_disable(flash); | ||
188 | |||
189 | return status; | ||
177 | default: | 190 | default: |
178 | /* Spansion style */ | 191 | /* Spansion style */ |
179 | flash->command[0] = OPCODE_BRWR; | 192 | flash->command[0] = OPCODE_BRWR; |
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 7ed4841327f2..d340b2f198c6 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c | |||
@@ -2869,10 +2869,8 @@ static int nand_flash_detect_ext_param_page(struct mtd_info *mtd, | |||
2869 | 2869 | ||
2870 | len = le16_to_cpu(p->ext_param_page_length) * 16; | 2870 | len = le16_to_cpu(p->ext_param_page_length) * 16; |
2871 | ep = kmalloc(len, GFP_KERNEL); | 2871 | ep = kmalloc(len, GFP_KERNEL); |
2872 | if (!ep) { | 2872 | if (!ep) |
2873 | ret = -ENOMEM; | 2873 | return -ENOMEM; |
2874 | goto ext_out; | ||
2875 | } | ||
2876 | 2874 | ||
2877 | /* Send our own NAND_CMD_PARAM. */ | 2875 | /* Send our own NAND_CMD_PARAM. */ |
2878 | chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1); | 2876 | chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1); |
@@ -2920,7 +2918,7 @@ static int nand_flash_detect_ext_param_page(struct mtd_info *mtd, | |||
2920 | } | 2918 | } |
2921 | 2919 | ||
2922 | pr_info("ONFI extended param page detected.\n"); | 2920 | pr_info("ONFI extended param page detected.\n"); |
2923 | return 0; | 2921 | ret = 0; |
2924 | 2922 | ||
2925 | ext_out: | 2923 | ext_out: |
2926 | kfree(ep); | 2924 | kfree(ep); |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 55bbb8b8200c..e883bfe2e727 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -1724,6 +1724,7 @@ static int __bond_release_one(struct net_device *bond_dev, | |||
1724 | struct bonding *bond = netdev_priv(bond_dev); | 1724 | struct bonding *bond = netdev_priv(bond_dev); |
1725 | struct slave *slave, *oldcurrent; | 1725 | struct slave *slave, *oldcurrent; |
1726 | struct sockaddr addr; | 1726 | struct sockaddr addr; |
1727 | int old_flags = bond_dev->flags; | ||
1727 | netdev_features_t old_features = bond_dev->features; | 1728 | netdev_features_t old_features = bond_dev->features; |
1728 | 1729 | ||
1729 | /* slave is not a slave or master is not master of this slave */ | 1730 | /* slave is not a slave or master is not master of this slave */ |
@@ -1855,12 +1856,18 @@ static int __bond_release_one(struct net_device *bond_dev, | |||
1855 | * bond_change_active_slave(..., NULL) | 1856 | * bond_change_active_slave(..., NULL) |
1856 | */ | 1857 | */ |
1857 | if (!USES_PRIMARY(bond->params.mode)) { | 1858 | if (!USES_PRIMARY(bond->params.mode)) { |
1858 | /* unset promiscuity level from slave */ | 1859 | /* unset promiscuity level from slave |
1859 | if (bond_dev->flags & IFF_PROMISC) | 1860 | * NOTE: The NETDEV_CHANGEADDR call above may change the value |
1861 | * of the IFF_PROMISC flag in the bond_dev, but we need the | ||
1862 | * value of that flag before that change, as that was the value | ||
1863 | * when this slave was attached, so we cache at the start of the | ||
1864 | * function and use it here. Same goes for ALLMULTI below | ||
1865 | */ | ||
1866 | if (old_flags & IFF_PROMISC) | ||
1860 | dev_set_promiscuity(slave_dev, -1); | 1867 | dev_set_promiscuity(slave_dev, -1); |
1861 | 1868 | ||
1862 | /* unset allmulti level from slave */ | 1869 | /* unset allmulti level from slave */ |
1863 | if (bond_dev->flags & IFF_ALLMULTI) | 1870 | if (old_flags & IFF_ALLMULTI) |
1864 | dev_set_allmulti(slave_dev, -1); | 1871 | dev_set_allmulti(slave_dev, -1); |
1865 | 1872 | ||
1866 | bond_hw_addr_flush(bond_dev, slave_dev); | 1873 | bond_hw_addr_flush(bond_dev, slave_dev); |
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 71c677e651d7..3f21142138b7 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c | |||
@@ -702,7 +702,6 @@ static int flexcan_chip_start(struct net_device *dev) | |||
702 | { | 702 | { |
703 | struct flexcan_priv *priv = netdev_priv(dev); | 703 | struct flexcan_priv *priv = netdev_priv(dev); |
704 | struct flexcan_regs __iomem *regs = priv->base; | 704 | struct flexcan_regs __iomem *regs = priv->base; |
705 | unsigned int i; | ||
706 | int err; | 705 | int err; |
707 | u32 reg_mcr, reg_ctrl; | 706 | u32 reg_mcr, reg_ctrl; |
708 | 707 | ||
@@ -772,17 +771,6 @@ static int flexcan_chip_start(struct net_device *dev) | |||
772 | netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl); | 771 | netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl); |
773 | flexcan_write(reg_ctrl, ®s->ctrl); | 772 | flexcan_write(reg_ctrl, ®s->ctrl); |
774 | 773 | ||
775 | for (i = 0; i < ARRAY_SIZE(regs->cantxfg); i++) { | ||
776 | flexcan_write(0, ®s->cantxfg[i].can_ctrl); | ||
777 | flexcan_write(0, ®s->cantxfg[i].can_id); | ||
778 | flexcan_write(0, ®s->cantxfg[i].data[0]); | ||
779 | flexcan_write(0, ®s->cantxfg[i].data[1]); | ||
780 | |||
781 | /* put MB into rx queue */ | ||
782 | flexcan_write(FLEXCAN_MB_CNT_CODE(0x4), | ||
783 | ®s->cantxfg[i].can_ctrl); | ||
784 | } | ||
785 | |||
786 | /* acceptance mask/acceptance code (accept everything) */ | 774 | /* acceptance mask/acceptance code (accept everything) */ |
787 | flexcan_write(0x0, ®s->rxgmask); | 775 | flexcan_write(0x0, ®s->rxgmask); |
788 | flexcan_write(0x0, ®s->rx14mask); | 776 | flexcan_write(0x0, ®s->rx14mask); |
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index 874188ba06f7..25377e547f9b 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c | |||
@@ -76,6 +76,10 @@ MODULE_PARM_DESC(maxdev, "Maximum number of slcan interfaces"); | |||
76 | /* maximum rx buffer len: extended CAN frame with timestamp */ | 76 | /* maximum rx buffer len: extended CAN frame with timestamp */ |
77 | #define SLC_MTU (sizeof("T1111222281122334455667788EA5F\r")+1) | 77 | #define SLC_MTU (sizeof("T1111222281122334455667788EA5F\r")+1) |
78 | 78 | ||
79 | #define SLC_CMD_LEN 1 | ||
80 | #define SLC_SFF_ID_LEN 3 | ||
81 | #define SLC_EFF_ID_LEN 8 | ||
82 | |||
79 | struct slcan { | 83 | struct slcan { |
80 | int magic; | 84 | int magic; |
81 | 85 | ||
@@ -142,47 +146,63 @@ static void slc_bump(struct slcan *sl) | |||
142 | { | 146 | { |
143 | struct sk_buff *skb; | 147 | struct sk_buff *skb; |
144 | struct can_frame cf; | 148 | struct can_frame cf; |
145 | int i, dlc_pos, tmp; | 149 | int i, tmp; |
146 | unsigned long ultmp; | 150 | u32 tmpid; |
147 | char cmd = sl->rbuff[0]; | 151 | char *cmd = sl->rbuff; |
148 | 152 | ||
149 | if ((cmd != 't') && (cmd != 'T') && (cmd != 'r') && (cmd != 'R')) | 153 | cf.can_id = 0; |
154 | |||
155 | switch (*cmd) { | ||
156 | case 'r': | ||
157 | cf.can_id = CAN_RTR_FLAG; | ||
158 | /* fallthrough */ | ||
159 | case 't': | ||
160 | /* store dlc ASCII value and terminate SFF CAN ID string */ | ||
161 | cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN]; | ||
162 | sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN] = 0; | ||
163 | /* point to payload data behind the dlc */ | ||
164 | cmd += SLC_CMD_LEN + SLC_SFF_ID_LEN + 1; | ||
165 | break; | ||
166 | case 'R': | ||
167 | cf.can_id = CAN_RTR_FLAG; | ||
168 | /* fallthrough */ | ||
169 | case 'T': | ||
170 | cf.can_id |= CAN_EFF_FLAG; | ||
171 | /* store dlc ASCII value and terminate EFF CAN ID string */ | ||
172 | cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN]; | ||
173 | sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN] = 0; | ||
174 | /* point to payload data behind the dlc */ | ||
175 | cmd += SLC_CMD_LEN + SLC_EFF_ID_LEN + 1; | ||
176 | break; | ||
177 | default: | ||
150 | return; | 178 | return; |
179 | } | ||
151 | 180 | ||
152 | if (cmd & 0x20) /* tiny chars 'r' 't' => standard frame format */ | 181 | if (kstrtou32(sl->rbuff + SLC_CMD_LEN, 16, &tmpid)) |
153 | dlc_pos = 4; /* dlc position tiiid */ | ||
154 | else | ||
155 | dlc_pos = 9; /* dlc position Tiiiiiiiid */ | ||
156 | |||
157 | if (!((sl->rbuff[dlc_pos] >= '0') && (sl->rbuff[dlc_pos] < '9'))) | ||
158 | return; | 182 | return; |
159 | 183 | ||
160 | cf.can_dlc = sl->rbuff[dlc_pos] - '0'; /* get can_dlc from ASCII val */ | 184 | cf.can_id |= tmpid; |
161 | 185 | ||
162 | sl->rbuff[dlc_pos] = 0; /* terminate can_id string */ | 186 | /* get can_dlc from sanitized ASCII value */ |
163 | 187 | if (cf.can_dlc >= '0' && cf.can_dlc < '9') | |
164 | if (kstrtoul(sl->rbuff+1, 16, &ultmp)) | 188 | cf.can_dlc -= '0'; |
189 | else | ||
165 | return; | 190 | return; |
166 | 191 | ||
167 | cf.can_id = ultmp; | ||
168 | |||
169 | if (!(cmd & 0x20)) /* NO tiny chars => extended frame format */ | ||
170 | cf.can_id |= CAN_EFF_FLAG; | ||
171 | |||
172 | if ((cmd | 0x20) == 'r') /* RTR frame */ | ||
173 | cf.can_id |= CAN_RTR_FLAG; | ||
174 | |||
175 | *(u64 *) (&cf.data) = 0; /* clear payload */ | 192 | *(u64 *) (&cf.data) = 0; /* clear payload */ |
176 | 193 | ||
177 | for (i = 0, dlc_pos++; i < cf.can_dlc; i++) { | 194 | /* RTR frames may have a dlc > 0 but they never have any data bytes */ |
178 | tmp = hex_to_bin(sl->rbuff[dlc_pos++]); | 195 | if (!(cf.can_id & CAN_RTR_FLAG)) { |
179 | if (tmp < 0) | 196 | for (i = 0; i < cf.can_dlc; i++) { |
180 | return; | 197 | tmp = hex_to_bin(*cmd++); |
181 | cf.data[i] = (tmp << 4); | 198 | if (tmp < 0) |
182 | tmp = hex_to_bin(sl->rbuff[dlc_pos++]); | 199 | return; |
183 | if (tmp < 0) | 200 | cf.data[i] = (tmp << 4); |
184 | return; | 201 | tmp = hex_to_bin(*cmd++); |
185 | cf.data[i] |= tmp; | 202 | if (tmp < 0) |
203 | return; | ||
204 | cf.data[i] |= tmp; | ||
205 | } | ||
186 | } | 206 | } |
187 | 207 | ||
188 | skb = dev_alloc_skb(sizeof(struct can_frame) + | 208 | skb = dev_alloc_skb(sizeof(struct can_frame) + |
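The rewritten slc_bump() above parses the ASCII frame by fixed offsets: one command character ('t'/'T' for data, 'r'/'R' for RTR), a 3-digit hex identifier for standard frames or an 8-digit one for extended frames, a single DLC digit, then two hex characters per data byte. A minimal standalone sketch of that layout for a standard-frame example (the frame contents are made up):

#include <stdio.h>
#include <stdlib.h>

#define SLC_CMD_LEN    1
#define SLC_SFF_ID_LEN 3

int main(void)
{
    char buf[] = "t1232ABCD";                 /* id 0x123, dlc 2, data AB CD */
    char *dlc_pos = buf + SLC_CMD_LEN + SLC_SFF_ID_LEN;
    int dlc = *dlc_pos - '0';

    *dlc_pos = '\0';                          /* terminate the ID string, as slc_bump() does */
    printf("id=0x%lx dlc=%d data=%s\n",
           strtoul(buf + SLC_CMD_LEN, NULL, 16), dlc, dlc_pos + 1);
    return 0;
}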
@@ -209,7 +229,6 @@ static void slc_bump(struct slcan *sl) | |||
209 | /* parse tty input stream */ | 229 | /* parse tty input stream */ |
210 | static void slcan_unesc(struct slcan *sl, unsigned char s) | 230 | static void slcan_unesc(struct slcan *sl, unsigned char s) |
211 | { | 231 | { |
212 | |||
213 | if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */ | 232 | if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */ |
214 | if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && | 233 | if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && |
215 | (sl->rcount > 4)) { | 234 | (sl->rcount > 4)) { |
@@ -236,27 +255,46 @@ static void slcan_unesc(struct slcan *sl, unsigned char s) | |||
236 | /* Encapsulate one can_frame and stuff into a TTY queue. */ | 255 | /* Encapsulate one can_frame and stuff into a TTY queue. */ |
237 | static void slc_encaps(struct slcan *sl, struct can_frame *cf) | 256 | static void slc_encaps(struct slcan *sl, struct can_frame *cf) |
238 | { | 257 | { |
239 | int actual, idx, i; | 258 | int actual, i; |
240 | char cmd; | 259 | unsigned char *pos; |
260 | unsigned char *endpos; | ||
261 | canid_t id = cf->can_id; | ||
262 | |||
263 | pos = sl->xbuff; | ||
241 | 264 | ||
242 | if (cf->can_id & CAN_RTR_FLAG) | 265 | if (cf->can_id & CAN_RTR_FLAG) |
243 | cmd = 'R'; /* becomes 'r' in standard frame format */ | 266 | *pos = 'R'; /* becomes 'r' in standard frame format (SFF) */ |
244 | else | 267 | else |
245 | cmd = 'T'; /* becomes 't' in standard frame format */ | 268 | *pos = 'T'; /* becomes 't' in standard frame format (SFF) */ |
246 | 269 | ||
247 | if (cf->can_id & CAN_EFF_FLAG) | 270 | /* determine number of chars for the CAN-identifier */ |
248 | sprintf(sl->xbuff, "%c%08X%d", cmd, | 271 | if (cf->can_id & CAN_EFF_FLAG) { |
249 | cf->can_id & CAN_EFF_MASK, cf->can_dlc); | 272 | id &= CAN_EFF_MASK; |
250 | else | 273 | endpos = pos + SLC_EFF_ID_LEN; |
251 | sprintf(sl->xbuff, "%c%03X%d", cmd | 0x20, | 274 | } else { |
252 | cf->can_id & CAN_SFF_MASK, cf->can_dlc); | 275 | *pos |= 0x20; /* convert R/T to lower case for SFF */ |
276 | id &= CAN_SFF_MASK; | ||
277 | endpos = pos + SLC_SFF_ID_LEN; | ||
278 | } | ||
253 | 279 | ||
254 | idx = strlen(sl->xbuff); | 280 | /* build 3 (SFF) or 8 (EFF) digit CAN identifier */ |
281 | pos++; | ||
282 | while (endpos >= pos) { | ||
283 | *endpos-- = hex_asc_upper[id & 0xf]; | ||
284 | id >>= 4; | ||
285 | } | ||
286 | |||
287 | pos += (cf->can_id & CAN_EFF_FLAG) ? SLC_EFF_ID_LEN : SLC_SFF_ID_LEN; | ||
255 | 288 | ||
256 | for (i = 0; i < cf->can_dlc; i++) | 289 | *pos++ = cf->can_dlc + '0'; |
257 | sprintf(&sl->xbuff[idx + 2*i], "%02X", cf->data[i]); | 290 | |
291 | /* RTR frames may have a dlc > 0 but they never have any data bytes */ | ||
292 | if (!(cf->can_id & CAN_RTR_FLAG)) { | ||
293 | for (i = 0; i < cf->can_dlc; i++) | ||
294 | pos = hex_byte_pack_upper(pos, cf->data[i]); | ||
295 | } | ||
258 | 296 | ||
259 | strcat(sl->xbuff, "\r"); /* add terminating character */ | 297 | *pos++ = '\r'; |
260 | 298 | ||
261 | /* Order of next two lines is *very* important. | 299 | /* Order of next two lines is *very* important. |
262 | * When we are sending a little amount of data, | 300 | * When we are sending a little amount of data, |
@@ -267,8 +305,8 @@ static void slc_encaps(struct slcan *sl, struct can_frame *cf) | |||
267 | * 14 Oct 1994 Dmitry Gorodchanin. | 305 | * 14 Oct 1994 Dmitry Gorodchanin. |
268 | */ | 306 | */ |
269 | set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); | 307 | set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); |
270 | actual = sl->tty->ops->write(sl->tty, sl->xbuff, strlen(sl->xbuff)); | 308 | actual = sl->tty->ops->write(sl->tty, sl->xbuff, pos - sl->xbuff); |
271 | sl->xleft = strlen(sl->xbuff) - actual; | 309 | sl->xleft = (pos - sl->xbuff) - actual; |
272 | sl->xhead = sl->xbuff + actual; | 310 | sl->xhead = sl->xbuff + actual; |
273 | sl->dev->stats.tx_bytes += cf->can_dlc; | 311 | sl->dev->stats.tx_bytes += cf->can_dlc; |
274 | } | 312 | } |
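slc_encaps() above builds the same format in the transmit direction without sprintf()/strlen(): command character, fixed-width upper-case hex identifier, DLC digit, hex-packed payload, and a trailing CR, tracking the length with a running pointer. A hedged userspace sketch of the standard-frame case (helper names invented, not the driver's):

#include <stdio.h>
#include <stdint.h>

static const char hex_upper[] = "0123456789ABCDEF";

static int slc_encode_sff_sketch(char *out, uint16_t id, const uint8_t *data, int dlc)
{
    char *pos = out;
    int i;

    *pos++ = 'T' | 0x20;               /* lower-case 't' marks a standard data frame */
    for (i = 2; i >= 0; i--)           /* 3 hex digits, most significant first */
        *pos++ = hex_upper[(id >> (4 * i)) & 0xf];
    *pos++ = dlc + '0';
    for (i = 0; i < dlc; i++) {        /* two hex chars per payload byte */
        *pos++ = hex_upper[data[i] >> 4];
        *pos++ = hex_upper[data[i] & 0xf];
    }
    *pos++ = '\r';                     /* terminating character */
    return pos - out;                  /* length handed to the tty write, no strlen() */
}

int main(void)
{
    uint8_t payload[] = { 0xAB, 0xCD };
    char buf[32];
    int len = slc_encode_sff_sketch(buf, 0x123, payload, 2);

    printf("%.*s\n", len, buf);        /* prints t1232ABCD followed by CR */
    return 0;
}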
@@ -286,11 +324,13 @@ static void slcan_write_wakeup(struct tty_struct *tty) | |||
286 | if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) | 324 | if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) |
287 | return; | 325 | return; |
288 | 326 | ||
327 | spin_lock(&sl->lock); | ||
289 | if (sl->xleft <= 0) { | 328 | if (sl->xleft <= 0) { |
290 | /* Now serial buffer is almost free & we can start | 329 | /* Now serial buffer is almost free & we can start |
291 | * transmission of another packet */ | 330 | * transmission of another packet */ |
292 | sl->dev->stats.tx_packets++; | 331 | sl->dev->stats.tx_packets++; |
293 | clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); | 332 | clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); |
333 | spin_unlock(&sl->lock); | ||
294 | netif_wake_queue(sl->dev); | 334 | netif_wake_queue(sl->dev); |
295 | return; | 335 | return; |
296 | } | 336 | } |
@@ -298,6 +338,7 @@ static void slcan_write_wakeup(struct tty_struct *tty) | |||
298 | actual = tty->ops->write(tty, sl->xhead, sl->xleft); | 338 | actual = tty->ops->write(tty, sl->xhead, sl->xleft); |
299 | sl->xleft -= actual; | 339 | sl->xleft -= actual; |
300 | sl->xhead += actual; | 340 | sl->xhead += actual; |
341 | spin_unlock(&sl->lock); | ||
301 | } | 342 | } |
302 | 343 | ||
303 | /* Send a can_frame to a TTY queue. */ | 344 | /* Send a can_frame to a TTY queue. */ |
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index a0f647f92bf5..0b7a4c3b01a2 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c | |||
@@ -463,7 +463,7 @@ static int peak_usb_start(struct peak_usb_device *dev) | |||
463 | if (i < PCAN_USB_MAX_TX_URBS) { | 463 | if (i < PCAN_USB_MAX_TX_URBS) { |
464 | if (i == 0) { | 464 | if (i == 0) { |
465 | netdev_err(netdev, "couldn't setup any tx URB\n"); | 465 | netdev_err(netdev, "couldn't setup any tx URB\n"); |
466 | return err; | 466 | goto err_tx; |
467 | } | 467 | } |
468 | 468 | ||
469 | netdev_warn(netdev, "tx performance may be slow\n"); | 469 | netdev_warn(netdev, "tx performance may be slow\n"); |
@@ -472,7 +472,7 @@ static int peak_usb_start(struct peak_usb_device *dev) | |||
472 | if (dev->adapter->dev_start) { | 472 | if (dev->adapter->dev_start) { |
473 | err = dev->adapter->dev_start(dev); | 473 | err = dev->adapter->dev_start(dev); |
474 | if (err) | 474 | if (err) |
475 | goto failed; | 475 | goto err_adapter; |
476 | } | 476 | } |
477 | 477 | ||
478 | dev->state |= PCAN_USB_STATE_STARTED; | 478 | dev->state |= PCAN_USB_STATE_STARTED; |
@@ -481,19 +481,26 @@ static int peak_usb_start(struct peak_usb_device *dev) | |||
481 | if (dev->adapter->dev_set_bus) { | 481 | if (dev->adapter->dev_set_bus) { |
482 | err = dev->adapter->dev_set_bus(dev, 1); | 482 | err = dev->adapter->dev_set_bus(dev, 1); |
483 | if (err) | 483 | if (err) |
484 | goto failed; | 484 | goto err_adapter; |
485 | } | 485 | } |
486 | 486 | ||
487 | dev->can.state = CAN_STATE_ERROR_ACTIVE; | 487 | dev->can.state = CAN_STATE_ERROR_ACTIVE; |
488 | 488 | ||
489 | return 0; | 489 | return 0; |
490 | 490 | ||
491 | failed: | 491 | err_adapter: |
492 | if (err == -ENODEV) | 492 | if (err == -ENODEV) |
493 | netif_device_detach(dev->netdev); | 493 | netif_device_detach(dev->netdev); |
494 | 494 | ||
495 | netdev_warn(netdev, "couldn't submit control: %d\n", err); | 495 | netdev_warn(netdev, "couldn't submit control: %d\n", err); |
496 | 496 | ||
497 | for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++) { | ||
498 | usb_free_urb(dev->tx_contexts[i].urb); | ||
499 | dev->tx_contexts[i].urb = NULL; | ||
500 | } | ||
501 | err_tx: | ||
502 | usb_kill_anchored_urbs(&dev->rx_submitted); | ||
503 | |||
497 | return err; | 504 | return err; |
498 | } | 505 | } |
499 | 506 | ||
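The peak_usb fix above replaces the single "failed:" label with layered ones, so a failure in dev_start/dev_set_bus frees the tx URB contexts and then kills the anchored rx URBs, while a failure to set up any tx URB skips straight to the rx cleanup. A generic sketch of that layered-goto shape, with malloc()/free() standing in for URB setup and teardown:

#include <stdlib.h>

static int start_sketch(int fail_adapter)
{
    void *rx, *tx;
    int err;

    rx = malloc(64);                    /* rx URBs submitted first */
    if (!rx)
        return -1;                      /* nothing to unwind yet */

    tx = malloc(64);                    /* tx URB contexts next */
    if (!tx) {
        err = -1;
        goto err_tx;                    /* only rx needs cleaning up */
    }

    if (fail_adapter) {
        err = -2;
        goto err_adapter;               /* both tx and rx need cleaning up */
    }

    free(tx);                           /* normal teardown happens elsewhere in real code */
    free(rx);
    return 0;

err_adapter:
    free(tx);                           /* mirrors freeing dev->tx_contexts[i].urb */
err_tx:
    free(rx);                           /* mirrors usb_kill_anchored_urbs(&dev->rx_submitted) */
    return err;
}

int main(void)
{
    return start_sketch(0);
}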
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 61726af1de6e..e66beff2704d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
@@ -2481,8 +2481,7 @@ load_error_cnic2: | |||
2481 | load_error_cnic1: | 2481 | load_error_cnic1: |
2482 | bnx2x_napi_disable_cnic(bp); | 2482 | bnx2x_napi_disable_cnic(bp); |
2483 | /* Update the number of queues without the cnic queues */ | 2483 | /* Update the number of queues without the cnic queues */ |
2484 | rc = bnx2x_set_real_num_queues(bp, 0); | 2484 | if (bnx2x_set_real_num_queues(bp, 0)) |
2485 | if (rc) | ||
2486 | BNX2X_ERR("Unable to set real_num_queues not including cnic\n"); | 2485 | BNX2X_ERR("Unable to set real_num_queues not including cnic\n"); |
2487 | load_error_cnic0: | 2486 | load_error_cnic0: |
2488 | BNX2X_ERR("CNIC-related load failed\n"); | 2487 | BNX2X_ERR("CNIC-related load failed\n"); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index d60a2ea3da19..51468227bf3b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | |||
@@ -175,6 +175,7 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy, | |||
175 | #define EDC_MODE_LINEAR 0x0022 | 175 | #define EDC_MODE_LINEAR 0x0022 |
176 | #define EDC_MODE_LIMITING 0x0044 | 176 | #define EDC_MODE_LIMITING 0x0044 |
177 | #define EDC_MODE_PASSIVE_DAC 0x0055 | 177 | #define EDC_MODE_PASSIVE_DAC 0x0055 |
178 | #define EDC_MODE_ACTIVE_DAC 0x0066 | ||
178 | 179 | ||
179 | /* ETS defines*/ | 180 | /* ETS defines*/ |
180 | #define DCBX_INVALID_COS (0xFF) | 181 | #define DCBX_INVALID_COS (0xFF) |
@@ -3684,6 +3685,41 @@ static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy, | |||
3684 | bnx2x_update_link_attr(params, vars->link_attr_sync); | 3685 | bnx2x_update_link_attr(params, vars->link_attr_sync); |
3685 | } | 3686 | } |
3686 | 3687 | ||
3688 | static void bnx2x_disable_kr2(struct link_params *params, | ||
3689 | struct link_vars *vars, | ||
3690 | struct bnx2x_phy *phy) | ||
3691 | { | ||
3692 | struct bnx2x *bp = params->bp; | ||
3693 | int i; | ||
3694 | static struct bnx2x_reg_set reg_set[] = { | ||
3695 | /* Step 1 - Program the TX/RX alignment markers */ | ||
3696 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690}, | ||
3697 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647}, | ||
3698 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0}, | ||
3699 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690}, | ||
3700 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647}, | ||
3701 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0}, | ||
3702 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c}, | ||
3703 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000}, | ||
3704 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000}, | ||
3705 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002}, | ||
3706 | {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000}, | ||
3707 | {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7}, | ||
3708 | {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7}, | ||
3709 | {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002}, | ||
3710 | {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000} | ||
3711 | }; | ||
3712 | DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n"); | ||
3713 | |||
3714 | for (i = 0; i < ARRAY_SIZE(reg_set); i++) | ||
3715 | bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, | ||
3716 | reg_set[i].val); | ||
3717 | vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE; | ||
3718 | bnx2x_update_link_attr(params, vars->link_attr_sync); | ||
3719 | |||
3720 | vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT; | ||
3721 | } | ||
3722 | |||
3687 | static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy, | 3723 | static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy, |
3688 | struct link_params *params) | 3724 | struct link_params *params) |
3689 | { | 3725 | { |
@@ -3715,7 +3751,6 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, | |||
3715 | struct link_params *params, | 3751 | struct link_params *params, |
3716 | struct link_vars *vars) { | 3752 | struct link_vars *vars) { |
3717 | u16 lane, i, cl72_ctrl, an_adv = 0; | 3753 | u16 lane, i, cl72_ctrl, an_adv = 0; |
3718 | u16 ucode_ver; | ||
3719 | struct bnx2x *bp = params->bp; | 3754 | struct bnx2x *bp = params->bp; |
3720 | static struct bnx2x_reg_set reg_set[] = { | 3755 | static struct bnx2x_reg_set reg_set[] = { |
3721 | {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, | 3756 | {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, |
@@ -3806,15 +3841,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, | |||
3806 | 3841 | ||
3807 | /* Advertise pause */ | 3842 | /* Advertise pause */ |
3808 | bnx2x_ext_phy_set_pause(params, phy, vars); | 3843 | bnx2x_ext_phy_set_pause(params, phy, vars); |
3809 | /* Set KR Autoneg Work-Around flag for Warpcore version older than D108 | 3844 | vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; |
3810 | */ | ||
3811 | bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, | ||
3812 | MDIO_WC_REG_UC_INFO_B1_VERSION, &ucode_ver); | ||
3813 | if (ucode_ver < 0xd108) { | ||
3814 | DP(NETIF_MSG_LINK, "Enable AN KR work-around. WC ver:0x%x\n", | ||
3815 | ucode_ver); | ||
3816 | vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; | ||
3817 | } | ||
3818 | bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, | 3845 | bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, |
3819 | MDIO_WC_REG_DIGITAL5_MISC7, 0x100); | 3846 | MDIO_WC_REG_DIGITAL5_MISC7, 0x100); |
3820 | 3847 | ||
@@ -3838,6 +3865,8 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, | |||
3838 | bnx2x_set_aer_mmd(params, phy); | 3865 | bnx2x_set_aer_mmd(params, phy); |
3839 | 3866 | ||
3840 | bnx2x_warpcore_enable_AN_KR2(phy, params, vars); | 3867 | bnx2x_warpcore_enable_AN_KR2(phy, params, vars); |
3868 | } else { | ||
3869 | bnx2x_disable_kr2(params, vars, phy); | ||
3841 | } | 3870 | } |
3842 | 3871 | ||
3843 | /* Enable Autoneg: only on the main lane */ | 3872 | /* Enable Autoneg: only on the main lane */ |
@@ -4347,20 +4376,14 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy, | |||
4347 | struct bnx2x *bp = params->bp; | 4376 | struct bnx2x *bp = params->bp; |
4348 | u32 serdes_net_if; | 4377 | u32 serdes_net_if; |
4349 | u16 gp_status1 = 0, lnkup = 0, lnkup_kr = 0; | 4378 | u16 gp_status1 = 0, lnkup = 0, lnkup_kr = 0; |
4350 | u16 lane = bnx2x_get_warpcore_lane(phy, params); | ||
4351 | 4379 | ||
4352 | vars->turn_to_run_wc_rt = vars->turn_to_run_wc_rt ? 0 : 1; | 4380 | vars->turn_to_run_wc_rt = vars->turn_to_run_wc_rt ? 0 : 1; |
4353 | 4381 | ||
4354 | if (!vars->turn_to_run_wc_rt) | 4382 | if (!vars->turn_to_run_wc_rt) |
4355 | return; | 4383 | return; |
4356 | 4384 | ||
4357 | /* Return if there is no link partner */ | ||
4358 | if (!(bnx2x_warpcore_get_sigdet(phy, params))) { | ||
4359 | DP(NETIF_MSG_LINK, "bnx2x_warpcore_get_sigdet false\n"); | ||
4360 | return; | ||
4361 | } | ||
4362 | |||
4363 | if (vars->rx_tx_asic_rst) { | 4385 | if (vars->rx_tx_asic_rst) { |
4386 | u16 lane = bnx2x_get_warpcore_lane(phy, params); | ||
4364 | serdes_net_if = (REG_RD(bp, params->shmem_base + | 4387 | serdes_net_if = (REG_RD(bp, params->shmem_base + |
4365 | offsetof(struct shmem_region, dev_info. | 4388 | offsetof(struct shmem_region, dev_info. |
4366 | port_hw_config[params->port].default_cfg)) & | 4389 | port_hw_config[params->port].default_cfg)) & |
@@ -4375,14 +4398,8 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy, | |||
4375 | /*10G KR*/ | 4398 | /*10G KR*/ |
4376 | lnkup_kr = (gp_status1 >> (12+lane)) & 0x1; | 4399 | lnkup_kr = (gp_status1 >> (12+lane)) & 0x1; |
4377 | 4400 | ||
4378 | DP(NETIF_MSG_LINK, | ||
4379 | "gp_status1 0x%x\n", gp_status1); | ||
4380 | |||
4381 | if (lnkup_kr || lnkup) { | 4401 | if (lnkup_kr || lnkup) { |
4382 | vars->rx_tx_asic_rst = 0; | 4402 | vars->rx_tx_asic_rst = 0; |
4383 | DP(NETIF_MSG_LINK, | ||
4384 | "link up, rx_tx_asic_rst 0x%x\n", | ||
4385 | vars->rx_tx_asic_rst); | ||
4386 | } else { | 4403 | } else { |
4387 | /* Reset the lane to see if link comes up.*/ | 4404 | /* Reset the lane to see if link comes up.*/ |
4388 | bnx2x_warpcore_reset_lane(bp, phy, 1); | 4405 | bnx2x_warpcore_reset_lane(bp, phy, 1); |
@@ -4507,10 +4524,14 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy, | |||
4507 | * enabled transmitter to avoid current leakage in case | 4524 | * enabled transmitter to avoid current leakage in case |
4508 | * no module is connected | 4525 | * no module is connected |
4509 | */ | 4526 | */ |
4510 | if (bnx2x_is_sfp_module_plugged(phy, params)) | 4527 | if ((params->loopback_mode == LOOPBACK_NONE) || |
4511 | bnx2x_sfp_module_detection(phy, params); | 4528 | (params->loopback_mode == LOOPBACK_EXT)) { |
4512 | else | 4529 | if (bnx2x_is_sfp_module_plugged(phy, params)) |
4513 | bnx2x_sfp_e3_set_transmitter(params, phy, 1); | 4530 | bnx2x_sfp_module_detection(phy, params); |
4531 | else | ||
4532 | bnx2x_sfp_e3_set_transmitter(params, | ||
4533 | phy, 1); | ||
4534 | } | ||
4514 | 4535 | ||
4515 | bnx2x_warpcore_config_sfi(phy, params); | 4536 | bnx2x_warpcore_config_sfi(phy, params); |
4516 | break; | 4537 | break; |
@@ -5757,6 +5778,11 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy, | |||
5757 | rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed, | 5778 | rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed, |
5758 | duplex); | 5779 | duplex); |
5759 | 5780 | ||
5781 | /* In case of KR link down, start up the recovering procedure */ | ||
5782 | if ((!link_up) && (phy->media_type == ETH_PHY_KR) && | ||
5783 | (!(phy->flags & FLAGS_WC_DUAL_MODE))) | ||
5784 | vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; | ||
5785 | |||
5760 | DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n", | 5786 | DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n", |
5761 | vars->duplex, vars->flow_ctrl, vars->link_status); | 5787 | vars->duplex, vars->flow_ctrl, vars->link_status); |
5762 | return rc; | 5788 | return rc; |
@@ -6507,6 +6533,11 @@ static int bnx2x_link_initialize(struct link_params *params, | |||
6507 | params->phy[INT_PHY].config_init(phy, params, vars); | 6533 | params->phy[INT_PHY].config_init(phy, params, vars); |
6508 | } | 6534 | } |
6509 | 6535 | ||
6536 | /* Re-read this value in case it was changed inside config_init due to | ||
6537 | * limitations of optic module | ||
6538 | */ | ||
6539 | vars->line_speed = params->phy[INT_PHY].req_line_speed; | ||
6540 | |||
6510 | /* Init external phy*/ | 6541 | /* Init external phy*/ |
6511 | if (non_ext_phy) { | 6542 | if (non_ext_phy) { |
6512 | if (params->phy[INT_PHY].supported & | 6543 | if (params->phy[INT_PHY].supported & |
@@ -8080,7 +8111,10 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, | |||
8080 | if (copper_module_type & | 8111 | if (copper_module_type & |
8081 | SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) { | 8112 | SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) { |
8082 | DP(NETIF_MSG_LINK, "Active Copper cable detected\n"); | 8113 | DP(NETIF_MSG_LINK, "Active Copper cable detected\n"); |
8083 | check_limiting_mode = 1; | 8114 | if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) |
8115 | *edc_mode = EDC_MODE_ACTIVE_DAC; | ||
8116 | else | ||
8117 | check_limiting_mode = 1; | ||
8084 | } else if (copper_module_type & | 8118 | } else if (copper_module_type & |
8085 | SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) { | 8119 | SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) { |
8086 | DP(NETIF_MSG_LINK, | 8120 | DP(NETIF_MSG_LINK, |
@@ -8555,6 +8589,7 @@ static void bnx2x_warpcore_set_limiting_mode(struct link_params *params, | |||
8555 | mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT; | 8589 | mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT; |
8556 | break; | 8590 | break; |
8557 | case EDC_MODE_PASSIVE_DAC: | 8591 | case EDC_MODE_PASSIVE_DAC: |
8592 | case EDC_MODE_ACTIVE_DAC: | ||
8558 | mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC; | 8593 | mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC; |
8559 | break; | 8594 | break; |
8560 | default: | 8595 | default: |
@@ -9730,32 +9765,41 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, | |||
9730 | MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL, | 9765 | MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL, |
9731 | an_1000_val); | 9766 | an_1000_val); |
9732 | 9767 | ||
9733 | /* set 100 speed advertisement */ | 9768 | /* Set 10/100 speed advertisement */ |
9734 | if ((phy->req_line_speed == SPEED_AUTO_NEG) && | 9769 | if (phy->req_line_speed == SPEED_AUTO_NEG) { |
9735 | (phy->speed_cap_mask & | 9770 | if (phy->speed_cap_mask & |
9736 | (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | | 9771 | PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) { |
9737 | PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))) { | 9772 | /* Enable autoneg and restart autoneg for legacy speeds |
9738 | an_10_100_val |= (1<<7); | 9773 | */ |
9739 | /* Enable autoneg and restart autoneg for legacy speeds */ | 9774 | autoneg_val |= (1<<9 | 1<<12); |
9740 | autoneg_val |= (1<<9 | 1<<12); | ||
9741 | |||
9742 | if (phy->req_duplex == DUPLEX_FULL) | ||
9743 | an_10_100_val |= (1<<8); | 9775 | an_10_100_val |= (1<<8); |
9744 | DP(NETIF_MSG_LINK, "Advertising 100M\n"); | 9776 | DP(NETIF_MSG_LINK, "Advertising 100M-FD\n"); |
9745 | } | 9777 | } |
9746 | /* set 10 speed advertisement */ | 9778 | |
9747 | if (((phy->req_line_speed == SPEED_AUTO_NEG) && | 9779 | if (phy->speed_cap_mask & |
9748 | (phy->speed_cap_mask & | 9780 | PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) { |
9749 | (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL | | 9781 | /* Enable autoneg and restart autoneg for legacy speeds |
9750 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) && | 9782 | */ |
9751 | (phy->supported & | 9783 | autoneg_val |= (1<<9 | 1<<12); |
9752 | (SUPPORTED_10baseT_Half | | 9784 | an_10_100_val |= (1<<7); |
9753 | SUPPORTED_10baseT_Full)))) { | 9785 | DP(NETIF_MSG_LINK, "Advertising 100M-HD\n"); |
9754 | an_10_100_val |= (1<<5); | 9786 | } |
9755 | autoneg_val |= (1<<9 | 1<<12); | 9787 | |
9756 | if (phy->req_duplex == DUPLEX_FULL) | 9788 | if ((phy->speed_cap_mask & |
9789 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) && | ||
9790 | (phy->supported & SUPPORTED_10baseT_Full)) { | ||
9757 | an_10_100_val |= (1<<6); | 9791 | an_10_100_val |= (1<<6); |
9758 | DP(NETIF_MSG_LINK, "Advertising 10M\n"); | 9792 | autoneg_val |= (1<<9 | 1<<12); |
9793 | DP(NETIF_MSG_LINK, "Advertising 10M-FD\n"); | ||
9794 | } | ||
9795 | |||
9796 | if ((phy->speed_cap_mask & | ||
9797 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) && | ||
9798 | (phy->supported & SUPPORTED_10baseT_Half)) { | ||
9799 | an_10_100_val |= (1<<5); | ||
9800 | autoneg_val |= (1<<9 | 1<<12); | ||
9801 | DP(NETIF_MSG_LINK, "Advertising 10M-HD\n"); | ||
9802 | } | ||
9759 | } | 9803 | } |
9760 | 9804 | ||
9761 | /* Only 10/100 are allowed to work in FORCE mode */ | 9805 | /* Only 10/100 are allowed to work in FORCE mode */ |
@@ -13432,43 +13476,6 @@ static void bnx2x_sfp_tx_fault_detection(struct bnx2x_phy *phy, | |||
13432 | } | 13476 | } |
13433 | } | 13477 | } |
13434 | } | 13478 | } |
13435 | static void bnx2x_disable_kr2(struct link_params *params, | ||
13436 | struct link_vars *vars, | ||
13437 | struct bnx2x_phy *phy) | ||
13438 | { | ||
13439 | struct bnx2x *bp = params->bp; | ||
13440 | int i; | ||
13441 | static struct bnx2x_reg_set reg_set[] = { | ||
13442 | /* Step 1 - Program the TX/RX alignment markers */ | ||
13443 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690}, | ||
13444 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647}, | ||
13445 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0}, | ||
13446 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690}, | ||
13447 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647}, | ||
13448 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0}, | ||
13449 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c}, | ||
13450 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000}, | ||
13451 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000}, | ||
13452 | {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002}, | ||
13453 | {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000}, | ||
13454 | {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7}, | ||
13455 | {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7}, | ||
13456 | {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002}, | ||
13457 | {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000} | ||
13458 | }; | ||
13459 | DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n"); | ||
13460 | |||
13461 | for (i = 0; i < ARRAY_SIZE(reg_set); i++) | ||
13462 | bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, | ||
13463 | reg_set[i].val); | ||
13464 | vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE; | ||
13465 | bnx2x_update_link_attr(params, vars->link_attr_sync); | ||
13466 | |||
13467 | vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT; | ||
13468 | /* Restart AN on leading lane */ | ||
13469 | bnx2x_warpcore_restart_AN_KR(phy, params); | ||
13470 | } | ||
13471 | |||
13472 | static void bnx2x_kr2_recovery(struct link_params *params, | 13479 | static void bnx2x_kr2_recovery(struct link_params *params, |
13473 | struct link_vars *vars, | 13480 | struct link_vars *vars, |
13474 | struct bnx2x_phy *phy) | 13481 | struct bnx2x_phy *phy) |
@@ -13546,6 +13553,8 @@ static void bnx2x_check_kr2_wa(struct link_params *params, | |||
13546 | /* Disable KR2 on both lanes */ | 13553 | /* Disable KR2 on both lanes */ |
13547 | DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, next_page); | 13554 | DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, next_page); |
13548 | bnx2x_disable_kr2(params, vars, phy); | 13555 | bnx2x_disable_kr2(params, vars, phy); |
13556 | /* Restart AN on leading lane */ | ||
13557 | bnx2x_warpcore_restart_AN_KR(phy, params); | ||
13549 | return; | 13558 | return; |
13550 | } | 13559 | } |
13551 | } | 13560 | } |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index a6704b555042..82b658d8c04c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -4703,6 +4703,14 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print) | |||
4703 | attn.sig[3] = REG_RD(bp, | 4703 | attn.sig[3] = REG_RD(bp, |
4704 | MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + | 4704 | MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + |
4705 | port*4); | 4705 | port*4); |
4706 | /* Since MCP attentions can't be disabled inside the block, we need to | ||
4707 | * read AEU registers to see whether they're currently disabled | ||
4708 | */ | ||
4709 | attn.sig[3] &= ((REG_RD(bp, | ||
4710 | !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 | ||
4711 | : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) & | ||
4712 | MISC_AEU_ENABLE_MCP_PRTY_BITS) | | ||
4713 | ~MISC_AEU_ENABLE_MCP_PRTY_BITS); | ||
4706 | 4714 | ||
4707 | if (!CHIP_IS_E1x(bp)) | 4715 | if (!CHIP_IS_E1x(bp)) |
4708 | attn.sig[4] = REG_RD(bp, | 4716 | attn.sig[4] = REG_RD(bp, |
@@ -5447,26 +5455,24 @@ static void bnx2x_timer(unsigned long data) | |||
5447 | if (IS_PF(bp) && | 5455 | if (IS_PF(bp) && |
5448 | !BP_NOMCP(bp)) { | 5456 | !BP_NOMCP(bp)) { |
5449 | int mb_idx = BP_FW_MB_IDX(bp); | 5457 | int mb_idx = BP_FW_MB_IDX(bp); |
5450 | u32 drv_pulse; | 5458 | u16 drv_pulse; |
5451 | u32 mcp_pulse; | 5459 | u16 mcp_pulse; |
5452 | 5460 | ||
5453 | ++bp->fw_drv_pulse_wr_seq; | 5461 | ++bp->fw_drv_pulse_wr_seq; |
5454 | bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; | 5462 | bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; |
5455 | /* TBD - add SYSTEM_TIME */ | ||
5456 | drv_pulse = bp->fw_drv_pulse_wr_seq; | 5463 | drv_pulse = bp->fw_drv_pulse_wr_seq; |
5457 | bnx2x_drv_pulse(bp); | 5464 | bnx2x_drv_pulse(bp); |
5458 | 5465 | ||
5459 | mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) & | 5466 | mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) & |
5460 | MCP_PULSE_SEQ_MASK); | 5467 | MCP_PULSE_SEQ_MASK); |
5461 | /* The delta between driver pulse and mcp response | 5468 | /* The delta between driver pulse and mcp response |
5462 | * should be 1 (before mcp response) or 0 (after mcp response) | 5469 | * should not get too big. If the MFW is more than 5 pulses |
5470 | * behind, we should worry about it enough to generate an error | ||
5471 | * log. | ||
5463 | */ | 5472 | */ |
5464 | if ((drv_pulse != mcp_pulse) && | 5473 | if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5) |
5465 | (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { | 5474 | BNX2X_ERR("MFW seems hanged: drv_pulse (0x%x) != mcp_pulse (0x%x)\n", |
5466 | /* someone lost a heartbeat... */ | ||
5467 | BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n", | ||
5468 | drv_pulse, mcp_pulse); | 5475 | drv_pulse, mcp_pulse); |
5469 | } | ||
5470 | } | 5476 | } |
5471 | 5477 | ||
5472 | if (bp->state == BNX2X_STATE_OPEN) | 5478 | if (bp->state == BNX2X_STATE_OPEN) |
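The reworked heartbeat check above no longer insists the MCP answer within one pulse; it computes the sequence distance modulo the pulse mask and only complains when the firmware falls more than 5 pulses behind, which stays correct across the counter wrap. A small standalone illustration of that arithmetic (the mask value is assumed here to be the low 15 bits, as in the driver's firmware-interface header):

#include <stdio.h>
#include <stdint.h>

#define PULSE_SEQ_MASK 0x7fff    /* assumed value of MCP_PULSE_SEQ_MASK */

/* wrap-safe "how far behind is the firmware?" check */
static int mfw_lagging(uint16_t drv_pulse, uint16_t mcp_pulse)
{
    return ((drv_pulse - mcp_pulse) & PULSE_SEQ_MASK) > 5;
}

int main(void)
{
    printf("%d\n", mfw_lagging(2, 0x7ffe));   /* 0: only 4 pulses apart across the wrap */
    printf("%d\n", mfw_lagging(100, 10));     /* 1: 90 pulses behind, worth an error log */
    return 0;
}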
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 2604b6204abe..9ad012bdd915 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | |||
@@ -1819,7 +1819,7 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp) | |||
1819 | fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID); | 1819 | fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID); |
1820 | if (fid & IGU_FID_ENCODE_IS_PF) | 1820 | if (fid & IGU_FID_ENCODE_IS_PF) |
1821 | current_pf = fid & IGU_FID_PF_NUM_MASK; | 1821 | current_pf = fid & IGU_FID_PF_NUM_MASK; |
1822 | else if (current_pf == BP_ABS_FUNC(bp)) | 1822 | else if (current_pf == BP_FUNC(bp)) |
1823 | bnx2x_vf_set_igu_info(bp, sb_id, | 1823 | bnx2x_vf_set_igu_info(bp, sb_id, |
1824 | (fid & IGU_FID_VF_NUM_MASK)); | 1824 | (fid & IGU_FID_VF_NUM_MASK)); |
1825 | DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n", | 1825 | DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n", |
@@ -3180,6 +3180,7 @@ int bnx2x_enable_sriov(struct bnx2x *bp) | |||
3180 | /* set local queue arrays */ | 3180 | /* set local queue arrays */ |
3181 | vf->vfqs = &bp->vfdb->vfqs[qcount]; | 3181 | vf->vfqs = &bp->vfdb->vfqs[qcount]; |
3182 | qcount += vf_sb_count(vf); | 3182 | qcount += vf_sb_count(vf); |
3183 | bnx2x_iov_static_resc(bp, vf); | ||
3183 | } | 3184 | } |
3184 | 3185 | ||
3185 | /* prepare msix vectors in VF configuration space */ | 3186 | /* prepare msix vectors in VF configuration space */ |
@@ -3187,6 +3188,8 @@ int bnx2x_enable_sriov(struct bnx2x *bp) | |||
3187 | bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); | 3188 | bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); |
3188 | REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, | 3189 | REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, |
3189 | num_vf_queues); | 3190 | num_vf_queues); |
3191 | DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n", | ||
3192 | vf_idx, num_vf_queues); | ||
3190 | } | 3193 | } |
3191 | bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); | 3194 | bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); |
3192 | 3195 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 6cfb88732452..da16953eb2ec 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | |||
@@ -1765,28 +1765,28 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, | |||
1765 | switch (mbx->first_tlv.tl.type) { | 1765 | switch (mbx->first_tlv.tl.type) { |
1766 | case CHANNEL_TLV_ACQUIRE: | 1766 | case CHANNEL_TLV_ACQUIRE: |
1767 | bnx2x_vf_mbx_acquire(bp, vf, mbx); | 1767 | bnx2x_vf_mbx_acquire(bp, vf, mbx); |
1768 | break; | 1768 | return; |
1769 | case CHANNEL_TLV_INIT: | 1769 | case CHANNEL_TLV_INIT: |
1770 | bnx2x_vf_mbx_init_vf(bp, vf, mbx); | 1770 | bnx2x_vf_mbx_init_vf(bp, vf, mbx); |
1771 | break; | 1771 | return; |
1772 | case CHANNEL_TLV_SETUP_Q: | 1772 | case CHANNEL_TLV_SETUP_Q: |
1773 | bnx2x_vf_mbx_setup_q(bp, vf, mbx); | 1773 | bnx2x_vf_mbx_setup_q(bp, vf, mbx); |
1774 | break; | 1774 | return; |
1775 | case CHANNEL_TLV_SET_Q_FILTERS: | 1775 | case CHANNEL_TLV_SET_Q_FILTERS: |
1776 | bnx2x_vf_mbx_set_q_filters(bp, vf, mbx); | 1776 | bnx2x_vf_mbx_set_q_filters(bp, vf, mbx); |
1777 | break; | 1777 | return; |
1778 | case CHANNEL_TLV_TEARDOWN_Q: | 1778 | case CHANNEL_TLV_TEARDOWN_Q: |
1779 | bnx2x_vf_mbx_teardown_q(bp, vf, mbx); | 1779 | bnx2x_vf_mbx_teardown_q(bp, vf, mbx); |
1780 | break; | 1780 | return; |
1781 | case CHANNEL_TLV_CLOSE: | 1781 | case CHANNEL_TLV_CLOSE: |
1782 | bnx2x_vf_mbx_close_vf(bp, vf, mbx); | 1782 | bnx2x_vf_mbx_close_vf(bp, vf, mbx); |
1783 | break; | 1783 | return; |
1784 | case CHANNEL_TLV_RELEASE: | 1784 | case CHANNEL_TLV_RELEASE: |
1785 | bnx2x_vf_mbx_release_vf(bp, vf, mbx); | 1785 | bnx2x_vf_mbx_release_vf(bp, vf, mbx); |
1786 | break; | 1786 | return; |
1787 | case CHANNEL_TLV_UPDATE_RSS: | 1787 | case CHANNEL_TLV_UPDATE_RSS: |
1788 | bnx2x_vf_mbx_update_rss(bp, vf, mbx); | 1788 | bnx2x_vf_mbx_update_rss(bp, vf, mbx); |
1789 | break; | 1789 | return; |
1790 | } | 1790 | } |
1791 | 1791 | ||
1792 | } else { | 1792 | } else { |
@@ -1802,26 +1802,24 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, | |||
1802 | for (i = 0; i < 20; i++) | 1802 | for (i = 0; i < 20; i++) |
1803 | DP_CONT(BNX2X_MSG_IOV, "%x ", | 1803 | DP_CONT(BNX2X_MSG_IOV, "%x ", |
1804 | mbx->msg->req.tlv_buf_size.tlv_buffer[i]); | 1804 | mbx->msg->req.tlv_buf_size.tlv_buffer[i]); |
1805 | } | ||
1805 | 1806 | ||
1806 | /* test whether we can respond to the VF (do we have an address | 1807 | /* can we respond to VF (do we have an address for it?) */ |
1807 | * for it?) | 1808 | if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) { |
1808 | */ | 1809 | /* mbx_resp uses the op_rc of the VF */ |
1809 | if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) { | 1810 | vf->op_rc = PFVF_STATUS_NOT_SUPPORTED; |
1810 | /* mbx_resp uses the op_rc of the VF */ | ||
1811 | vf->op_rc = PFVF_STATUS_NOT_SUPPORTED; | ||
1812 | 1811 | ||
1813 | /* notify the VF that we do not support this request */ | 1812 | /* notify the VF that we do not support this request */ |
1814 | bnx2x_vf_mbx_resp(bp, vf); | 1813 | bnx2x_vf_mbx_resp(bp, vf); |
1815 | } else { | 1814 | } else { |
1816 | /* can't send a response since this VF is unknown to us | 1815 | /* can't send a response since this VF is unknown to us |
1817 | * just ack the FW to release the mailbox and unlock | 1816 | * just ack the FW to release the mailbox and unlock |
1818 | * the channel. | 1817 | * the channel. |
1819 | */ | 1818 | */ |
1820 | storm_memset_vf_mbx_ack(bp, vf->abs_vfid); | 1819 | storm_memset_vf_mbx_ack(bp, vf->abs_vfid); |
1821 | mmiowb(); | 1820 | /* Firmware ack should be written before unlocking channel */ |
1822 | bnx2x_unlock_vf_pf_channel(bp, vf, | 1821 | mmiowb(); |
1823 | mbx->first_tlv.tl.type); | 1822 | bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type); |
1824 | } | ||
1825 | } | 1823 | } |
1826 | } | 1824 | } |
1827 | 1825 | ||
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index ace5050dba38..db020230bd0b 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h | |||
@@ -88,6 +88,7 @@ static inline char *nic_name(struct pci_dev *pdev) | |||
88 | #define BE_MIN_MTU 256 | 88 | #define BE_MIN_MTU 256 |
89 | 89 | ||
90 | #define BE_NUM_VLANS_SUPPORTED 64 | 90 | #define BE_NUM_VLANS_SUPPORTED 64 |
91 | #define BE_UMC_NUM_VLANS_SUPPORTED 15 | ||
91 | #define BE_MAX_EQD 96u | 92 | #define BE_MAX_EQD 96u |
92 | #define BE_MAX_TX_FRAG_COUNT 30 | 93 | #define BE_MAX_TX_FRAG_COUNT 30 |
93 | 94 | ||
@@ -333,6 +334,7 @@ enum vf_state { | |||
333 | 334 | ||
334 | #define BE_FLAGS_LINK_STATUS_INIT 1 | 335 | #define BE_FLAGS_LINK_STATUS_INIT 1 |
335 | #define BE_FLAGS_WORKER_SCHEDULED (1 << 3) | 336 | #define BE_FLAGS_WORKER_SCHEDULED (1 << 3) |
337 | #define BE_FLAGS_VLAN_PROMISC (1 << 4) | ||
336 | #define BE_FLAGS_NAPI_ENABLED (1 << 9) | 338 | #define BE_FLAGS_NAPI_ENABLED (1 << 9) |
337 | #define BE_UC_PMAC_COUNT 30 | 339 | #define BE_UC_PMAC_COUNT 30 |
338 | #define BE_VF_UC_PMAC_COUNT 2 | 340 | #define BE_VF_UC_PMAC_COUNT 2 |
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 1ab5dab11eff..bd0e0c0bbcd8 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c | |||
@@ -180,6 +180,9 @@ static int be_mcc_compl_process(struct be_adapter *adapter, | |||
180 | dev_err(&adapter->pdev->dev, | 180 | dev_err(&adapter->pdev->dev, |
181 | "opcode %d-%d failed:status %d-%d\n", | 181 | "opcode %d-%d failed:status %d-%d\n", |
182 | opcode, subsystem, compl_status, extd_status); | 182 | opcode, subsystem, compl_status, extd_status); |
183 | |||
184 | if (extd_status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES) | ||
185 | return extd_status; | ||
183 | } | 186 | } |
184 | } | 187 | } |
185 | done: | 188 | done: |
@@ -1812,6 +1815,12 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) | |||
1812 | } else if (flags & IFF_ALLMULTI) { | 1815 | } else if (flags & IFF_ALLMULTI) { |
1813 | req->if_flags_mask = req->if_flags = | 1816 | req->if_flags_mask = req->if_flags = |
1814 | cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS); | 1817 | cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS); |
1818 | } else if (flags & BE_FLAGS_VLAN_PROMISC) { | ||
1819 | req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS); | ||
1820 | |||
1821 | if (value == ON) | ||
1822 | req->if_flags = | ||
1823 | cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS); | ||
1815 | } else { | 1824 | } else { |
1816 | struct netdev_hw_addr *ha; | 1825 | struct netdev_hw_addr *ha; |
1817 | int i = 0; | 1826 | int i = 0; |
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index d026226db88c..108ca8abf0af 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h | |||
@@ -60,6 +60,8 @@ enum { | |||
60 | MCC_STATUS_NOT_SUPPORTED = 66 | 60 | MCC_STATUS_NOT_SUPPORTED = 66 |
61 | }; | 61 | }; |
62 | 62 | ||
63 | #define MCC_ADDL_STS_INSUFFICIENT_RESOURCES 0x16 | ||
64 | |||
63 | #define CQE_STATUS_COMPL_MASK 0xFFFF | 65 | #define CQE_STATUS_COMPL_MASK 0xFFFF |
64 | #define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */ | 66 | #define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */ |
65 | #define CQE_STATUS_EXTD_MASK 0xFFFF | 67 | #define CQE_STATUS_EXTD_MASK 0xFFFF |
@@ -1791,7 +1793,7 @@ struct be_nic_res_desc { | |||
1791 | u8 acpi_params; | 1793 | u8 acpi_params; |
1792 | u8 wol_param; | 1794 | u8 wol_param; |
1793 | u16 rsvd7; | 1795 | u16 rsvd7; |
1794 | u32 rsvd8[3]; | 1796 | u32 rsvd8[7]; |
1795 | } __packed; | 1797 | } __packed; |
1796 | 1798 | ||
1797 | struct be_cmd_req_get_func_config { | 1799 | struct be_cmd_req_get_func_config { |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 100b528b9bd0..2c38cc402119 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
@@ -855,11 +855,11 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, | |||
855 | unsigned int eth_hdr_len; | 855 | unsigned int eth_hdr_len; |
856 | struct iphdr *ip; | 856 | struct iphdr *ip; |
857 | 857 | ||
858 | /* Lancer ASIC has a bug wherein packets that are 32 bytes or less | 858 | /* Lancer, SH-R ASICs have a bug wherein packets that are 32 bytes or less |
859 | * may cause a transmit stall on that port. So the work-around is to | 859 | * may cause a transmit stall on that port. So the work-around is to |
860 | * pad such packets to a 36-byte length. | 860 | * pad short packets (<= 32 bytes) to a 36-byte length. |
861 | */ | 861 | */ |
862 | if (unlikely(lancer_chip(adapter) && skb->len <= 32)) { | 862 | if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) { |
863 | if (skb_padto(skb, 36)) | 863 | if (skb_padto(skb, 36)) |
864 | goto tx_drop; | 864 | goto tx_drop; |
865 | skb->len = 36; | 865 | skb->len = 36; |
@@ -1013,18 +1013,40 @@ static int be_vid_config(struct be_adapter *adapter) | |||
1013 | status = be_cmd_vlan_config(adapter, adapter->if_handle, | 1013 | status = be_cmd_vlan_config(adapter, adapter->if_handle, |
1014 | vids, num, 1, 0); | 1014 | vids, num, 1, 0); |
1015 | 1015 | ||
1016 | /* Set to VLAN promisc mode as setting VLAN filter failed */ | ||
1017 | if (status) { | 1016 | if (status) { |
1018 | dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n"); | 1017 | /* Set to VLAN promisc mode as setting VLAN filter failed */ |
1019 | dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n"); | 1018 | if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES) |
1020 | goto set_vlan_promisc; | 1019 | goto set_vlan_promisc; |
1020 | dev_err(&adapter->pdev->dev, | ||
1021 | "Setting HW VLAN filtering failed.\n"); | ||
1022 | } else { | ||
1023 | if (adapter->flags & BE_FLAGS_VLAN_PROMISC) { | ||
1024 | /* hw VLAN filtering re-enabled. */ | ||
1025 | status = be_cmd_rx_filter(adapter, | ||
1026 | BE_FLAGS_VLAN_PROMISC, OFF); | ||
1027 | if (!status) { | ||
1028 | dev_info(&adapter->pdev->dev, | ||
1029 | "Disabling VLAN Promiscuous mode.\n"); | ||
1030 | adapter->flags &= ~BE_FLAGS_VLAN_PROMISC; | ||
1031 | dev_info(&adapter->pdev->dev, | ||
1032 | "Re-Enabling HW VLAN filtering\n"); | ||
1033 | } | ||
1034 | } | ||
1021 | } | 1035 | } |
1022 | 1036 | ||
1023 | return status; | 1037 | return status; |
1024 | 1038 | ||
1025 | set_vlan_promisc: | 1039 | set_vlan_promisc: |
1026 | status = be_cmd_vlan_config(adapter, adapter->if_handle, | 1040 | dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n"); |
1027 | NULL, 0, 1, 1); | 1041 | |
1042 | status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON); | ||
1043 | if (!status) { | ||
1044 | dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n"); | ||
1045 | dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n"); | ||
1046 | adapter->flags |= BE_FLAGS_VLAN_PROMISC; | ||
1047 | } else | ||
1048 | dev_err(&adapter->pdev->dev, | ||
1049 | "Failed to enable VLAN Promiscuous mode.\n"); | ||
1028 | return status; | 1050 | return status; |
1029 | } | 1051 | } |
1030 | 1052 | ||
@@ -1033,10 +1055,6 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid) | |||
1033 | struct be_adapter *adapter = netdev_priv(netdev); | 1055 | struct be_adapter *adapter = netdev_priv(netdev); |
1034 | int status = 0; | 1056 | int status = 0; |
1035 | 1057 | ||
1036 | if (!lancer_chip(adapter) && !be_physfn(adapter)) { | ||
1037 | status = -EINVAL; | ||
1038 | goto ret; | ||
1039 | } | ||
1040 | 1058 | ||
1041 | /* Packets with VID 0 are always received by Lancer by default */ | 1059 | /* Packets with VID 0 are always received by Lancer by default */ |
1042 | if (lancer_chip(adapter) && vid == 0) | 1060 | if (lancer_chip(adapter) && vid == 0) |
@@ -1059,11 +1077,6 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid) | |||
1059 | struct be_adapter *adapter = netdev_priv(netdev); | 1077 | struct be_adapter *adapter = netdev_priv(netdev); |
1060 | int status = 0; | 1078 | int status = 0; |
1061 | 1079 | ||
1062 | if (!lancer_chip(adapter) && !be_physfn(adapter)) { | ||
1063 | status = -EINVAL; | ||
1064 | goto ret; | ||
1065 | } | ||
1066 | |||
1067 | /* Packets with VID 0 are always received by Lancer by default */ | 1080 | /* Packets with VID 0 are always received by Lancer by default */ |
1068 | if (lancer_chip(adapter) && vid == 0) | 1081 | if (lancer_chip(adapter) && vid == 0) |
1069 | goto ret; | 1082 | goto ret; |
@@ -1188,8 +1201,8 @@ static int be_get_vf_config(struct net_device *netdev, int vf, | |||
1188 | 1201 | ||
1189 | vi->vf = vf; | 1202 | vi->vf = vf; |
1190 | vi->tx_rate = vf_cfg->tx_rate; | 1203 | vi->tx_rate = vf_cfg->tx_rate; |
1191 | vi->vlan = vf_cfg->vlan_tag; | 1204 | vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK; |
1192 | vi->qos = 0; | 1205 | vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT; |
1193 | memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN); | 1206 | memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN); |
1194 | 1207 | ||
1195 | return 0; | 1208 | return 0; |
@@ -1199,28 +1212,29 @@ static int be_set_vf_vlan(struct net_device *netdev, | |||
1199 | int vf, u16 vlan, u8 qos) | 1212 | int vf, u16 vlan, u8 qos) |
1200 | { | 1213 | { |
1201 | struct be_adapter *adapter = netdev_priv(netdev); | 1214 | struct be_adapter *adapter = netdev_priv(netdev); |
1215 | struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; | ||
1202 | int status = 0; | 1216 | int status = 0; |
1203 | 1217 | ||
1204 | if (!sriov_enabled(adapter)) | 1218 | if (!sriov_enabled(adapter)) |
1205 | return -EPERM; | 1219 | return -EPERM; |
1206 | 1220 | ||
1207 | if (vf >= adapter->num_vfs || vlan > 4095) | 1221 | if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7) |
1208 | return -EINVAL; | 1222 | return -EINVAL; |
1209 | 1223 | ||
1210 | if (vlan) { | 1224 | if (vlan || qos) { |
1211 | if (adapter->vf_cfg[vf].vlan_tag != vlan) { | 1225 | vlan |= qos << VLAN_PRIO_SHIFT; |
1226 | if (vf_cfg->vlan_tag != vlan) { | ||
1212 | /* If this is new value, program it. Else skip. */ | 1227 | /* If this is new value, program it. Else skip. */ |
1213 | adapter->vf_cfg[vf].vlan_tag = vlan; | 1228 | vf_cfg->vlan_tag = vlan; |
1214 | 1229 | status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, | |
1215 | status = be_cmd_set_hsw_config(adapter, vlan, | 1230 | vf_cfg->if_handle, 0); |
1216 | vf + 1, adapter->vf_cfg[vf].if_handle, 0); | ||
1217 | } | 1231 | } |
1218 | } else { | 1232 | } else { |
1219 | /* Reset Transparent Vlan Tagging. */ | 1233 | /* Reset Transparent Vlan Tagging. */ |
1220 | adapter->vf_cfg[vf].vlan_tag = 0; | 1234 | vf_cfg->vlan_tag = 0; |
1221 | vlan = adapter->vf_cfg[vf].def_vid; | 1235 | vlan = vf_cfg->def_vid; |
1222 | status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, | 1236 | status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, |
1223 | adapter->vf_cfg[vf].if_handle, 0); | 1237 | vf_cfg->if_handle, 0); |
1224 | } | 1238 | } |
1225 | 1239 | ||
1226 | 1240 | ||
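be_set_vf_vlan() above now folds the 802.1p priority into the tag with qos << VLAN_PRIO_SHIFT, and be_get_vf_config() splits it back out with VLAN_VID_MASK and the same shift. A two-line illustration of that packing, using the standard TCI layout (VID in bits 0-11, priority in bits 13-15):

#include <stdio.h>
#include <stdint.h>

#define VLAN_VID_MASK   0x0fff
#define VLAN_PRIO_SHIFT 13

int main(void)
{
    uint16_t vid = 100, qos = 5;
    uint16_t tag = vid | (qos << VLAN_PRIO_SHIFT);   /* pack, as be_set_vf_vlan() does */

    /* unpack, as be_get_vf_config() does: prints vid=100 qos=5 */
    printf("vid=%u qos=%u\n", tag & VLAN_VID_MASK, tag >> VLAN_PRIO_SHIFT);
    return 0;
}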
@@ -2963,6 +2977,8 @@ static void BEx_get_resources(struct be_adapter *adapter, | |||
2963 | 2977 | ||
2964 | if (adapter->function_mode & FLEX10_MODE) | 2978 | if (adapter->function_mode & FLEX10_MODE) |
2965 | res->max_vlans = BE_NUM_VLANS_SUPPORTED/8; | 2979 | res->max_vlans = BE_NUM_VLANS_SUPPORTED/8; |
2980 | else if (adapter->function_mode & UMC_ENABLED) | ||
2981 | res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED; | ||
2966 | else | 2982 | else |
2967 | res->max_vlans = BE_NUM_VLANS_SUPPORTED; | 2983 | res->max_vlans = BE_NUM_VLANS_SUPPORTED; |
2968 | res->max_mcast_mac = BE_MAX_MC; | 2984 | res->max_mcast_mac = BE_MAX_MC; |
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c index 098f133908ae..e006a09ba899 100644 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c | |||
@@ -452,7 +452,9 @@ static int gianfar_ptp_probe(struct platform_device *dev) | |||
452 | err = -ENODEV; | 452 | err = -ENODEV; |
453 | 453 | ||
454 | etsects->caps = ptp_gianfar_caps; | 454 | etsects->caps = ptp_gianfar_caps; |
455 | etsects->cksel = DEFAULT_CKSEL; | 455 | |
456 | if (get_of_u32(node, "fsl,cksel", &etsects->cksel)) | ||
457 | etsects->cksel = DEFAULT_CKSEL; | ||
456 | 458 | ||
457 | if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) || | 459 | if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) || |
458 | get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) || | 460 | get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) || |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 0c524fa9f811..cfef7fc32cdd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c | |||
@@ -701,8 +701,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, | |||
701 | 701 | ||
702 | details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); | 702 | details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); |
703 | if (cmd_details) { | 703 | if (cmd_details) { |
704 | memcpy(details, cmd_details, | 704 | *details = *cmd_details; |
705 | sizeof(struct i40e_asq_cmd_details)); | ||
706 | 705 | ||
707 | /* If the cmd_details are defined copy the cookie. The | 706 | /* If the cmd_details are defined copy the cookie. The |
708 | * cpu_to_le32 is not needed here because the data is ignored | 707 | * cpu_to_le32 is not needed here because the data is ignored |
@@ -760,7 +759,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, | |||
760 | desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use); | 759 | desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use); |
761 | 760 | ||
762 | /* if the desc is available copy the temp desc to the right place */ | 761 | /* if the desc is available copy the temp desc to the right place */ |
763 | memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc)); | 762 | *desc_on_ring = *desc; |
764 | 763 | ||
765 | /* if buff is not NULL assume indirect command */ | 764 | /* if buff is not NULL assume indirect command */ |
766 | if (buff != NULL) { | 765 | if (buff != NULL) { |
@@ -807,7 +806,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, | |||
807 | 806 | ||
808 | /* if ready, copy the desc back to temp */ | 807 | /* if ready, copy the desc back to temp */ |
809 | if (i40e_asq_done(hw)) { | 808 | if (i40e_asq_done(hw)) { |
810 | memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc)); | 809 | *desc = *desc_on_ring; |
811 | if (buff != NULL) | 810 | if (buff != NULL) |
812 | memcpy(buff, dma_buff->va, buff_size); | 811 | memcpy(buff, dma_buff->va, buff_size); |
813 | retval = le16_to_cpu(desc->retval); | 812 | retval = le16_to_cpu(desc->retval); |
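The i40e_adminq.c hunks replace memcpy() of fixed-size descriptors with plain structure assignment; the copy is the same, but the compiler now checks that both sides have the same type. A toy illustration (the struct below is a hypothetical stand-in, not the real i40e_aq_desc layout):

#include <linux/types.h>

struct example_desc {		/* hypothetical reduced descriptor */
	__le16 flags;
	__le16 opcode;
	__le32 cookie_high;
	__le32 cookie_low;
};

static void copy_desc(struct example_desc *dst, const struct example_desc *src)
{
	*dst = *src;		/* equivalent to memcpy(dst, src, sizeof(*dst)) */
}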
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index c21df7bc3b1d..1e4ea134975a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c | |||
@@ -507,7 +507,7 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, | |||
507 | 507 | ||
508 | /* save link status information */ | 508 | /* save link status information */ |
509 | if (link) | 509 | if (link) |
510 | memcpy(link, hw_link_info, sizeof(struct i40e_link_status)); | 510 | *link = *hw_link_info; |
511 | 511 | ||
512 | /* flag cleared so helper functions don't call AQ again */ | 512 | /* flag cleared so helper functions don't call AQ again */ |
513 | hw->phy.get_link_info = false; | 513 | hw->phy.get_link_info = false; |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 601d482694ea..221aa4795017 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
@@ -101,10 +101,10 @@ int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem, | |||
101 | mem->size = ALIGN(size, alignment); | 101 | mem->size = ALIGN(size, alignment); |
102 | mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size, | 102 | mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size, |
103 | &mem->pa, GFP_KERNEL); | 103 | &mem->pa, GFP_KERNEL); |
104 | if (mem->va) | 104 | if (!mem->va) |
105 | return 0; | 105 | return -ENOMEM; |
106 | 106 | ||
107 | return -ENOMEM; | 107 | return 0; |
108 | } | 108 | } |
109 | 109 | ||
110 | /** | 110 | /** |
@@ -136,10 +136,10 @@ int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem, | |||
136 | mem->size = size; | 136 | mem->size = size; |
137 | mem->va = kzalloc(size, GFP_KERNEL); | 137 | mem->va = kzalloc(size, GFP_KERNEL); |
138 | 138 | ||
139 | if (mem->va) | 139 | if (!mem->va) |
140 | return 0; | 140 | return -ENOMEM; |
141 | 141 | ||
142 | return -ENOMEM; | 142 | return 0; |
143 | } | 143 | } |
144 | 144 | ||
145 | /** | 145 | /** |
@@ -174,8 +174,7 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile, | |||
174 | u16 needed, u16 id) | 174 | u16 needed, u16 id) |
175 | { | 175 | { |
176 | int ret = -ENOMEM; | 176 | int ret = -ENOMEM; |
177 | int i = 0; | 177 | int i, j; |
178 | int j = 0; | ||
179 | 178 | ||
180 | if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) { | 179 | if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) { |
181 | dev_info(&pf->pdev->dev, | 180 | dev_info(&pf->pdev->dev, |
@@ -186,7 +185,7 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile, | |||
186 | 185 | ||
187 | /* start the linear search with an imperfect hint */ | 186 | /* start the linear search with an imperfect hint */ |
188 | i = pile->search_hint; | 187 | i = pile->search_hint; |
189 | while (i < pile->num_entries && ret < 0) { | 188 | while (i < pile->num_entries) { |
190 | /* skip already allocated entries */ | 189 | /* skip already allocated entries */ |
191 | if (pile->list[i] & I40E_PILE_VALID_BIT) { | 190 | if (pile->list[i] & I40E_PILE_VALID_BIT) { |
192 | i++; | 191 | i++; |
@@ -205,6 +204,7 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile, | |||
205 | pile->list[i+j] = id | I40E_PILE_VALID_BIT; | 204 | pile->list[i+j] = id | I40E_PILE_VALID_BIT; |
206 | ret = i; | 205 | ret = i; |
207 | pile->search_hint = i + j; | 206 | pile->search_hint = i + j; |
207 | break; | ||
208 | } else { | 208 | } else { |
209 | /* not enough, so skip over it and continue looking */ | 209 | /* not enough, so skip over it and continue looking */ |
210 | i += j; | 210 | i += j; |
@@ -1388,7 +1388,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) | |||
1388 | bool add_happened = false; | 1388 | bool add_happened = false; |
1389 | int filter_list_len = 0; | 1389 | int filter_list_len = 0; |
1390 | u32 changed_flags = 0; | 1390 | u32 changed_flags = 0; |
1391 | i40e_status ret = 0; | 1391 | i40e_status aq_ret = 0; |
1392 | struct i40e_pf *pf; | 1392 | struct i40e_pf *pf; |
1393 | int num_add = 0; | 1393 | int num_add = 0; |
1394 | int num_del = 0; | 1394 | int num_del = 0; |
@@ -1449,28 +1449,28 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) | |||
1449 | 1449 | ||
1450 | /* flush a full buffer */ | 1450 | /* flush a full buffer */ |
1451 | if (num_del == filter_list_len) { | 1451 | if (num_del == filter_list_len) { |
1452 | ret = i40e_aq_remove_macvlan(&pf->hw, | 1452 | aq_ret = i40e_aq_remove_macvlan(&pf->hw, |
1453 | vsi->seid, del_list, num_del, | 1453 | vsi->seid, del_list, num_del, |
1454 | NULL); | 1454 | NULL); |
1455 | num_del = 0; | 1455 | num_del = 0; |
1456 | memset(del_list, 0, sizeof(*del_list)); | 1456 | memset(del_list, 0, sizeof(*del_list)); |
1457 | 1457 | ||
1458 | if (ret) | 1458 | if (aq_ret) |
1459 | dev_info(&pf->pdev->dev, | 1459 | dev_info(&pf->pdev->dev, |
1460 | "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n", | 1460 | "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n", |
1461 | ret, | 1461 | aq_ret, |
1462 | pf->hw.aq.asq_last_status); | 1462 | pf->hw.aq.asq_last_status); |
1463 | } | 1463 | } |
1464 | } | 1464 | } |
1465 | if (num_del) { | 1465 | if (num_del) { |
1466 | ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, | 1466 | aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, |
1467 | del_list, num_del, NULL); | 1467 | del_list, num_del, NULL); |
1468 | num_del = 0; | 1468 | num_del = 0; |
1469 | 1469 | ||
1470 | if (ret) | 1470 | if (aq_ret) |
1471 | dev_info(&pf->pdev->dev, | 1471 | dev_info(&pf->pdev->dev, |
1472 | "ignoring delete macvlan error, err %d, aq_err %d\n", | 1472 | "ignoring delete macvlan error, err %d, aq_err %d\n", |
1473 | ret, pf->hw.aq.asq_last_status); | 1473 | aq_ret, pf->hw.aq.asq_last_status); |
1474 | } | 1474 | } |
1475 | 1475 | ||
1476 | kfree(del_list); | 1476 | kfree(del_list); |
@@ -1515,32 +1515,30 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) | |||
1515 | 1515 | ||
1516 | /* flush a full buffer */ | 1516 | /* flush a full buffer */ |
1517 | if (num_add == filter_list_len) { | 1517 | if (num_add == filter_list_len) { |
1518 | ret = i40e_aq_add_macvlan(&pf->hw, | 1518 | aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, |
1519 | vsi->seid, | 1519 | add_list, num_add, |
1520 | add_list, | 1520 | NULL); |
1521 | num_add, | ||
1522 | NULL); | ||
1523 | num_add = 0; | 1521 | num_add = 0; |
1524 | 1522 | ||
1525 | if (ret) | 1523 | if (aq_ret) |
1526 | break; | 1524 | break; |
1527 | memset(add_list, 0, sizeof(*add_list)); | 1525 | memset(add_list, 0, sizeof(*add_list)); |
1528 | } | 1526 | } |
1529 | } | 1527 | } |
1530 | if (num_add) { | 1528 | if (num_add) { |
1531 | ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, | 1529 | aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, |
1532 | add_list, num_add, NULL); | 1530 | add_list, num_add, NULL); |
1533 | num_add = 0; | 1531 | num_add = 0; |
1534 | } | 1532 | } |
1535 | kfree(add_list); | 1533 | kfree(add_list); |
1536 | add_list = NULL; | 1534 | add_list = NULL; |
1537 | 1535 | ||
1538 | if (add_happened && (!ret)) { | 1536 | if (add_happened && (!aq_ret)) { |
1539 | /* do nothing */; | 1537 | /* do nothing */; |
1540 | } else if (add_happened && (ret)) { | 1538 | } else if (add_happened && (aq_ret)) { |
1541 | dev_info(&pf->pdev->dev, | 1539 | dev_info(&pf->pdev->dev, |
1542 | "add filter failed, err %d, aq_err %d\n", | 1540 | "add filter failed, err %d, aq_err %d\n", |
1543 | ret, pf->hw.aq.asq_last_status); | 1541 | aq_ret, pf->hw.aq.asq_last_status); |
1544 | if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) && | 1542 | if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) && |
1545 | !test_bit(__I40E_FILTER_OVERFLOW_PROMISC, | 1543 | !test_bit(__I40E_FILTER_OVERFLOW_PROMISC, |
1546 | &vsi->state)) { | 1544 | &vsi->state)) { |
@@ -1556,28 +1554,27 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) | |||
1556 | if (changed_flags & IFF_ALLMULTI) { | 1554 | if (changed_flags & IFF_ALLMULTI) { |
1557 | bool cur_multipromisc; | 1555 | bool cur_multipromisc; |
1558 | cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI); | 1556 | cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI); |
1559 | ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, | 1557 | aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, |
1560 | vsi->seid, | 1558 | vsi->seid, |
1561 | cur_multipromisc, | 1559 | cur_multipromisc, |
1562 | NULL); | 1560 | NULL); |
1563 | if (ret) | 1561 | if (aq_ret) |
1564 | dev_info(&pf->pdev->dev, | 1562 | dev_info(&pf->pdev->dev, |
1565 | "set multi promisc failed, err %d, aq_err %d\n", | 1563 | "set multi promisc failed, err %d, aq_err %d\n", |
1566 | ret, pf->hw.aq.asq_last_status); | 1564 | aq_ret, pf->hw.aq.asq_last_status); |
1567 | } | 1565 | } |
1568 | if ((changed_flags & IFF_PROMISC) || promisc_forced_on) { | 1566 | if ((changed_flags & IFF_PROMISC) || promisc_forced_on) { |
1569 | bool cur_promisc; | 1567 | bool cur_promisc; |
1570 | cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || | 1568 | cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || |
1571 | test_bit(__I40E_FILTER_OVERFLOW_PROMISC, | 1569 | test_bit(__I40E_FILTER_OVERFLOW_PROMISC, |
1572 | &vsi->state)); | 1570 | &vsi->state)); |
1573 | ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw, | 1571 | aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw, |
1574 | vsi->seid, | 1572 | vsi->seid, |
1575 | cur_promisc, | 1573 | cur_promisc, NULL); |
1576 | NULL); | 1574 | if (aq_ret) |
1577 | if (ret) | ||
1578 | dev_info(&pf->pdev->dev, | 1575 | dev_info(&pf->pdev->dev, |
1579 | "set uni promisc failed, err %d, aq_err %d\n", | 1576 | "set uni promisc failed, err %d, aq_err %d\n", |
1580 | ret, pf->hw.aq.asq_last_status); | 1577 | aq_ret, pf->hw.aq.asq_last_status); |
1581 | } | 1578 | } |
1582 | 1579 | ||
1583 | clear_bit(__I40E_CONFIG_BUSY, &vsi->state); | 1580 | clear_bit(__I40E_CONFIG_BUSY, &vsi->state); |
@@ -1790,6 +1787,8 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) | |||
1790 | * i40e_vsi_kill_vlan - Remove vsi membership for given vlan | 1787 | * i40e_vsi_kill_vlan - Remove vsi membership for given vlan |
1791 | * @vsi: the vsi being configured | 1788 | * @vsi: the vsi being configured |
1792 | * @vid: vlan id to be removed (0 = untagged only , -1 = any) | 1789 | * @vid: vlan id to be removed (0 = untagged only , -1 = any) |
1790 | * | ||
1791 | * Return: 0 on success or negative otherwise | ||
1793 | **/ | 1792 | **/ |
1794 | int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) | 1793 | int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) |
1795 | { | 1794 | { |
@@ -1863,37 +1862,39 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) | |||
1863 | * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload | 1862 | * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload |
1864 | * @netdev: network interface to be adjusted | 1863 | * @netdev: network interface to be adjusted |
1865 | * @vid: vlan id to be added | 1864 | * @vid: vlan id to be added |
1865 | * | ||
1866 | * net_device_ops implementation for adding vlan ids | ||
1866 | **/ | 1867 | **/ |
1867 | static int i40e_vlan_rx_add_vid(struct net_device *netdev, | 1868 | static int i40e_vlan_rx_add_vid(struct net_device *netdev, |
1868 | __always_unused __be16 proto, u16 vid) | 1869 | __always_unused __be16 proto, u16 vid) |
1869 | { | 1870 | { |
1870 | struct i40e_netdev_priv *np = netdev_priv(netdev); | 1871 | struct i40e_netdev_priv *np = netdev_priv(netdev); |
1871 | struct i40e_vsi *vsi = np->vsi; | 1872 | struct i40e_vsi *vsi = np->vsi; |
1872 | int ret; | 1873 | int ret = 0; |
1873 | 1874 | ||
1874 | if (vid > 4095) | 1875 | if (vid > 4095) |
1875 | return 0; | 1876 | return -EINVAL; |
1877 | |||
1878 | netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid); | ||
1876 | 1879 | ||
1877 | netdev_info(vsi->netdev, "adding %pM vid=%d\n", | ||
1878 | netdev->dev_addr, vid); | ||
1879 | /* If the network stack called us with vid = 0, we should | 1880 | /* If the network stack called us with vid = 0, we should |
1880 | * indicate to i40e_vsi_add_vlan() that we want to receive | 1881 | * indicate to i40e_vsi_add_vlan() that we want to receive |
1881 | * any traffic (i.e. with any vlan tag, or untagged) | 1882 | * any traffic (i.e. with any vlan tag, or untagged) |
1882 | */ | 1883 | */ |
1883 | ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY); | 1884 | ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY); |
1884 | 1885 | ||
1885 | if (!ret) { | 1886 | if (!ret && (vid < VLAN_N_VID)) |
1886 | if (vid < VLAN_N_VID) | 1887 | set_bit(vid, vsi->active_vlans); |
1887 | set_bit(vid, vsi->active_vlans); | ||
1888 | } | ||
1889 | 1888 | ||
1890 | return 0; | 1889 | return ret; |
1891 | } | 1890 | } |
1892 | 1891 | ||
1893 | /** | 1892 | /** |
1894 | * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload | 1893 | * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload |
1895 | * @netdev: network interface to be adjusted | 1894 | * @netdev: network interface to be adjusted |
1896 | * @vid: vlan id to be removed | 1895 | * @vid: vlan id to be removed |
1896 | * | ||
1897 | * net_device_ops implementation for removing vlan ids | ||
1897 | **/ | 1898 | **/ |
1898 | static int i40e_vlan_rx_kill_vid(struct net_device *netdev, | 1899 | static int i40e_vlan_rx_kill_vid(struct net_device *netdev, |
1899 | __always_unused __be16 proto, u16 vid) | 1900 | __always_unused __be16 proto, u16 vid) |
@@ -1901,15 +1902,16 @@ static int i40e_vlan_rx_kill_vid(struct net_device *netdev, | |||
1901 | struct i40e_netdev_priv *np = netdev_priv(netdev); | 1902 | struct i40e_netdev_priv *np = netdev_priv(netdev); |
1902 | struct i40e_vsi *vsi = np->vsi; | 1903 | struct i40e_vsi *vsi = np->vsi; |
1903 | 1904 | ||
1904 | netdev_info(vsi->netdev, "removing %pM vid=%d\n", | 1905 | netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid); |
1905 | netdev->dev_addr, vid); | 1906 | |
1906 | /* return code is ignored as there is nothing a user | 1907 | /* return code is ignored as there is nothing a user |
1907 | * can do about failure to remove and a log message was | 1908 | * can do about failure to remove and a log message was |
1908 | * already printed from another function | 1909 | * already printed from the other function |
1909 | */ | 1910 | */ |
1910 | i40e_vsi_kill_vlan(vsi, vid); | 1911 | i40e_vsi_kill_vlan(vsi, vid); |
1911 | 1912 | ||
1912 | clear_bit(vid, vsi->active_vlans); | 1913 | clear_bit(vid, vsi->active_vlans); |
1914 | |||
1913 | return 0; | 1915 | return 0; |
1914 | } | 1916 | } |
1915 | 1917 | ||
@@ -1936,10 +1938,10 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi) | |||
1936 | * @vsi: the vsi being adjusted | 1938 | * @vsi: the vsi being adjusted |
1937 | * @vid: the vlan id to set as a PVID | 1939 | * @vid: the vlan id to set as a PVID |
1938 | **/ | 1940 | **/ |
1939 | i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) | 1941 | int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) |
1940 | { | 1942 | { |
1941 | struct i40e_vsi_context ctxt; | 1943 | struct i40e_vsi_context ctxt; |
1942 | i40e_status ret; | 1944 | i40e_status aq_ret; |
1943 | 1945 | ||
1944 | vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); | 1946 | vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); |
1945 | vsi->info.pvid = cpu_to_le16(vid); | 1947 | vsi->info.pvid = cpu_to_le16(vid); |
@@ -1948,14 +1950,15 @@ i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) | |||
1948 | 1950 | ||
1949 | ctxt.seid = vsi->seid; | 1951 | ctxt.seid = vsi->seid; |
1950 | memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); | 1952 | memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); |
1951 | ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); | 1953 | aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); |
1952 | if (ret) { | 1954 | if (aq_ret) { |
1953 | dev_info(&vsi->back->pdev->dev, | 1955 | dev_info(&vsi->back->pdev->dev, |
1954 | "%s: update vsi failed, aq_err=%d\n", | 1956 | "%s: update vsi failed, aq_err=%d\n", |
1955 | __func__, vsi->back->hw.aq.asq_last_status); | 1957 | __func__, vsi->back->hw.aq.asq_last_status); |
1958 | return -ENOENT; | ||
1956 | } | 1959 | } |
1957 | 1960 | ||
1958 | return ret; | 1961 | return 0; |
1959 | } | 1962 | } |
1960 | 1963 | ||
1961 | /** | 1964 | /** |
@@ -3326,7 +3329,8 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf) | |||
3326 | **/ | 3329 | **/ |
3327 | static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg) | 3330 | static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg) |
3328 | { | 3331 | { |
3329 | int num_tc = 0, i; | 3332 | u8 num_tc = 0; |
3333 | int i; | ||
3330 | 3334 | ||
3331 | /* Scan the ETS Config Priority Table to find | 3335 | /* Scan the ETS Config Priority Table to find |
3332 | * traffic class enabled for a given priority | 3336 | * traffic class enabled for a given priority |
@@ -3341,9 +3345,7 @@ static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg) | |||
3341 | /* Traffic class index starts from zero so | 3345 | /* Traffic class index starts from zero so |
3342 | * increment to return the actual count | 3346 | * increment to return the actual count |
3343 | */ | 3347 | */ |
3344 | num_tc++; | 3348 | return num_tc + 1; |
3345 | |||
3346 | return num_tc; | ||
3347 | } | 3349 | } |
3348 | 3350 | ||
3349 | /** | 3351 | /** |
@@ -3451,28 +3453,27 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) | |||
3451 | struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0}; | 3453 | struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0}; |
3452 | struct i40e_pf *pf = vsi->back; | 3454 | struct i40e_pf *pf = vsi->back; |
3453 | struct i40e_hw *hw = &pf->hw; | 3455 | struct i40e_hw *hw = &pf->hw; |
3456 | i40e_status aq_ret; | ||
3454 | u32 tc_bw_max; | 3457 | u32 tc_bw_max; |
3455 | int ret; | ||
3456 | int i; | 3458 | int i; |
3457 | 3459 | ||
3458 | /* Get the VSI level BW configuration */ | 3460 | /* Get the VSI level BW configuration */ |
3459 | ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); | 3461 | aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); |
3460 | if (ret) { | 3462 | if (aq_ret) { |
3461 | dev_info(&pf->pdev->dev, | 3463 | dev_info(&pf->pdev->dev, |
3462 | "couldn't get pf vsi bw config, err %d, aq_err %d\n", | 3464 | "couldn't get pf vsi bw config, err %d, aq_err %d\n", |
3463 | ret, pf->hw.aq.asq_last_status); | 3465 | aq_ret, pf->hw.aq.asq_last_status); |
3464 | return ret; | 3466 | return -EINVAL; |
3465 | } | 3467 | } |
3466 | 3468 | ||
3467 | /* Get the VSI level BW configuration per TC */ | 3469 | /* Get the VSI level BW configuration per TC */ |
3468 | ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, | 3470 | aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config, |
3469 | &bw_ets_config, | 3471 | NULL); |
3470 | NULL); | 3472 | if (aq_ret) { |
3471 | if (ret) { | ||
3472 | dev_info(&pf->pdev->dev, | 3473 | dev_info(&pf->pdev->dev, |
3473 | "couldn't get pf vsi ets bw config, err %d, aq_err %d\n", | 3474 | "couldn't get pf vsi ets bw config, err %d, aq_err %d\n", |
3474 | ret, pf->hw.aq.asq_last_status); | 3475 | aq_ret, pf->hw.aq.asq_last_status); |
3475 | return ret; | 3476 | return -EINVAL; |
3476 | } | 3477 | } |
3477 | 3478 | ||
3478 | if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) { | 3479 | if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) { |
@@ -3494,7 +3495,8 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) | |||
3494 | /* 3 bits out of 4 for each TC */ | 3495 | /* 3 bits out of 4 for each TC */ |
3495 | vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7); | 3496 | vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7); |
3496 | } | 3497 | } |
3497 | return ret; | 3498 | |
3499 | return 0; | ||
3498 | } | 3500 | } |
3499 | 3501 | ||
3500 | /** | 3502 | /** |
@@ -3505,30 +3507,30 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) | |||
3505 | * | 3507 | * |
3506 | * Returns 0 on success, negative value on failure | 3508 | * Returns 0 on success, negative value on failure |
3507 | **/ | 3509 | **/ |
3508 | static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, | 3510 | static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, |
3509 | u8 enabled_tc, | ||
3510 | u8 *bw_share) | 3511 | u8 *bw_share) |
3511 | { | 3512 | { |
3512 | struct i40e_aqc_configure_vsi_tc_bw_data bw_data; | 3513 | struct i40e_aqc_configure_vsi_tc_bw_data bw_data; |
3513 | int i, ret = 0; | 3514 | i40e_status aq_ret; |
3515 | int i; | ||
3514 | 3516 | ||
3515 | bw_data.tc_valid_bits = enabled_tc; | 3517 | bw_data.tc_valid_bits = enabled_tc; |
3516 | for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) | 3518 | for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) |
3517 | bw_data.tc_bw_credits[i] = bw_share[i]; | 3519 | bw_data.tc_bw_credits[i] = bw_share[i]; |
3518 | 3520 | ||
3519 | ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, | 3521 | aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data, |
3520 | &bw_data, NULL); | 3522 | NULL); |
3521 | if (ret) { | 3523 | if (aq_ret) { |
3522 | dev_info(&vsi->back->pdev->dev, | 3524 | dev_info(&vsi->back->pdev->dev, |
3523 | "%s: AQ command Config VSI BW allocation per TC failed = %d\n", | 3525 | "%s: AQ command Config VSI BW allocation per TC failed = %d\n", |
3524 | __func__, vsi->back->hw.aq.asq_last_status); | 3526 | __func__, vsi->back->hw.aq.asq_last_status); |
3525 | return ret; | 3527 | return -EINVAL; |
3526 | } | 3528 | } |
3527 | 3529 | ||
3528 | for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) | 3530 | for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) |
3529 | vsi->info.qs_handle[i] = bw_data.qs_handles[i]; | 3531 | vsi->info.qs_handle[i] = bw_data.qs_handles[i]; |
3530 | 3532 | ||
3531 | return ret; | 3533 | return 0; |
3532 | } | 3534 | } |
3533 | 3535 | ||
3534 | /** | 3536 | /** |
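A recurring change throughout the i40e_main.c diff above is to keep the firmware admin-queue status in a local called aq_ret and return a plain negative errno (-EINVAL, -ENOENT, -ENOMEM) to callers instead of leaking the firmware status type. A rough sketch of that boundary; struct example_dev and query_hw() are hypothetical stand-ins for the i40e_aq_* helpers and their context:

#include <linux/errno.h>

struct example_dev { int id; };			/* hypothetical */

static int query_hw(struct example_dev *dev)	/* hypothetical AQ call */
{
	return 0;				/* 0 = firmware reported success */
}

static int example_get_config(struct example_dev *dev)
{
	int aq_ret;				/* firmware status stays local */

	aq_ret = query_hw(dev);
	if (aq_ret)
		return -EINVAL;			/* callers only see an errno */

	return 0;
}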
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 48cbc833b051..86d51429a189 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c | |||
@@ -1607,6 +1607,9 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter) | |||
1607 | igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0); | 1607 | igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0); |
1608 | igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); | 1608 | igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); |
1609 | } | 1609 | } |
1610 | } else if (hw->phy.type == e1000_phy_82580) { | ||
1611 | /* enable MII loopback */ | ||
1612 | igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041); | ||
1610 | } | 1613 | } |
1611 | 1614 | ||
1612 | /* add small delay to avoid loopback test failure */ | 1615 | /* add small delay to avoid loopback test failure */ |
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 1a9c4f6269ea..ecc7f7b696b8 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c | |||
@@ -3086,13 +3086,16 @@ static struct sk_buff *skge_rx_get(struct net_device *dev, | |||
3086 | PCI_DMA_FROMDEVICE); | 3086 | PCI_DMA_FROMDEVICE); |
3087 | skge_rx_reuse(e, skge->rx_buf_size); | 3087 | skge_rx_reuse(e, skge->rx_buf_size); |
3088 | } else { | 3088 | } else { |
3089 | struct skge_element ee; | ||
3089 | struct sk_buff *nskb; | 3090 | struct sk_buff *nskb; |
3090 | 3091 | ||
3091 | nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size); | 3092 | nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size); |
3092 | if (!nskb) | 3093 | if (!nskb) |
3093 | goto resubmit; | 3094 | goto resubmit; |
3094 | 3095 | ||
3095 | skb = e->skb; | 3096 | ee = *e; |
3097 | |||
3098 | skb = ee.skb; | ||
3096 | prefetch(skb->data); | 3099 | prefetch(skb->data); |
3097 | 3100 | ||
3098 | if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) { | 3101 | if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) { |
@@ -3101,8 +3104,8 @@ static struct sk_buff *skge_rx_get(struct net_device *dev, | |||
3101 | } | 3104 | } |
3102 | 3105 | ||
3103 | pci_unmap_single(skge->hw->pdev, | 3106 | pci_unmap_single(skge->hw->pdev, |
3104 | dma_unmap_addr(e, mapaddr), | 3107 | dma_unmap_addr(&ee, mapaddr), |
3105 | dma_unmap_len(e, maplen), | 3108 | dma_unmap_len(&ee, maplen), |
3106 | PCI_DMA_FROMDEVICE); | 3109 | PCI_DMA_FROMDEVICE); |
3107 | } | 3110 | } |
3108 | 3111 | ||
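The skge hunk above takes a stack copy ("ee = *e") of the receive element before the descriptor slot is reused, so the later pci_unmap_single() works from the saved DMA address and length rather than fields that may already have been overwritten. A reduced sketch of the pattern; struct rx_elem and the refill comment are illustrative, not the real skge_element:

#include <linux/dma-mapping.h>

struct rx_elem {			/* hypothetical reduced element */
	dma_addr_t	mapaddr;
	unsigned int	maplen;
};

static void unmap_after_reuse(struct device *dev, struct rx_elem *e)
{
	struct rx_elem ee = *e;		/* snapshot before the slot is reused */

	/* ... *e may be refilled with a new buffer here ... */

	dma_unmap_single(dev, ee.mapaddr, ee.maplen, DMA_FROM_DEVICE);
}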
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 5472cbd34028..6ca30739625f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c | |||
@@ -180,28 +180,32 @@ static int verify_block_sig(struct mlx5_cmd_prot_block *block) | |||
180 | return 0; | 180 | return 0; |
181 | } | 181 | } |
182 | 182 | ||
183 | static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token) | 183 | static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token, |
184 | int csum) | ||
184 | { | 185 | { |
185 | block->token = token; | 186 | block->token = token; |
186 | block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 2); | 187 | if (csum) { |
187 | block->sig = ~xor8_buf(block, sizeof(*block) - 1); | 188 | block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - |
189 | sizeof(block->data) - 2); | ||
190 | block->sig = ~xor8_buf(block, sizeof(*block) - 1); | ||
191 | } | ||
188 | } | 192 | } |
189 | 193 | ||
190 | static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token) | 194 | static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum) |
191 | { | 195 | { |
192 | struct mlx5_cmd_mailbox *next = msg->next; | 196 | struct mlx5_cmd_mailbox *next = msg->next; |
193 | 197 | ||
194 | while (next) { | 198 | while (next) { |
195 | calc_block_sig(next->buf, token); | 199 | calc_block_sig(next->buf, token, csum); |
196 | next = next->next; | 200 | next = next->next; |
197 | } | 201 | } |
198 | } | 202 | } |
199 | 203 | ||
200 | static void set_signature(struct mlx5_cmd_work_ent *ent) | 204 | static void set_signature(struct mlx5_cmd_work_ent *ent, int csum) |
201 | { | 205 | { |
202 | ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay)); | 206 | ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay)); |
203 | calc_chain_sig(ent->in, ent->token); | 207 | calc_chain_sig(ent->in, ent->token, csum); |
204 | calc_chain_sig(ent->out, ent->token); | 208 | calc_chain_sig(ent->out, ent->token, csum); |
205 | } | 209 | } |
206 | 210 | ||
207 | static void poll_timeout(struct mlx5_cmd_work_ent *ent) | 211 | static void poll_timeout(struct mlx5_cmd_work_ent *ent) |
@@ -539,8 +543,7 @@ static void cmd_work_handler(struct work_struct *work) | |||
539 | lay->type = MLX5_PCI_CMD_XPORT; | 543 | lay->type = MLX5_PCI_CMD_XPORT; |
540 | lay->token = ent->token; | 544 | lay->token = ent->token; |
541 | lay->status_own = CMD_OWNER_HW; | 545 | lay->status_own = CMD_OWNER_HW; |
542 | if (!cmd->checksum_disabled) | 546 | set_signature(ent, !cmd->checksum_disabled); |
543 | set_signature(ent); | ||
544 | dump_command(dev, ent, 1); | 547 | dump_command(dev, ent, 1); |
545 | ktime_get_ts(&ent->ts1); | 548 | ktime_get_ts(&ent->ts1); |
546 | 549 | ||
@@ -773,8 +776,6 @@ static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size) | |||
773 | 776 | ||
774 | copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE); | 777 | copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE); |
775 | block = next->buf; | 778 | block = next->buf; |
776 | if (xor8_buf(block, sizeof(*block)) != 0xff) | ||
777 | return -EINVAL; | ||
778 | 779 | ||
779 | memcpy(to, block->data, copy); | 780 | memcpy(to, block->data, copy); |
780 | to += copy; | 781 | to += copy; |
@@ -1361,6 +1362,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) | |||
1361 | goto err_map; | 1362 | goto err_map; |
1362 | } | 1363 | } |
1363 | 1364 | ||
1365 | cmd->checksum_disabled = 1; | ||
1364 | cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; | 1366 | cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; |
1365 | cmd->bitmask = (1 << cmd->max_reg_cmds) - 1; | 1367 | cmd->bitmask = (1 << cmd->max_reg_cmds) - 1; |
1366 | 1368 | ||
@@ -1510,7 +1512,7 @@ int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr) | |||
1510 | case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO; | 1512 | case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO; |
1511 | case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL; | 1513 | case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL; |
1512 | case MLX5_CMD_STAT_RES_BUSY: return -EBUSY; | 1514 | case MLX5_CMD_STAT_RES_BUSY: return -EBUSY; |
1513 | case MLX5_CMD_STAT_LIM_ERR: return -EINVAL; | 1515 | case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM; |
1514 | case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL; | 1516 | case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL; |
1515 | case MLX5_CMD_STAT_IX_ERR: return -EINVAL; | 1517 | case MLX5_CMD_STAT_IX_ERR: return -EINVAL; |
1516 | case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN; | 1518 | case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN; |
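In the mlx5 cmd.c hunks above the signature helpers gain a csum argument: the mailbox token must always be written, but the ctrl_sig/sig checksums are only computed when command-interface checksumming is in use (and the driver now starts with checksum_disabled = 1). A loose sketch of that split; the struct layout and the byte ranges signed here are simplified, not the real mlx5_cmd_prot_block:

#include <linux/types.h>

struct prot_block {		/* hypothetical reduced mailbox block */
	u8 data[16];
	u8 token;
	u8 ctrl_sig;
	u8 sig;
};

static u8 xor8(const u8 *buf, int len)
{
	u8 sum = 0;
	int i;

	for (i = 0; i < len; i++)
		sum ^= buf[i];
	return sum;
}

static void sign_block(struct prot_block *b, u8 token, int csum)
{
	b->token = token;	/* always required by the device */
	if (!csum)
		return;		/* checksums skipped when disabled */
	b->ctrl_sig = ~xor8((u8 *)b, sizeof(*b) - 2);
	b->sig = ~xor8((u8 *)b, sizeof(*b) - 1);
}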
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 443cc4d7b024..2231d93cc7ad 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c | |||
@@ -366,9 +366,11 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, | |||
366 | goto err_in; | 366 | goto err_in; |
367 | } | 367 | } |
368 | 368 | ||
369 | snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s", | ||
370 | name, pci_name(dev->pdev)); | ||
369 | eq->eqn = out.eq_number; | 371 | eq->eqn = out.eq_number; |
370 | err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0, | 372 | err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0, |
371 | name, eq); | 373 | eq->name, eq); |
372 | if (err) | 374 | if (err) |
373 | goto err_eq; | 375 | goto err_eq; |
374 | 376 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index b47739b0b5f6..bc0f5fb66e24 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c | |||
@@ -165,9 +165,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) | |||
165 | struct mlx5_cmd_set_hca_cap_mbox_in *set_ctx = NULL; | 165 | struct mlx5_cmd_set_hca_cap_mbox_in *set_ctx = NULL; |
166 | struct mlx5_cmd_query_hca_cap_mbox_in query_ctx; | 166 | struct mlx5_cmd_query_hca_cap_mbox_in query_ctx; |
167 | struct mlx5_cmd_set_hca_cap_mbox_out set_out; | 167 | struct mlx5_cmd_set_hca_cap_mbox_out set_out; |
168 | struct mlx5_profile *prof = dev->profile; | ||
169 | u64 flags; | 168 | u64 flags; |
170 | int csum = 1; | ||
171 | int err; | 169 | int err; |
172 | 170 | ||
173 | memset(&query_ctx, 0, sizeof(query_ctx)); | 171 | memset(&query_ctx, 0, sizeof(query_ctx)); |
@@ -197,20 +195,14 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) | |||
197 | memcpy(&set_ctx->hca_cap, &query_out->hca_cap, | 195 | memcpy(&set_ctx->hca_cap, &query_out->hca_cap, |
198 | sizeof(set_ctx->hca_cap)); | 196 | sizeof(set_ctx->hca_cap)); |
199 | 197 | ||
200 | if (prof->mask & MLX5_PROF_MASK_CMDIF_CSUM) { | ||
201 | csum = !!prof->cmdif_csum; | ||
202 | flags = be64_to_cpu(set_ctx->hca_cap.flags); | ||
203 | if (csum) | ||
204 | flags |= MLX5_DEV_CAP_FLAG_CMDIF_CSUM; | ||
205 | else | ||
206 | flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM; | ||
207 | |||
208 | set_ctx->hca_cap.flags = cpu_to_be64(flags); | ||
209 | } | ||
210 | |||
211 | if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE) | 198 | if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE) |
212 | set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp; | 199 | set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp; |
213 | 200 | ||
201 | flags = be64_to_cpu(query_out->hca_cap.flags); | ||
202 | /* disable checksum */ | ||
203 | flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM; | ||
204 | |||
205 | set_ctx->hca_cap.flags = cpu_to_be64(flags); | ||
214 | memset(&set_out, 0, sizeof(set_out)); | 206 | memset(&set_out, 0, sizeof(set_out)); |
215 | set_ctx->hca_cap.log_uar_page_sz = cpu_to_be16(PAGE_SHIFT - 12); | 207 | set_ctx->hca_cap.log_uar_page_sz = cpu_to_be16(PAGE_SHIFT - 12); |
216 | set_ctx->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_HCA_CAP); | 208 | set_ctx->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_HCA_CAP); |
@@ -225,9 +217,6 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) | |||
225 | if (err) | 217 | if (err) |
226 | goto query_ex; | 218 | goto query_ex; |
227 | 219 | ||
228 | if (!csum) | ||
229 | dev->cmd.checksum_disabled = 1; | ||
230 | |||
231 | query_ex: | 220 | query_ex: |
232 | kfree(query_out); | 221 | kfree(query_out); |
233 | kfree(set_ctx); | 222 | kfree(set_ctx); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 3a2408d44820..7b12acf210f8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | |||
@@ -90,6 +90,10 @@ struct mlx5_manage_pages_outbox { | |||
90 | __be64 pas[0]; | 90 | __be64 pas[0]; |
91 | }; | 91 | }; |
92 | 92 | ||
93 | enum { | ||
94 | MAX_RECLAIM_TIME_MSECS = 5000, | ||
95 | }; | ||
96 | |||
93 | static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id) | 97 | static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id) |
94 | { | 98 | { |
95 | struct rb_root *root = &dev->priv.page_root; | 99 | struct rb_root *root = &dev->priv.page_root; |
@@ -279,6 +283,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, | |||
279 | int err; | 283 | int err; |
280 | int i; | 284 | int i; |
281 | 285 | ||
286 | if (nclaimed) | ||
287 | *nclaimed = 0; | ||
288 | |||
282 | memset(&in, 0, sizeof(in)); | 289 | memset(&in, 0, sizeof(in)); |
283 | outlen = sizeof(*out) + npages * sizeof(out->pas[0]); | 290 | outlen = sizeof(*out) + npages * sizeof(out->pas[0]); |
284 | out = mlx5_vzalloc(outlen); | 291 | out = mlx5_vzalloc(outlen); |
@@ -388,20 +395,25 @@ static int optimal_reclaimed_pages(void) | |||
388 | 395 | ||
389 | int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) | 396 | int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) |
390 | { | 397 | { |
391 | unsigned long end = jiffies + msecs_to_jiffies(5000); | 398 | unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS); |
392 | struct fw_page *fwp; | 399 | struct fw_page *fwp; |
393 | struct rb_node *p; | 400 | struct rb_node *p; |
401 | int nclaimed = 0; | ||
394 | int err; | 402 | int err; |
395 | 403 | ||
396 | do { | 404 | do { |
397 | p = rb_first(&dev->priv.page_root); | 405 | p = rb_first(&dev->priv.page_root); |
398 | if (p) { | 406 | if (p) { |
399 | fwp = rb_entry(p, struct fw_page, rb_node); | 407 | fwp = rb_entry(p, struct fw_page, rb_node); |
400 | err = reclaim_pages(dev, fwp->func_id, optimal_reclaimed_pages(), NULL); | 408 | err = reclaim_pages(dev, fwp->func_id, |
409 | optimal_reclaimed_pages(), | ||
410 | &nclaimed); | ||
401 | if (err) { | 411 | if (err) { |
402 | mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err); | 412 | mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err); |
403 | return err; | 413 | return err; |
404 | } | 414 | } |
415 | if (nclaimed) | ||
416 | end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS); | ||
405 | } | 417 | } |
406 | if (time_after(jiffies, end)) { | 418 | if (time_after(jiffies, end)) { |
407 | mlx5_core_warn(dev, "FW did not return all pages. giving up...\n"); | 419 | mlx5_core_warn(dev, "FW did not return all pages. giving up...\n"); |
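The pagealloc.c hunk above starts counting reclaimed pages and pushes the deadline out whenever a reclaim call makes progress, so the 5-second limit only expires after a genuinely quiet period. A simplified sketch of that loop shape; work_remains() and do_reclaim() are hypothetical stand-ins for the rb-tree walk and reclaim_pages():

#include <linux/jiffies.h>
#include <linux/errno.h>

#define RECLAIM_TIMEOUT_MS	5000

static bool work_remains(void)	{ return false; }	/* hypothetical */
static int do_reclaim(void)	{ return 0; }		/* hypothetical: items freed or -errno */

static int reclaim_all(void)
{
	unsigned long end = jiffies + msecs_to_jiffies(RECLAIM_TIMEOUT_MS);

	while (work_remains()) {
		int n = do_reclaim();

		if (n < 0)
			return n;
		if (n > 0)	/* progress: restart the timeout window */
			end = jiffies + msecs_to_jiffies(RECLAIM_TIMEOUT_MS);
		if (time_after(jiffies, end))
			return -ETIMEDOUT;
	}

	return 0;
}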
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 83c2091c9c23..bd1a2d2bc2ae 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c | |||
@@ -543,7 +543,7 @@ static const struct of_device_id moxart_mac_match[] = { | |||
543 | { } | 543 | { } |
544 | }; | 544 | }; |
545 | 545 | ||
546 | struct __initdata platform_driver moxart_mac_driver = { | 546 | static struct platform_driver moxart_mac_driver = { |
547 | .probe = moxart_mac_probe, | 547 | .probe = moxart_mac_probe, |
548 | .remove = moxart_remove, | 548 | .remove = moxart_remove, |
549 | .driver = { | 549 | .driver = { |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 4d7ad0074d1c..ebe4c86e5230 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | |||
@@ -1794,3 +1794,11 @@ const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops = { | |||
1794 | .set_msglevel = qlcnic_set_msglevel, | 1794 | .set_msglevel = qlcnic_set_msglevel, |
1795 | .get_msglevel = qlcnic_get_msglevel, | 1795 | .get_msglevel = qlcnic_get_msglevel, |
1796 | }; | 1796 | }; |
1797 | |||
1798 | const struct ethtool_ops qlcnic_ethtool_failed_ops = { | ||
1799 | .get_settings = qlcnic_get_settings, | ||
1800 | .get_drvinfo = qlcnic_get_drvinfo, | ||
1801 | .set_msglevel = qlcnic_set_msglevel, | ||
1802 | .get_msglevel = qlcnic_get_msglevel, | ||
1803 | .set_dump = qlcnic_set_dump, | ||
1804 | }; | ||
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index c4c5023e1fdf..21d00a0449a1 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | |||
@@ -431,6 +431,9 @@ static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter) | |||
431 | while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) | 431 | while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) |
432 | usleep_range(10000, 11000); | 432 | usleep_range(10000, 11000); |
433 | 433 | ||
434 | if (!adapter->fw_work.work.func) | ||
435 | return; | ||
436 | |||
434 | cancel_delayed_work_sync(&adapter->fw_work); | 437 | cancel_delayed_work_sync(&adapter->fw_work); |
435 | } | 438 | } |
436 | 439 | ||
@@ -2275,8 +2278,9 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2275 | adapter->portnum = adapter->ahw->pci_func; | 2278 | adapter->portnum = adapter->ahw->pci_func; |
2276 | err = qlcnic_start_firmware(adapter); | 2279 | err = qlcnic_start_firmware(adapter); |
2277 | if (err) { | 2280 | if (err) { |
2278 | dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"); | 2281 | dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n" |
2279 | goto err_out_free_hw; | 2282 | "\t\tIf reboot doesn't help, try flashing the card\n"); |
2283 | goto err_out_maintenance_mode; | ||
2280 | } | 2284 | } |
2281 | 2285 | ||
2282 | qlcnic_get_multiq_capability(adapter); | 2286 | qlcnic_get_multiq_capability(adapter); |
@@ -2408,6 +2412,22 @@ err_out_disable_pdev: | |||
2408 | pci_set_drvdata(pdev, NULL); | 2412 | pci_set_drvdata(pdev, NULL); |
2409 | pci_disable_device(pdev); | 2413 | pci_disable_device(pdev); |
2410 | return err; | 2414 | return err; |
2415 | |||
2416 | err_out_maintenance_mode: | ||
2417 | netdev->netdev_ops = &qlcnic_netdev_failed_ops; | ||
2418 | SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops); | ||
2419 | err = register_netdev(netdev); | ||
2420 | |||
2421 | if (err) { | ||
2422 | dev_err(&pdev->dev, "Failed to register net device\n"); | ||
2423 | qlcnic_clr_all_drv_state(adapter, 0); | ||
2424 | goto err_out_free_hw; | ||
2425 | } | ||
2426 | |||
2427 | pci_set_drvdata(pdev, adapter); | ||
2428 | qlcnic_add_sysfs(adapter); | ||
2429 | |||
2430 | return 0; | ||
2411 | } | 2431 | } |
2412 | 2432 | ||
2413 | static void qlcnic_remove(struct pci_dev *pdev) | 2433 | static void qlcnic_remove(struct pci_dev *pdev) |
@@ -2518,8 +2538,16 @@ static int qlcnic_resume(struct pci_dev *pdev) | |||
2518 | static int qlcnic_open(struct net_device *netdev) | 2538 | static int qlcnic_open(struct net_device *netdev) |
2519 | { | 2539 | { |
2520 | struct qlcnic_adapter *adapter = netdev_priv(netdev); | 2540 | struct qlcnic_adapter *adapter = netdev_priv(netdev); |
2541 | u32 state; | ||
2521 | int err; | 2542 | int err; |
2522 | 2543 | ||
2544 | state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); | ||
2545 | if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) { | ||
2546 | netdev_err(netdev, "%s: Device is in FAILED state\n", __func__); | ||
2547 | |||
2548 | return -EIO; | ||
2549 | } | ||
2550 | |||
2523 | netif_carrier_off(netdev); | 2551 | netif_carrier_off(netdev); |
2524 | 2552 | ||
2525 | err = qlcnic_attach(adapter); | 2553 | err = qlcnic_attach(adapter); |
@@ -3228,6 +3256,13 @@ void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter, u32 key) | |||
3228 | return; | 3256 | return; |
3229 | 3257 | ||
3230 | state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); | 3258 | state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); |
3259 | if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) { | ||
3260 | netdev_err(adapter->netdev, "%s: Device is in FAILED state\n", | ||
3261 | __func__); | ||
3262 | qlcnic_api_unlock(adapter); | ||
3263 | |||
3264 | return; | ||
3265 | } | ||
3231 | 3266 | ||
3232 | if (state == QLCNIC_DEV_READY) { | 3267 | if (state == QLCNIC_DEV_READY) { |
3233 | QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, | 3268 | QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c index 330d9a8774ad..686f460b1502 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | |||
@@ -397,6 +397,7 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter) | |||
397 | { | 397 | { |
398 | struct net_device *netdev = adapter->netdev; | 398 | struct net_device *netdev = adapter->netdev; |
399 | 399 | ||
400 | rtnl_lock(); | ||
400 | if (netif_running(netdev)) | 401 | if (netif_running(netdev)) |
401 | __qlcnic_down(adapter, netdev); | 402 | __qlcnic_down(adapter, netdev); |
402 | 403 | ||
@@ -407,12 +408,15 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter) | |||
407 | /* After disabling SRIOV re-init the driver in default mode | 408 | /* After disabling SRIOV re-init the driver in default mode |
408 | configure opmode based on op_mode of function | 409 | configure opmode based on op_mode of function |
409 | */ | 410 | */ |
410 | if (qlcnic_83xx_configure_opmode(adapter)) | 411 | if (qlcnic_83xx_configure_opmode(adapter)) { |
412 | rtnl_unlock(); | ||
411 | return -EIO; | 413 | return -EIO; |
414 | } | ||
412 | 415 | ||
413 | if (netif_running(netdev)) | 416 | if (netif_running(netdev)) |
414 | __qlcnic_up(adapter, netdev); | 417 | __qlcnic_up(adapter, netdev); |
415 | 418 | ||
419 | rtnl_unlock(); | ||
416 | return 0; | 420 | return 0; |
417 | } | 421 | } |
418 | 422 | ||
@@ -533,6 +537,7 @@ static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs) | |||
533 | return -EIO; | 537 | return -EIO; |
534 | } | 538 | } |
535 | 539 | ||
540 | rtnl_lock(); | ||
536 | if (netif_running(netdev)) | 541 | if (netif_running(netdev)) |
537 | __qlcnic_down(adapter, netdev); | 542 | __qlcnic_down(adapter, netdev); |
538 | 543 | ||
@@ -555,6 +560,7 @@ static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs) | |||
555 | __qlcnic_up(adapter, netdev); | 560 | __qlcnic_up(adapter, netdev); |
556 | 561 | ||
557 | error: | 562 | error: |
563 | rtnl_unlock(); | ||
558 | return err; | 564 | return err; |
559 | } | 565 | } |
560 | 566 | ||
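The qlcnic_sriov_pf.c hunks wrap the netif_running() check and the interface down/up sequence in rtnl_lock()/rtnl_unlock(), so SR-IOV enable/disable cannot race with a concurrent open or stop, and every early-return path now drops the lock. A minimal sketch of the shape; __example_down(), __example_up() and reconfigure_hw() are hypothetical stand-ins for the driver's internal helpers:

#include <linux/rtnetlink.h>
#include <linux/netdevice.h>

static void __example_down(struct net_device *dev) { }		/* hypothetical */
static void __example_up(struct net_device *dev) { }		/* hypothetical */
static int reconfigure_hw(struct net_device *dev) { return 0; }	/* hypothetical */

static int reconfigure_under_rtnl(struct net_device *netdev)
{
	int err;

	rtnl_lock();
	if (netif_running(netdev))
		__example_down(netdev);

	err = reconfigure_hw(netdev);
	if (err) {
		rtnl_unlock();		/* every exit path releases the lock */
		return err;
	}

	if (netif_running(netdev))
		__example_up(netdev);
	rtnl_unlock();

	return 0;
}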
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index c6165d05cc13..019f4377307f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | |||
@@ -1272,6 +1272,7 @@ void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter) | |||
1272 | void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter) | 1272 | void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter) |
1273 | { | 1273 | { |
1274 | struct device *dev = &adapter->pdev->dev; | 1274 | struct device *dev = &adapter->pdev->dev; |
1275 | u32 state; | ||
1275 | 1276 | ||
1276 | if (device_create_bin_file(dev, &bin_attr_port_stats)) | 1277 | if (device_create_bin_file(dev, &bin_attr_port_stats)) |
1277 | dev_info(dev, "failed to create port stats sysfs entry"); | 1278 | dev_info(dev, "failed to create port stats sysfs entry"); |
@@ -1285,8 +1286,13 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter) | |||
1285 | if (device_create_bin_file(dev, &bin_attr_mem)) | 1286 | if (device_create_bin_file(dev, &bin_attr_mem)) |
1286 | dev_info(dev, "failed to create mem sysfs entry\n"); | 1287 | dev_info(dev, "failed to create mem sysfs entry\n"); |
1287 | 1288 | ||
1289 | state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); | ||
1290 | if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) | ||
1291 | return; | ||
1292 | |||
1288 | if (device_create_bin_file(dev, &bin_attr_pci_config)) | 1293 | if (device_create_bin_file(dev, &bin_attr_pci_config)) |
1289 | dev_info(dev, "failed to create pci config sysfs entry"); | 1294 | dev_info(dev, "failed to create pci config sysfs entry"); |
1295 | |||
1290 | if (device_create_file(dev, &dev_attr_beacon)) | 1296 | if (device_create_file(dev, &dev_attr_beacon)) |
1291 | dev_info(dev, "failed to create beacon sysfs entry"); | 1297 | dev_info(dev, "failed to create beacon sysfs entry"); |
1292 | 1298 | ||
@@ -1307,6 +1313,7 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter) | |||
1307 | void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter) | 1313 | void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter) |
1308 | { | 1314 | { |
1309 | struct device *dev = &adapter->pdev->dev; | 1315 | struct device *dev = &adapter->pdev->dev; |
1316 | u32 state; | ||
1310 | 1317 | ||
1311 | device_remove_bin_file(dev, &bin_attr_port_stats); | 1318 | device_remove_bin_file(dev, &bin_attr_port_stats); |
1312 | 1319 | ||
@@ -1315,6 +1322,11 @@ void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter) | |||
1315 | device_remove_file(dev, &dev_attr_diag_mode); | 1322 | device_remove_file(dev, &dev_attr_diag_mode); |
1316 | device_remove_bin_file(dev, &bin_attr_crb); | 1323 | device_remove_bin_file(dev, &bin_attr_crb); |
1317 | device_remove_bin_file(dev, &bin_attr_mem); | 1324 | device_remove_bin_file(dev, &bin_attr_mem); |
1325 | |||
1326 | state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); | ||
1327 | if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) | ||
1328 | return; | ||
1329 | |||
1318 | device_remove_bin_file(dev, &bin_attr_pci_config); | 1330 | device_remove_bin_file(dev, &bin_attr_pci_config); |
1319 | device_remove_file(dev, &dev_attr_beacon); | 1331 | device_remove_file(dev, &dev_attr_beacon); |
1320 | if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) | 1332 | if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) |
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c index 10093f0c4c0f..6bc5db703920 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c | |||
@@ -740,8 +740,8 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) | |||
740 | int i; | 740 | int i; |
741 | 741 | ||
742 | if (!mpi_coredump) { | 742 | if (!mpi_coredump) { |
743 | netif_err(qdev, drv, qdev->ndev, "No memory available\n"); | 743 | netif_err(qdev, drv, qdev->ndev, "No memory allocated\n"); |
744 | return -ENOMEM; | 744 | return -EINVAL; |
745 | } | 745 | } |
746 | 746 | ||
747 | /* Try to get the spinlock, but dont worry if | 747 | /* Try to get the spinlock, but dont worry if |
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c index ff2bf8a4e247..7ad146080c36 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c | |||
@@ -1274,7 +1274,7 @@ void ql_mpi_reset_work(struct work_struct *work) | |||
1274 | return; | 1274 | return; |
1275 | } | 1275 | } |
1276 | 1276 | ||
1277 | if (!ql_core_dump(qdev, qdev->mpi_coredump)) { | 1277 | if (qdev->mpi_coredump && !ql_core_dump(qdev, qdev->mpi_coredump)) { |
1278 | netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n"); | 1278 | netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n"); |
1279 | qdev->core_is_dumped = 1; | 1279 | qdev->core_is_dumped = 1; |
1280 | queue_delayed_work(qdev->workqueue, | 1280 | queue_delayed_work(qdev->workqueue, |
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 128d7cdf9eb2..c082562dbf4e 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c | |||
@@ -27,10 +27,10 @@ | |||
27 | 27 | ||
28 | /* A reboot/assertion causes the MCDI status word to be set after the | 28 | /* A reboot/assertion causes the MCDI status word to be set after the |
29 | * command word is set or a REBOOT event is sent. If we notice a reboot | 29 | * command word is set or a REBOOT event is sent. If we notice a reboot |
30 | * via these mechanisms then wait 20ms for the status word to be set. | 30 | * via these mechanisms then wait 250ms for the status word to be set. |
31 | */ | 31 | */ |
32 | #define MCDI_STATUS_DELAY_US 100 | 32 | #define MCDI_STATUS_DELAY_US 100 |
33 | #define MCDI_STATUS_DELAY_COUNT 200 | 33 | #define MCDI_STATUS_DELAY_COUNT 2500 |
34 | #define MCDI_STATUS_SLEEP_MS \ | 34 | #define MCDI_STATUS_SLEEP_MS \ |
35 | (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000) | 35 | (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000) |
36 | 36 | ||
@@ -800,9 +800,6 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc) | |||
800 | } else { | 800 | } else { |
801 | int count; | 801 | int count; |
802 | 802 | ||
803 | /* Nobody was waiting for an MCDI request, so trigger a reset */ | ||
804 | efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); | ||
805 | |||
806 | /* Consume the status word since efx_mcdi_rpc_finish() won't */ | 803 | /* Consume the status word since efx_mcdi_rpc_finish() won't */ |
807 | for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) { | 804 | for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) { |
808 | if (efx_mcdi_poll_reboot(efx)) | 805 | if (efx_mcdi_poll_reboot(efx)) |
@@ -810,6 +807,9 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc) | |||
810 | udelay(MCDI_STATUS_DELAY_US); | 807 | udelay(MCDI_STATUS_DELAY_US); |
811 | } | 808 | } |
812 | mcdi->new_epoch = true; | 809 | mcdi->new_epoch = true; |
810 | |||
811 | /* Nobody was waiting for an MCDI request, so trigger a reset */ | ||
812 | efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); | ||
813 | } | 813 | } |
814 | 814 | ||
815 | spin_unlock(&mcdi->iface_lock); | 815 | spin_unlock(&mcdi->iface_lock); |
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index c8f088ab5fdf..bdf697b184ae 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c | |||
@@ -32,7 +32,7 @@ | |||
32 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 32 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
33 | 33 | ||
34 | #define DRV_NAME "via-rhine" | 34 | #define DRV_NAME "via-rhine" |
35 | #define DRV_VERSION "1.5.0" | 35 | #define DRV_VERSION "1.5.1" |
36 | #define DRV_RELDATE "2010-10-09" | 36 | #define DRV_RELDATE "2010-10-09" |
37 | 37 | ||
38 | #include <linux/types.h> | 38 | #include <linux/types.h> |
@@ -1704,7 +1704,12 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb, | |||
1704 | cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN)); | 1704 | cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN)); |
1705 | 1705 | ||
1706 | if (unlikely(vlan_tx_tag_present(skb))) { | 1706 | if (unlikely(vlan_tx_tag_present(skb))) { |
1707 | rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16); | 1707 | u16 vid_pcp = vlan_tx_tag_get(skb); |
1708 | |||
1709 | /* drop CFI/DEI bit, register needs VID and PCP */ | ||
1710 | vid_pcp = (vid_pcp & VLAN_VID_MASK) | | ||
1711 | ((vid_pcp & VLAN_PRIO_MASK) >> 1); | ||
1712 | rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16); | ||
1708 | /* request tagging */ | 1713 | /* request tagging */ |
1709 | rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000); | 1714 | rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000); |
1710 | } | 1715 | } |
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index b88121f240ca..0029148077a9 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c | |||
@@ -297,6 +297,12 @@ static int temac_dma_bd_init(struct net_device *ndev) | |||
297 | lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); | 297 | lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); |
298 | lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p); | 298 | lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p); |
299 | 299 | ||
300 | /* Init descriptor indexes */ | ||
301 | lp->tx_bd_ci = 0; | ||
302 | lp->tx_bd_next = 0; | ||
303 | lp->tx_bd_tail = 0; | ||
304 | lp->rx_bd_ci = 0; | ||
305 | |||
300 | return 0; | 306 | return 0; |
301 | 307 | ||
302 | out: | 308 | out: |
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c index a34d6bf5e43b..cc70ecfc7062 100644 --- a/drivers/net/slip/slip.c +++ b/drivers/net/slip/slip.c | |||
@@ -429,11 +429,13 @@ static void slip_write_wakeup(struct tty_struct *tty) | |||
429 | if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) | 429 | if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) |
430 | return; | 430 | return; |
431 | 431 | ||
432 | spin_lock(&sl->lock); | ||
432 | if (sl->xleft <= 0) { | 433 | if (sl->xleft <= 0) { |
433 | /* Now serial buffer is almost free & we can start | 434 | /* Now serial buffer is almost free & we can start |
434 | * transmission of another packet */ | 435 | * transmission of another packet */ |
435 | sl->dev->stats.tx_packets++; | 436 | sl->dev->stats.tx_packets++; |
436 | clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); | 437 | clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); |
438 | spin_unlock(&sl->lock); | ||
437 | sl_unlock(sl); | 439 | sl_unlock(sl); |
438 | return; | 440 | return; |
439 | } | 441 | } |
@@ -441,6 +443,7 @@ static void slip_write_wakeup(struct tty_struct *tty) | |||
441 | actual = tty->ops->write(tty, sl->xhead, sl->xleft); | 443 | actual = tty->ops->write(tty, sl->xhead, sl->xleft); |
442 | sl->xleft -= actual; | 444 | sl->xleft -= actual; |
443 | sl->xhead += actual; | 445 | sl->xhead += actual; |
446 | spin_unlock(&sl->lock); | ||
444 | } | 447 | } |
445 | 448 | ||
446 | static void sl_tx_timeout(struct net_device *dev) | 449 | static void sl_tx_timeout(struct net_device *dev) |
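The slip hunk above takes sl->lock across the whole write-wakeup path, so reading and updating the xleft/xhead transmit cursor can no longer race with a concurrent transmit. As a generic illustration of the pattern (not the driver's code; names and the pthread mutex are stand-ins for the kernel spinlock), the shared cursor is only touched under the lock:

	#include <pthread.h>

	struct tx_state {
		pthread_mutex_t lock;
		char  buf[256];
		char *head;  /* next byte to push to the device */
		int   left;  /* bytes still queued */
	};

	/* Wakeup side: push more queued bytes, or finish; the whole
	 * read-modify-write of head/left happens under the lock. */
	static void tx_wakeup(struct tx_state *t, int (*dev_write)(const char *, int))
	{
		pthread_mutex_lock(&t->lock);
		if (t->left <= 0) {
			pthread_mutex_unlock(&t->lock);
			return; /* nothing pending */
		}
		int n = dev_write(t->head, t->left);
		t->left -= n;
		t->head += n;
		pthread_mutex_unlock(&t->lock);
	}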
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index 2dbb9460349d..c6867f926cff 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c | |||
@@ -303,7 +303,7 @@ static void dm9601_set_multicast(struct net_device *net) | |||
303 | rx_ctl |= 0x02; | 303 | rx_ctl |= 0x02; |
304 | } else if (net->flags & IFF_ALLMULTI || | 304 | } else if (net->flags & IFF_ALLMULTI || |
305 | netdev_mc_count(net) > DM_MAX_MCAST) { | 305 | netdev_mc_count(net) > DM_MAX_MCAST) { |
306 | rx_ctl |= 0x04; | 306 | rx_ctl |= 0x08; |
307 | } else if (!netdev_mc_empty(net)) { | 307 | } else if (!netdev_mc_empty(net)) { |
308 | struct netdev_hw_addr *ha; | 308 | struct netdev_hw_addr *ha; |
309 | 309 | ||
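On the one-byte dm9601 change above: in the device's RX control register the "pass all multicast" bit is reportedly 0x08, while 0x04 passes undersized (runt) frames, so the old value never actually enabled all-multicast reception for IFF_ALLMULTI. A hedged sketch of how the bits could be spelled out (the macro names are my own shorthand, not the vendor's):

	/* DM9601 RX control register bits (sketch; the datasheet is authoritative) */
	#define RCR_RXEN   0x01  /* receive enable */
	#define RCR_PRMSC  0x02  /* promiscuous */
	#define RCR_RUNT   0x04  /* pass undersized frames - what the old code set */
	#define RCR_ALL    0x08  /* pass all multicast - what IFF_ALLMULTI needs */

	static unsigned char rx_ctl_for_allmulti(unsigned char rx_ctl)
	{
		return (unsigned char)(rx_ctl | RCR_ALL);
	}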
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 6312332afeba..3d6aaf79d8b2 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
@@ -714,7 +714,7 @@ static const struct usb_device_id products[] = { | |||
714 | {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ | 714 | {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ |
715 | {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ | 715 | {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ |
716 | {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ | 716 | {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ |
717 | {QMI_FIXED_INTF(0x1e2d, 0x12d1, 4)}, /* Cinterion PLxx */ | 717 | {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ |
718 | 718 | ||
719 | /* 4. Gobi 1000 devices */ | 719 | /* 4. Gobi 1000 devices */ |
720 | {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ | 720 | {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ |
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 7b331e613e02..bf94e10a37c8 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c | |||
@@ -1241,7 +1241,9 @@ static int build_dma_sg(const struct sk_buff *skb, struct urb *urb) | |||
1241 | if (num_sgs == 1) | 1241 | if (num_sgs == 1) |
1242 | return 0; | 1242 | return 0; |
1243 | 1243 | ||
1244 | urb->sg = kmalloc(num_sgs * sizeof(struct scatterlist), GFP_ATOMIC); | 1244 | /* reserve one for zero packet */ |
1245 | urb->sg = kmalloc((num_sgs + 1) * sizeof(struct scatterlist), | ||
1246 | GFP_ATOMIC); | ||
1245 | if (!urb->sg) | 1247 | if (!urb->sg) |
1246 | return -ENOMEM; | 1248 | return -ENOMEM; |
1247 | 1249 | ||
@@ -1305,7 +1307,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, | |||
1305 | if (build_dma_sg(skb, urb) < 0) | 1307 | if (build_dma_sg(skb, urb) < 0) |
1306 | goto drop; | 1308 | goto drop; |
1307 | } | 1309 | } |
1308 | entry->length = length = urb->transfer_buffer_length; | 1310 | length = urb->transfer_buffer_length; |
1309 | 1311 | ||
1310 | /* don't assume the hardware handles USB_ZERO_PACKET | 1312 | /* don't assume the hardware handles USB_ZERO_PACKET |
1311 | * NOTE: strictly conforming cdc-ether devices should expect | 1313 | * NOTE: strictly conforming cdc-ether devices should expect |
@@ -1317,15 +1319,18 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, | |||
1317 | if (length % dev->maxpacket == 0) { | 1319 | if (length % dev->maxpacket == 0) { |
1318 | if (!(info->flags & FLAG_SEND_ZLP)) { | 1320 | if (!(info->flags & FLAG_SEND_ZLP)) { |
1319 | if (!(info->flags & FLAG_MULTI_PACKET)) { | 1321 | if (!(info->flags & FLAG_MULTI_PACKET)) { |
1320 | urb->transfer_buffer_length++; | 1322 | length++; |
1321 | if (skb_tailroom(skb)) { | 1323 | if (skb_tailroom(skb) && !urb->num_sgs) { |
1322 | skb->data[skb->len] = 0; | 1324 | skb->data[skb->len] = 0; |
1323 | __skb_put(skb, 1); | 1325 | __skb_put(skb, 1); |
1324 | } | 1326 | } else if (urb->num_sgs) |
1327 | sg_set_buf(&urb->sg[urb->num_sgs++], | ||
1328 | dev->padding_pkt, 1); | ||
1325 | } | 1329 | } |
1326 | } else | 1330 | } else |
1327 | urb->transfer_flags |= URB_ZERO_PACKET; | 1331 | urb->transfer_flags |= URB_ZERO_PACKET; |
1328 | } | 1332 | } |
1333 | entry->length = urb->transfer_buffer_length = length; | ||
1329 | 1334 | ||
1330 | spin_lock_irqsave(&dev->txq.lock, flags); | 1335 | spin_lock_irqsave(&dev->txq.lock, flags); |
1331 | retval = usb_autopm_get_interface_async(dev->intf); | 1336 | retval = usb_autopm_get_interface_async(dev->intf); |
@@ -1509,6 +1514,7 @@ void usbnet_disconnect (struct usb_interface *intf) | |||
1509 | 1514 | ||
1510 | usb_kill_urb(dev->interrupt); | 1515 | usb_kill_urb(dev->interrupt); |
1511 | usb_free_urb(dev->interrupt); | 1516 | usb_free_urb(dev->interrupt); |
1517 | kfree(dev->padding_pkt); | ||
1512 | 1518 | ||
1513 | free_netdev(net); | 1519 | free_netdev(net); |
1514 | } | 1520 | } |
@@ -1679,9 +1685,16 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) | |||
1679 | /* initialize max rx_qlen and tx_qlen */ | 1685 | /* initialize max rx_qlen and tx_qlen */ |
1680 | usbnet_update_max_qlen(dev); | 1686 | usbnet_update_max_qlen(dev); |
1681 | 1687 | ||
1688 | if (dev->can_dma_sg && !(info->flags & FLAG_SEND_ZLP) && | ||
1689 | !(info->flags & FLAG_MULTI_PACKET)) { | ||
1690 | dev->padding_pkt = kzalloc(1, GFP_KERNEL); | ||
1691 | if (!dev->padding_pkt) | ||
1692 | goto out4; | ||
1693 | } | ||
1694 | |||
1682 | status = register_netdev (net); | 1695 | status = register_netdev (net); |
1683 | if (status) | 1696 | if (status) |
1684 | goto out4; | 1697 | goto out5; |
1685 | netif_info(dev, probe, dev->net, | 1698 | netif_info(dev, probe, dev->net, |
1686 | "register '%s' at usb-%s-%s, %s, %pM\n", | 1699 | "register '%s' at usb-%s-%s, %s, %pM\n", |
1687 | udev->dev.driver->name, | 1700 | udev->dev.driver->name, |
@@ -1699,6 +1712,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) | |||
1699 | 1712 | ||
1700 | return 0; | 1713 | return 0; |
1701 | 1714 | ||
1715 | out5: | ||
1716 | kfree(dev->padding_pkt); | ||
1702 | out4: | 1717 | out4: |
1703 | usb_free_urb(dev->interrupt); | 1718 | usb_free_urb(dev->interrupt); |
1704 | out3: | 1719 | out3: |
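The usbnet hunks above handle the "length is an exact multiple of wMaxPacketSize" case for scatter-gather URBs: when the device needs explicit padding instead of a zero-length packet, a preallocated 1-byte padding buffer is chained as an extra scatterlist entry, which is why build_dma_sg() now reserves num_sgs + 1 entries and why probe allocates dev->padding_pkt up front (freed on disconnect and on the new out5 error path). A condensed sketch of the decision logic, with invented names; the FLAG_MULTI_PACKET case, where the minidriver frames packets itself, is omitted:

	enum tail_fix { TAIL_NONE, TAIL_ZLP, TAIL_PAD_INLINE, TAIL_PAD_SG };

	/* Sketch of how the tail of a max-packet-aligned transfer gets fixed up. */
	static enum tail_fix pick_tail_fix(int len, int maxpacket,
					   int device_accepts_zlp,
					   int urb_uses_sg, int skb_has_tailroom)
	{
		if (len % maxpacket != 0)
			return TAIL_NONE;        /* already ends in a short packet */
		if (device_accepts_zlp)
			return TAIL_ZLP;         /* set URB_ZERO_PACKET */
		if (!urb_uses_sg && skb_has_tailroom)
			return TAIL_PAD_INLINE;  /* append one 0x00 byte to the skb */
		return TAIL_PAD_SG;              /* chain the 1-byte padding buffer as
						  * one extra scatterlist entry */
	}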
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index d1292fe746bc..2ef5b6219f3f 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -952,8 +952,7 @@ void vxlan_sock_release(struct vxlan_sock *vs) | |||
952 | 952 | ||
953 | spin_lock(&vn->sock_lock); | 953 | spin_lock(&vn->sock_lock); |
954 | hlist_del_rcu(&vs->hlist); | 954 | hlist_del_rcu(&vs->hlist); |
955 | smp_wmb(); | 955 | rcu_assign_sk_user_data(vs->sock->sk, NULL); |
956 | vs->sock->sk->sk_user_data = NULL; | ||
957 | vxlan_notify_del_rx_port(sk); | 956 | vxlan_notify_del_rx_port(sk); |
958 | spin_unlock(&vn->sock_lock); | 957 | spin_unlock(&vn->sock_lock); |
959 | 958 | ||
@@ -1048,8 +1047,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) | |||
1048 | 1047 | ||
1049 | port = inet_sk(sk)->inet_sport; | 1048 | port = inet_sk(sk)->inet_sport; |
1050 | 1049 | ||
1051 | smp_read_barrier_depends(); | 1050 | vs = rcu_dereference_sk_user_data(sk); |
1052 | vs = (struct vxlan_sock *)sk->sk_user_data; | ||
1053 | if (!vs) | 1051 | if (!vs) |
1054 | goto drop; | 1052 | goto drop; |
1055 | 1053 | ||
@@ -2302,8 +2300,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port, | |||
2302 | atomic_set(&vs->refcnt, 1); | 2300 | atomic_set(&vs->refcnt, 1); |
2303 | vs->rcv = rcv; | 2301 | vs->rcv = rcv; |
2304 | vs->data = data; | 2302 | vs->data = data; |
2305 | smp_wmb(); | 2303 | rcu_assign_sk_user_data(vs->sock->sk, vs); |
2306 | vs->sock->sk->sk_user_data = vs; | ||
2307 | 2304 | ||
2308 | spin_lock(&vn->sock_lock); | 2305 | spin_lock(&vn->sock_lock); |
2309 | hlist_add_head_rcu(&vs->hlist, vs_head(net, port)); | 2306 | hlist_add_head_rcu(&vs->hlist, vs_head(net, port)); |
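The vxlan hunks replace an open-coded smp_wmb()/smp_read_barrier_depends() pair around sk->sk_user_data with the rcu_assign_sk_user_data()/rcu_dereference_sk_user_data() helpers, which package the same publish/consume ordering. The underlying idea, shown here as a user-space analogy with C11 atomics rather than the kernel helpers (a release store publishes a fully initialised object; the matching acquire load on the reader side sees it initialised):

	#include <stdatomic.h>
	#include <stdlib.h>

	struct sock_priv { int port; };

	static _Atomic(struct sock_priv *) user_data;

	/* Publisher: initialise first, then publish with release ordering
	 * (roughly what rcu_assign_pointer() does). */
	static int publish(int port)
	{
		struct sock_priv *p = malloc(sizeof(*p));
		if (!p)
			return -1;
		p->port = port;
		atomic_store_explicit(&user_data, p, memory_order_release);
		return 0;
	}

	/* Reader: the acquire load pairs with the release store above, so the
	 * fields reached through the pointer are fully initialised. */
	static int read_port(void)
	{
		struct sock_priv *p =
			atomic_load_explicit(&user_data, memory_order_acquire);
		return p ? p->port : -1;
	}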
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 4ee472a5a4e4..ab9e3a8410bc 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c | |||
@@ -1270,13 +1270,6 @@ static void ath9k_antenna_check(struct ath_softc *sc, | |||
1270 | return; | 1270 | return; |
1271 | 1271 | ||
1272 | /* | 1272 | /* |
1273 | * All MPDUs in an aggregate will use the same LNA | ||
1274 | * as the first MPDU. | ||
1275 | */ | ||
1276 | if (rs->rs_isaggr && !rs->rs_firstaggr) | ||
1277 | return; | ||
1278 | |||
1279 | /* | ||
1280 | * Change the default rx antenna if rx diversity | 1273 | * Change the default rx antenna if rx diversity |
1281 | * chooses the other antenna 3 times in a row. | 1274 | * chooses the other antenna 3 times in a row. |
1282 | */ | 1275 | */ |
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 35b515fe3ffa..5ac713d2ff5d 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c | |||
@@ -399,6 +399,7 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf) | |||
399 | tbf->bf_buf_addr = bf->bf_buf_addr; | 399 | tbf->bf_buf_addr = bf->bf_buf_addr; |
400 | memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len); | 400 | memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len); |
401 | tbf->bf_state = bf->bf_state; | 401 | tbf->bf_state = bf->bf_state; |
402 | tbf->bf_state.stale = false; | ||
402 | 403 | ||
403 | return tbf; | 404 | return tbf; |
404 | } | 405 | } |
@@ -1389,11 +1390,15 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, | |||
1389 | u16 tid, u16 *ssn) | 1390 | u16 tid, u16 *ssn) |
1390 | { | 1391 | { |
1391 | struct ath_atx_tid *txtid; | 1392 | struct ath_atx_tid *txtid; |
1393 | struct ath_txq *txq; | ||
1392 | struct ath_node *an; | 1394 | struct ath_node *an; |
1393 | u8 density; | 1395 | u8 density; |
1394 | 1396 | ||
1395 | an = (struct ath_node *)sta->drv_priv; | 1397 | an = (struct ath_node *)sta->drv_priv; |
1396 | txtid = ATH_AN_2_TID(an, tid); | 1398 | txtid = ATH_AN_2_TID(an, tid); |
1399 | txq = txtid->ac->txq; | ||
1400 | |||
1401 | ath_txq_lock(sc, txq); | ||
1397 | 1402 | ||
1398 | /* update ampdu factor/density, they may have changed. This may happen | 1403 | /* update ampdu factor/density, they may have changed. This may happen |
1399 | * in HT IBSS when a beacon with HT-info is received after the station | 1404 | * in HT IBSS when a beacon with HT-info is received after the station |
@@ -1417,6 +1422,8 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, | |||
1417 | memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf)); | 1422 | memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf)); |
1418 | txtid->baw_head = txtid->baw_tail = 0; | 1423 | txtid->baw_head = txtid->baw_tail = 0; |
1419 | 1424 | ||
1425 | ath_txq_unlock_complete(sc, txq); | ||
1426 | |||
1420 | return 0; | 1427 | return 0; |
1421 | } | 1428 | } |
1422 | 1429 | ||
@@ -1555,8 +1562,10 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw, | |||
1555 | __skb_unlink(bf->bf_mpdu, tid_q); | 1562 | __skb_unlink(bf->bf_mpdu, tid_q); |
1556 | list_add_tail(&bf->list, &bf_q); | 1563 | list_add_tail(&bf->list, &bf_q); |
1557 | ath_set_rates(tid->an->vif, tid->an->sta, bf); | 1564 | ath_set_rates(tid->an->vif, tid->an->sta, bf); |
1558 | ath_tx_addto_baw(sc, tid, bf); | 1565 | if (bf_isampdu(bf)) { |
1559 | bf->bf_state.bf_type &= ~BUF_AGGR; | 1566 | ath_tx_addto_baw(sc, tid, bf); |
1567 | bf->bf_state.bf_type &= ~BUF_AGGR; | ||
1568 | } | ||
1560 | if (bf_tail) | 1569 | if (bf_tail) |
1561 | bf_tail->bf_next = bf; | 1570 | bf_tail->bf_next = bf; |
1562 | 1571 | ||
@@ -1950,7 +1959,9 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, | |||
1950 | if (bf_is_ampdu_not_probing(bf)) | 1959 | if (bf_is_ampdu_not_probing(bf)) |
1951 | txq->axq_ampdu_depth++; | 1960 | txq->axq_ampdu_depth++; |
1952 | 1961 | ||
1953 | bf = bf->bf_lastbf->bf_next; | 1962 | bf_last = bf->bf_lastbf; |
1963 | bf = bf_last->bf_next; | ||
1964 | bf_last->bf_next = NULL; | ||
1954 | } | 1965 | } |
1955 | } | 1966 | } |
1956 | } | 1967 | } |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c index 64f4a2bc8dde..c3462b75bd08 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c | |||
@@ -464,8 +464,6 @@ static struct sdio_driver brcmf_sdmmc_driver = { | |||
464 | 464 | ||
465 | static int brcmf_sdio_pd_probe(struct platform_device *pdev) | 465 | static int brcmf_sdio_pd_probe(struct platform_device *pdev) |
466 | { | 466 | { |
467 | int ret; | ||
468 | |||
469 | brcmf_dbg(SDIO, "Enter\n"); | 467 | brcmf_dbg(SDIO, "Enter\n"); |
470 | 468 | ||
471 | brcmfmac_sdio_pdata = pdev->dev.platform_data; | 469 | brcmfmac_sdio_pdata = pdev->dev.platform_data; |
@@ -473,11 +471,7 @@ static int brcmf_sdio_pd_probe(struct platform_device *pdev) | |||
473 | if (brcmfmac_sdio_pdata->power_on) | 471 | if (brcmfmac_sdio_pdata->power_on) |
474 | brcmfmac_sdio_pdata->power_on(); | 472 | brcmfmac_sdio_pdata->power_on(); |
475 | 473 | ||
476 | ret = sdio_register_driver(&brcmf_sdmmc_driver); | 474 | return 0; |
477 | if (ret) | ||
478 | brcmf_err("sdio_register_driver failed: %d\n", ret); | ||
479 | |||
480 | return ret; | ||
481 | } | 475 | } |
482 | 476 | ||
483 | static int brcmf_sdio_pd_remove(struct platform_device *pdev) | 477 | static int brcmf_sdio_pd_remove(struct platform_device *pdev) |
@@ -500,6 +494,15 @@ static struct platform_driver brcmf_sdio_pd = { | |||
500 | } | 494 | } |
501 | }; | 495 | }; |
502 | 496 | ||
497 | void brcmf_sdio_register(void) | ||
498 | { | ||
499 | int ret; | ||
500 | |||
501 | ret = sdio_register_driver(&brcmf_sdmmc_driver); | ||
502 | if (ret) | ||
503 | brcmf_err("sdio_register_driver failed: %d\n", ret); | ||
504 | } | ||
505 | |||
503 | void brcmf_sdio_exit(void) | 506 | void brcmf_sdio_exit(void) |
504 | { | 507 | { |
505 | brcmf_dbg(SDIO, "Enter\n"); | 508 | brcmf_dbg(SDIO, "Enter\n"); |
@@ -510,18 +513,13 @@ void brcmf_sdio_exit(void) | |||
510 | sdio_unregister_driver(&brcmf_sdmmc_driver); | 513 | sdio_unregister_driver(&brcmf_sdmmc_driver); |
511 | } | 514 | } |
512 | 515 | ||
513 | void brcmf_sdio_init(void) | 516 | void __init brcmf_sdio_init(void) |
514 | { | 517 | { |
515 | int ret; | 518 | int ret; |
516 | 519 | ||
517 | brcmf_dbg(SDIO, "Enter\n"); | 520 | brcmf_dbg(SDIO, "Enter\n"); |
518 | 521 | ||
519 | ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe); | 522 | ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe); |
520 | if (ret == -ENODEV) { | 523 | if (ret == -ENODEV) |
521 | brcmf_dbg(SDIO, "No platform data available, registering without.\n"); | 524 | brcmf_dbg(SDIO, "No platform data available.\n"); |
522 | ret = sdio_register_driver(&brcmf_sdmmc_driver); | ||
523 | } | ||
524 | |||
525 | if (ret) | ||
526 | brcmf_err("driver registration failed: %d\n", ret); | ||
527 | } | 525 | } |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h index f7c1985844e4..74156f84180c 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h | |||
@@ -156,10 +156,11 @@ extern int brcmf_bus_start(struct device *dev); | |||
156 | #ifdef CONFIG_BRCMFMAC_SDIO | 156 | #ifdef CONFIG_BRCMFMAC_SDIO |
157 | extern void brcmf_sdio_exit(void); | 157 | extern void brcmf_sdio_exit(void); |
158 | extern void brcmf_sdio_init(void); | 158 | extern void brcmf_sdio_init(void); |
159 | extern void brcmf_sdio_register(void); | ||
159 | #endif | 160 | #endif |
160 | #ifdef CONFIG_BRCMFMAC_USB | 161 | #ifdef CONFIG_BRCMFMAC_USB |
161 | extern void brcmf_usb_exit(void); | 162 | extern void brcmf_usb_exit(void); |
162 | extern void brcmf_usb_init(void); | 163 | extern void brcmf_usb_register(void); |
163 | #endif | 164 | #endif |
164 | 165 | ||
165 | #endif /* _BRCMF_BUS_H_ */ | 166 | #endif /* _BRCMF_BUS_H_ */ |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c index e067aec1fbf1..40e7f854e10f 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c | |||
@@ -1231,21 +1231,23 @@ u32 brcmf_get_chip_info(struct brcmf_if *ifp) | |||
1231 | return bus->chip << 4 | bus->chiprev; | 1231 | return bus->chip << 4 | bus->chiprev; |
1232 | } | 1232 | } |
1233 | 1233 | ||
1234 | static void brcmf_driver_init(struct work_struct *work) | 1234 | static void brcmf_driver_register(struct work_struct *work) |
1235 | { | 1235 | { |
1236 | brcmf_debugfs_init(); | ||
1237 | |||
1238 | #ifdef CONFIG_BRCMFMAC_SDIO | 1236 | #ifdef CONFIG_BRCMFMAC_SDIO |
1239 | brcmf_sdio_init(); | 1237 | brcmf_sdio_register(); |
1240 | #endif | 1238 | #endif |
1241 | #ifdef CONFIG_BRCMFMAC_USB | 1239 | #ifdef CONFIG_BRCMFMAC_USB |
1242 | brcmf_usb_init(); | 1240 | brcmf_usb_register(); |
1243 | #endif | 1241 | #endif |
1244 | } | 1242 | } |
1245 | static DECLARE_WORK(brcmf_driver_work, brcmf_driver_init); | 1243 | static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register); |
1246 | 1244 | ||
1247 | static int __init brcmfmac_module_init(void) | 1245 | static int __init brcmfmac_module_init(void) |
1248 | { | 1246 | { |
1247 | brcmf_debugfs_init(); | ||
1248 | #ifdef CONFIG_BRCMFMAC_SDIO | ||
1249 | brcmf_sdio_init(); | ||
1250 | #endif | ||
1249 | if (!schedule_work(&brcmf_driver_work)) | 1251 | if (!schedule_work(&brcmf_driver_work)) |
1250 | return -EBUSY; | 1252 | return -EBUSY; |
1251 | 1253 | ||
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c index 39e01a7c8556..f4aea47e0730 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c | |||
@@ -1539,7 +1539,7 @@ void brcmf_usb_exit(void) | |||
1539 | brcmf_release_fw(&fw_image_list); | 1539 | brcmf_release_fw(&fw_image_list); |
1540 | } | 1540 | } |
1541 | 1541 | ||
1542 | void brcmf_usb_init(void) | 1542 | void brcmf_usb_register(void) |
1543 | { | 1543 | { |
1544 | brcmf_dbg(USB, "Enter\n"); | 1544 | brcmf_dbg(USB, "Enter\n"); |
1545 | INIT_LIST_HEAD(&fw_image_list); | 1545 | INIT_LIST_HEAD(&fw_image_list); |
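The brcmfmac changes above split registration in two: platform_driver_probe() stays in brcmf_sdio_init(), which can now be __init and runs directly from module_init(), while the slower sdio/usb bus registration moves into brcmf_sdio_register()/brcmf_usb_register() and is scheduled from a work item. A stripped-down sketch of that deferral pattern (kernel-module style, invented names, not the brcmfmac code):

	#include <linux/module.h>
	#include <linux/workqueue.h>

	/* Defer slow bus registration so module_init() returns quickly. */
	static void example_register_buses(struct work_struct *work)
	{
		/* sdio_register_driver() / usb_register() would go here */
		pr_info("example: buses registered\n");
	}
	static DECLARE_WORK(example_register_work, example_register_buses);

	static int __init example_init(void)
	{
		if (!schedule_work(&example_register_work))
			return -EBUSY;
		return 0;
	}

	static void __exit example_exit(void)
	{
		cancel_work_sync(&example_register_work);
		/* matching unregister calls would go here */
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");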
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c index 3a6544710c8a..edc5d105ff98 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c | |||
@@ -457,6 +457,8 @@ static int brcms_ops_start(struct ieee80211_hw *hw) | |||
457 | if (err != 0) | 457 | if (err != 0) |
458 | brcms_err(wl->wlc->hw->d11core, "%s: brcms_up() returned %d\n", | 458 | brcms_err(wl->wlc->hw->d11core, "%s: brcms_up() returned %d\n", |
459 | __func__, err); | 459 | __func__, err); |
460 | |||
461 | bcma_core_pci_power_save(wl->wlc->hw->d11core->bus, true); | ||
460 | return err; | 462 | return err; |
461 | } | 463 | } |
462 | 464 | ||
@@ -479,6 +481,8 @@ static void brcms_ops_stop(struct ieee80211_hw *hw) | |||
479 | return; | 481 | return; |
480 | } | 482 | } |
481 | 483 | ||
484 | bcma_core_pci_power_save(wl->wlc->hw->d11core->bus, false); | ||
485 | |||
482 | /* put driver in down state */ | 486 | /* put driver in down state */ |
483 | spin_lock_bh(&wl->lock); | 487 | spin_lock_bh(&wl->lock); |
484 | brcms_down(wl); | 488 | brcms_down(wl); |
diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c index f5e6b489ed32..899cad34ccd3 100644 --- a/drivers/net/wireless/cw1200/cw1200_spi.c +++ b/drivers/net/wireless/cw1200/cw1200_spi.c | |||
@@ -42,7 +42,6 @@ struct hwbus_priv { | |||
42 | spinlock_t lock; /* Serialize all bus operations */ | 42 | spinlock_t lock; /* Serialize all bus operations */ |
43 | wait_queue_head_t wq; | 43 | wait_queue_head_t wq; |
44 | int claimed; | 44 | int claimed; |
45 | int irq_disabled; | ||
46 | }; | 45 | }; |
47 | 46 | ||
48 | #define SDIO_TO_SPI_ADDR(addr) ((addr & 0x1f)>>2) | 47 | #define SDIO_TO_SPI_ADDR(addr) ((addr & 0x1f)>>2) |
@@ -238,8 +237,6 @@ static irqreturn_t cw1200_spi_irq_handler(int irq, void *dev_id) | |||
238 | struct hwbus_priv *self = dev_id; | 237 | struct hwbus_priv *self = dev_id; |
239 | 238 | ||
240 | if (self->core) { | 239 | if (self->core) { |
241 | disable_irq_nosync(self->func->irq); | ||
242 | self->irq_disabled = 1; | ||
243 | cw1200_irq_handler(self->core); | 240 | cw1200_irq_handler(self->core); |
244 | return IRQ_HANDLED; | 241 | return IRQ_HANDLED; |
245 | } else { | 242 | } else { |
@@ -253,9 +250,10 @@ static int cw1200_spi_irq_subscribe(struct hwbus_priv *self) | |||
253 | 250 | ||
254 | pr_debug("SW IRQ subscribe\n"); | 251 | pr_debug("SW IRQ subscribe\n"); |
255 | 252 | ||
256 | ret = request_any_context_irq(self->func->irq, cw1200_spi_irq_handler, | 253 | ret = request_threaded_irq(self->func->irq, NULL, |
257 | IRQF_TRIGGER_HIGH, | 254 | cw1200_spi_irq_handler, |
258 | "cw1200_wlan_irq", self); | 255 | IRQF_TRIGGER_HIGH | IRQF_ONESHOT, |
256 | "cw1200_wlan_irq", self); | ||
259 | if (WARN_ON(ret < 0)) | 257 | if (WARN_ON(ret < 0)) |
260 | goto exit; | 258 | goto exit; |
261 | 259 | ||
@@ -273,22 +271,13 @@ exit: | |||
273 | 271 | ||
274 | static int cw1200_spi_irq_unsubscribe(struct hwbus_priv *self) | 272 | static int cw1200_spi_irq_unsubscribe(struct hwbus_priv *self) |
275 | { | 273 | { |
274 | int ret = 0; | ||
275 | |||
276 | pr_debug("SW IRQ unsubscribe\n"); | 276 | pr_debug("SW IRQ unsubscribe\n"); |
277 | disable_irq_wake(self->func->irq); | 277 | disable_irq_wake(self->func->irq); |
278 | free_irq(self->func->irq, self); | 278 | free_irq(self->func->irq, self); |
279 | 279 | ||
280 | return 0; | 280 | return ret; |
281 | } | ||
282 | |||
283 | static int cw1200_spi_irq_enable(struct hwbus_priv *self, int enable) | ||
284 | { | ||
285 | /* Disables are handled by the interrupt handler */ | ||
286 | if (enable && self->irq_disabled) { | ||
287 | enable_irq(self->func->irq); | ||
288 | self->irq_disabled = 0; | ||
289 | } | ||
290 | |||
291 | return 0; | ||
292 | } | 281 | } |
293 | 282 | ||
294 | static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata) | 283 | static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata) |
@@ -368,7 +357,6 @@ static struct hwbus_ops cw1200_spi_hwbus_ops = { | |||
368 | .unlock = cw1200_spi_unlock, | 357 | .unlock = cw1200_spi_unlock, |
369 | .align_size = cw1200_spi_align_size, | 358 | .align_size = cw1200_spi_align_size, |
370 | .power_mgmt = cw1200_spi_pm, | 359 | .power_mgmt = cw1200_spi_pm, |
371 | .irq_enable = cw1200_spi_irq_enable, | ||
372 | }; | 360 | }; |
373 | 361 | ||
374 | /* Probe Function to be called by SPI stack when device is discovered */ | 362 | /* Probe Function to be called by SPI stack when device is discovered */ |
diff --git a/drivers/net/wireless/cw1200/fwio.c b/drivers/net/wireless/cw1200/fwio.c index 0b2061bbc68b..acdff0f7f952 100644 --- a/drivers/net/wireless/cw1200/fwio.c +++ b/drivers/net/wireless/cw1200/fwio.c | |||
@@ -485,7 +485,7 @@ int cw1200_load_firmware(struct cw1200_common *priv) | |||
485 | 485 | ||
486 | /* Enable interrupt signalling */ | 486 | /* Enable interrupt signalling */ |
487 | priv->hwbus_ops->lock(priv->hwbus_priv); | 487 | priv->hwbus_ops->lock(priv->hwbus_priv); |
488 | ret = __cw1200_irq_enable(priv, 2); | 488 | ret = __cw1200_irq_enable(priv, 1); |
489 | priv->hwbus_ops->unlock(priv->hwbus_priv); | 489 | priv->hwbus_ops->unlock(priv->hwbus_priv); |
490 | if (ret < 0) | 490 | if (ret < 0) |
491 | goto unsubscribe; | 491 | goto unsubscribe; |
diff --git a/drivers/net/wireless/cw1200/hwbus.h b/drivers/net/wireless/cw1200/hwbus.h index 51dfb3a90735..8b2fc831c3de 100644 --- a/drivers/net/wireless/cw1200/hwbus.h +++ b/drivers/net/wireless/cw1200/hwbus.h | |||
@@ -28,7 +28,6 @@ struct hwbus_ops { | |||
28 | void (*unlock)(struct hwbus_priv *self); | 28 | void (*unlock)(struct hwbus_priv *self); |
29 | size_t (*align_size)(struct hwbus_priv *self, size_t size); | 29 | size_t (*align_size)(struct hwbus_priv *self, size_t size); |
30 | int (*power_mgmt)(struct hwbus_priv *self, bool suspend); | 30 | int (*power_mgmt)(struct hwbus_priv *self, bool suspend); |
31 | int (*irq_enable)(struct hwbus_priv *self, int enable); | ||
32 | }; | 31 | }; |
33 | 32 | ||
34 | #endif /* CW1200_HWBUS_H */ | 33 | #endif /* CW1200_HWBUS_H */ |
diff --git a/drivers/net/wireless/cw1200/hwio.c b/drivers/net/wireless/cw1200/hwio.c index 41bd7615ccaa..ff230b7aeedd 100644 --- a/drivers/net/wireless/cw1200/hwio.c +++ b/drivers/net/wireless/cw1200/hwio.c | |||
@@ -273,21 +273,6 @@ int __cw1200_irq_enable(struct cw1200_common *priv, int enable) | |||
273 | u16 val16; | 273 | u16 val16; |
274 | int ret; | 274 | int ret; |
275 | 275 | ||
276 | /* We need to do this hack because the SPI layer can sleep on I/O | ||
277 | and the general path involves I/O to the device in interrupt | ||
278 | context. | ||
279 | |||
280 | However, the initial enable call needs to go to the hardware. | ||
281 | |||
282 | We don't worry about shutdown because we do a full reset which | ||
283 | clears the interrupt enabled bits. | ||
284 | */ | ||
285 | if (priv->hwbus_ops->irq_enable) { | ||
286 | ret = priv->hwbus_ops->irq_enable(priv->hwbus_priv, enable); | ||
287 | if (ret || enable < 2) | ||
288 | return ret; | ||
289 | } | ||
290 | |||
291 | if (HIF_8601_SILICON == priv->hw_type) { | 276 | if (HIF_8601_SILICON == priv->hw_type) { |
292 | ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32); | 277 | ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32); |
293 | if (ret < 0) { | 278 | if (ret < 0) { |
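The cw1200 hunks above drop the hand-rolled "disable_irq_nosync() in the hard handler, re-enable later through hwbus_ops->irq_enable" dance and register a threaded interrupt with IRQF_ONESHOT instead: with a NULL hard handler the core keeps the line masked until the thread function returns, so the handler is free to perform sleeping SPI I/O, and the special enable=2 bootstrap in __cw1200_irq_enable() becomes unnecessary. A minimal sketch of that registration (invented names; request_threaded_irq() is the real API):

	#include <linux/interrupt.h>

	/* Threaded handler: sleeping bus transactions are fine here. */
	static irqreturn_t example_irq_thread(int irq, void *dev_id)
	{
		return IRQ_HANDLED;
	}

	static int example_subscribe_irq(unsigned int irq, void *dev_id)
	{
		/* NULL hard handler + IRQF_ONESHOT: the line stays masked
		 * until example_irq_thread() returns. */
		return request_threaded_irq(irq, NULL, example_irq_thread,
					    IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
					    "example_irq", dev_id);
	}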
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c index 21c688264708..1214c587fd08 100644 --- a/drivers/net/wireless/mwifiex/11n_aggr.c +++ b/drivers/net/wireless/mwifiex/11n_aggr.c | |||
@@ -150,7 +150,7 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv, | |||
150 | */ | 150 | */ |
151 | int | 151 | int |
152 | mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, | 152 | mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, |
153 | struct mwifiex_ra_list_tbl *pra_list, int headroom, | 153 | struct mwifiex_ra_list_tbl *pra_list, |
154 | int ptrindex, unsigned long ra_list_flags) | 154 | int ptrindex, unsigned long ra_list_flags) |
155 | __releases(&priv->wmm.ra_list_spinlock) | 155 | __releases(&priv->wmm.ra_list_spinlock) |
156 | { | 156 | { |
@@ -160,6 +160,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, | |||
160 | int pad = 0, ret; | 160 | int pad = 0, ret; |
161 | struct mwifiex_tx_param tx_param; | 161 | struct mwifiex_tx_param tx_param; |
162 | struct txpd *ptx_pd = NULL; | 162 | struct txpd *ptx_pd = NULL; |
163 | int headroom = adapter->iface_type == MWIFIEX_USB ? 0 : INTF_HEADER_LEN; | ||
163 | 164 | ||
164 | skb_src = skb_peek(&pra_list->skb_head); | 165 | skb_src = skb_peek(&pra_list->skb_head); |
165 | if (!skb_src) { | 166 | if (!skb_src) { |
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.h b/drivers/net/wireless/mwifiex/11n_aggr.h index 900e1c62a0cc..892098d6a696 100644 --- a/drivers/net/wireless/mwifiex/11n_aggr.h +++ b/drivers/net/wireless/mwifiex/11n_aggr.h | |||
@@ -26,7 +26,7 @@ | |||
26 | int mwifiex_11n_deaggregate_pkt(struct mwifiex_private *priv, | 26 | int mwifiex_11n_deaggregate_pkt(struct mwifiex_private *priv, |
27 | struct sk_buff *skb); | 27 | struct sk_buff *skb); |
28 | int mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, | 28 | int mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, |
29 | struct mwifiex_ra_list_tbl *ptr, int headroom, | 29 | struct mwifiex_ra_list_tbl *ptr, |
30 | int ptr_index, unsigned long flags) | 30 | int ptr_index, unsigned long flags) |
31 | __releases(&priv->wmm.ra_list_spinlock); | 31 | __releases(&priv->wmm.ra_list_spinlock); |
32 | 32 | ||
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c index 2d761477d15e..a6c46f3b6e3a 100644 --- a/drivers/net/wireless/mwifiex/cmdevt.c +++ b/drivers/net/wireless/mwifiex/cmdevt.c | |||
@@ -1155,7 +1155,7 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv, | |||
1155 | uint32_t conditions = le32_to_cpu(phs_cfg->params.hs_config.conditions); | 1155 | uint32_t conditions = le32_to_cpu(phs_cfg->params.hs_config.conditions); |
1156 | 1156 | ||
1157 | if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE) && | 1157 | if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE) && |
1158 | adapter->iface_type == MWIFIEX_SDIO) { | 1158 | adapter->iface_type != MWIFIEX_USB) { |
1159 | mwifiex_hs_activated_event(priv, true); | 1159 | mwifiex_hs_activated_event(priv, true); |
1160 | return 0; | 1160 | return 0; |
1161 | } else { | 1161 | } else { |
@@ -1167,8 +1167,7 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv, | |||
1167 | } | 1167 | } |
1168 | if (conditions != HS_CFG_CANCEL) { | 1168 | if (conditions != HS_CFG_CANCEL) { |
1169 | adapter->is_hs_configured = true; | 1169 | adapter->is_hs_configured = true; |
1170 | if (adapter->iface_type == MWIFIEX_USB || | 1170 | if (adapter->iface_type == MWIFIEX_USB) |
1171 | adapter->iface_type == MWIFIEX_PCIE) | ||
1172 | mwifiex_hs_activated_event(priv, true); | 1171 | mwifiex_hs_activated_event(priv, true); |
1173 | } else { | 1172 | } else { |
1174 | adapter->is_hs_configured = false; | 1173 | adapter->is_hs_configured = false; |
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c index 2472d4b7f00e..1c70b8d09227 100644 --- a/drivers/net/wireless/mwifiex/usb.c +++ b/drivers/net/wireless/mwifiex/usb.c | |||
@@ -447,9 +447,6 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message) | |||
447 | */ | 447 | */ |
448 | adapter->is_suspended = true; | 448 | adapter->is_suspended = true; |
449 | 449 | ||
450 | for (i = 0; i < adapter->priv_num; i++) | ||
451 | netif_carrier_off(adapter->priv[i]->netdev); | ||
452 | |||
453 | if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb) | 450 | if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb) |
454 | usb_kill_urb(card->rx_cmd.urb); | 451 | usb_kill_urb(card->rx_cmd.urb); |
455 | 452 | ||
@@ -509,10 +506,6 @@ static int mwifiex_usb_resume(struct usb_interface *intf) | |||
509 | MWIFIEX_RX_CMD_BUF_SIZE); | 506 | MWIFIEX_RX_CMD_BUF_SIZE); |
510 | } | 507 | } |
511 | 508 | ||
512 | for (i = 0; i < adapter->priv_num; i++) | ||
513 | if (adapter->priv[i]->media_connected) | ||
514 | netif_carrier_on(adapter->priv[i]->netdev); | ||
515 | |||
516 | /* Disable Host Sleep */ | 509 | /* Disable Host Sleep */ |
517 | if (adapter->hs_activated) | 510 | if (adapter->hs_activated) |
518 | mwifiex_cancel_hs(mwifiex_get_priv(adapter, | 511 | mwifiex_cancel_hs(mwifiex_get_priv(adapter, |
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c index 2e8f9cdea54d..95fa3599b407 100644 --- a/drivers/net/wireless/mwifiex/wmm.c +++ b/drivers/net/wireless/mwifiex/wmm.c | |||
@@ -1239,8 +1239,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter) | |||
1239 | if (enable_tx_amsdu && mwifiex_is_amsdu_allowed(priv, tid) && | 1239 | if (enable_tx_amsdu && mwifiex_is_amsdu_allowed(priv, tid) && |
1240 | mwifiex_is_11n_aggragation_possible(priv, ptr, | 1240 | mwifiex_is_11n_aggragation_possible(priv, ptr, |
1241 | adapter->tx_buf_size)) | 1241 | adapter->tx_buf_size)) |
1242 | mwifiex_11n_aggregate_pkt(priv, ptr, INTF_HEADER_LEN, | 1242 | mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags); |
1243 | ptr_index, flags); | ||
1244 | /* ra_list_spinlock has been freed in | 1243 | /* ra_list_spinlock has been freed in |
1245 | mwifiex_11n_aggregate_pkt() */ | 1244 | mwifiex_11n_aggregate_pkt() */ |
1246 | else | 1245 | else |
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c index b9deef66cf4b..e328d3058c41 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/p54/p54usb.c | |||
@@ -83,6 +83,7 @@ static struct usb_device_id p54u_table[] = { | |||
83 | {USB_DEVICE(0x06a9, 0x000e)}, /* Westell 802.11g USB (A90-211WG-01) */ | 83 | {USB_DEVICE(0x06a9, 0x000e)}, /* Westell 802.11g USB (A90-211WG-01) */ |
84 | {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */ | 84 | {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */ |
85 | {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */ | 85 | {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */ |
86 | {USB_DEVICE(0x07aa, 0x0020)}, /* Corega WLUSB2GTST USB */ | ||
86 | {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */ | 87 | {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */ |
87 | {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */ | 88 | {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */ |
88 | {USB_DEVICE(0x083a, 0x4531)}, /* T-Com Sinus 154 data II */ | 89 | {USB_DEVICE(0x083a, 0x4531)}, /* T-Com Sinus 154 data II */ |
@@ -979,6 +980,7 @@ static int p54u_load_firmware(struct ieee80211_hw *dev, | |||
979 | if (err) { | 980 | if (err) { |
980 | dev_err(&priv->udev->dev, "(p54usb) cannot load firmware %s " | 981 | dev_err(&priv->udev->dev, "(p54usb) cannot load firmware %s " |
981 | "(%d)!\n", p54u_fwlist[i].fw, err); | 982 | "(%d)!\n", p54u_fwlist[i].fw, err); |
983 | usb_put_dev(udev); | ||
982 | } | 984 | } |
983 | 985 | ||
984 | return err; | 986 | return err; |
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h index cc03e7c87cbe..703258742d28 100644 --- a/drivers/net/wireless/rtlwifi/wifi.h +++ b/drivers/net/wireless/rtlwifi/wifi.h | |||
@@ -2057,7 +2057,7 @@ struct rtl_priv { | |||
2057 | that it points to the data allocated | 2057 | that it points to the data allocated |
2058 | beyond this structure like: | 2058 | beyond this structure like: |
2059 | rtl_pci_priv or rtl_usb_priv */ | 2059 | rtl_pci_priv or rtl_usb_priv */ |
2060 | u8 priv[0]; | 2060 | u8 priv[0] __aligned(sizeof(void *)); |
2061 | }; | 2061 | }; |
2062 | 2062 | ||
2063 | #define rtl_priv(hw) (((struct rtl_priv *)(hw)->priv)) | 2063 | #define rtl_priv(hw) (((struct rtl_priv *)(hw)->priv)) |
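The rtlwifi change annotates the trailing priv[0] array with __aligned(sizeof(void *)): as the surrounding comment says, that array marks where rtl_pci_priv or rtl_usb_priv is laid out right after struct rtl_priv, and without the attribute the storage is only guaranteed byte alignment, which is a problem for pointer-sized members on strict-alignment architectures. A tiny illustration of the idiom with generic names (GCC/clang zero-length array extension):

	#include <stddef.h>

	struct outer {
		int common;
		/* storage for a bus-specific struct placed right after 'outer';
		 * force pointer alignment so its members can be used directly */
		unsigned char priv[0] __attribute__((aligned(sizeof(void *))));
	};

	struct bus_priv {
		void *dev; /* could be misaligned without the attribute */
	};

	static inline struct bus_priv *to_bus_priv(struct outer *o)
	{
		return (struct bus_priv *)o->priv;
	}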
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index a53782ef1540..b45bce20ad76 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c | |||
@@ -24,6 +24,12 @@ | |||
24 | struct backend_info { | 24 | struct backend_info { |
25 | struct xenbus_device *dev; | 25 | struct xenbus_device *dev; |
26 | struct xenvif *vif; | 26 | struct xenvif *vif; |
27 | |||
28 | /* This is the state that will be reflected in xenstore when any | ||
29 | * active hotplug script completes. | ||
30 | */ | ||
31 | enum xenbus_state state; | ||
32 | |||
27 | enum xenbus_state frontend_state; | 33 | enum xenbus_state frontend_state; |
28 | struct xenbus_watch hotplug_status_watch; | 34 | struct xenbus_watch hotplug_status_watch; |
29 | u8 have_hotplug_status_watch:1; | 35 | u8 have_hotplug_status_watch:1; |
@@ -136,6 +142,8 @@ static int netback_probe(struct xenbus_device *dev, | |||
136 | if (err) | 142 | if (err) |
137 | goto fail; | 143 | goto fail; |
138 | 144 | ||
145 | be->state = XenbusStateInitWait; | ||
146 | |||
139 | /* This kicks hotplug scripts, so do it immediately. */ | 147 | /* This kicks hotplug scripts, so do it immediately. */ |
140 | backend_create_xenvif(be); | 148 | backend_create_xenvif(be); |
141 | 149 | ||
@@ -208,24 +216,113 @@ static void backend_create_xenvif(struct backend_info *be) | |||
208 | kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE); | 216 | kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE); |
209 | } | 217 | } |
210 | 218 | ||
211 | 219 | static void backend_disconnect(struct backend_info *be) | |
212 | static void disconnect_backend(struct xenbus_device *dev) | ||
213 | { | 220 | { |
214 | struct backend_info *be = dev_get_drvdata(&dev->dev); | ||
215 | |||
216 | if (be->vif) | 221 | if (be->vif) |
217 | xenvif_disconnect(be->vif); | 222 | xenvif_disconnect(be->vif); |
218 | } | 223 | } |
219 | 224 | ||
220 | static void destroy_backend(struct xenbus_device *dev) | 225 | static void backend_connect(struct backend_info *be) |
221 | { | 226 | { |
222 | struct backend_info *be = dev_get_drvdata(&dev->dev); | 227 | if (be->vif) |
228 | connect(be); | ||
229 | } | ||
223 | 230 | ||
224 | if (be->vif) { | 231 | static inline void backend_switch_state(struct backend_info *be, |
225 | kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE); | 232 | enum xenbus_state state) |
226 | xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status"); | 233 | { |
227 | xenvif_free(be->vif); | 234 | struct xenbus_device *dev = be->dev; |
228 | be->vif = NULL; | 235 | |
236 | pr_debug("%s -> %s\n", dev->nodename, xenbus_strstate(state)); | ||
237 | be->state = state; | ||
238 | |||
239 | /* If we are waiting for a hotplug script then defer the | ||
240 | * actual xenbus state change. | ||
241 | */ | ||
242 | if (!be->have_hotplug_status_watch) | ||
243 | xenbus_switch_state(dev, state); | ||
244 | } | ||
245 | |||
246 | /* Handle backend state transitions: | ||
247 | * | ||
248 | * The backend state starts in InitWait and the following transitions are | ||
249 | * allowed. | ||
250 | * | ||
251 | * InitWait -> Connected | ||
252 | * | ||
253 | *    ^    \         | | ||
254 | *    |     \        | | ||
255 | *    |      \       | | ||
256 | *    |       \      | | ||
257 | *    |        \     | | ||
258 | *    |         \    | | ||
259 | *    |          V   V | ||
260 | * | ||
261 | *  Closed  <-> Closing | ||
262 | * | ||
263 | * The state argument specifies the eventual state of the backend and the | ||
264 | * function transitions to that state via the shortest path. | ||
265 | */ | ||
266 | static void set_backend_state(struct backend_info *be, | ||
267 | enum xenbus_state state) | ||
268 | { | ||
269 | while (be->state != state) { | ||
270 | switch (be->state) { | ||
271 | case XenbusStateClosed: | ||
272 | switch (state) { | ||
273 | case XenbusStateInitWait: | ||
274 | case XenbusStateConnected: | ||
275 | pr_info("%s: prepare for reconnect\n", | ||
276 | be->dev->nodename); | ||
277 | backend_switch_state(be, XenbusStateInitWait); | ||
278 | break; | ||
279 | case XenbusStateClosing: | ||
280 | backend_switch_state(be, XenbusStateClosing); | ||
281 | break; | ||
282 | default: | ||
283 | BUG(); | ||
284 | } | ||
285 | break; | ||
286 | case XenbusStateInitWait: | ||
287 | switch (state) { | ||
288 | case XenbusStateConnected: | ||
289 | backend_connect(be); | ||
290 | backend_switch_state(be, XenbusStateConnected); | ||
291 | break; | ||
292 | case XenbusStateClosing: | ||
293 | case XenbusStateClosed: | ||
294 | backend_switch_state(be, XenbusStateClosing); | ||
295 | break; | ||
296 | default: | ||
297 | BUG(); | ||
298 | } | ||
299 | break; | ||
300 | case XenbusStateConnected: | ||
301 | switch (state) { | ||
302 | case XenbusStateInitWait: | ||
303 | case XenbusStateClosing: | ||
304 | case XenbusStateClosed: | ||
305 | backend_disconnect(be); | ||
306 | backend_switch_state(be, XenbusStateClosing); | ||
307 | break; | ||
308 | default: | ||
309 | BUG(); | ||
310 | } | ||
311 | break; | ||
312 | case XenbusStateClosing: | ||
313 | switch (state) { | ||
314 | case XenbusStateInitWait: | ||
315 | case XenbusStateConnected: | ||
316 | case XenbusStateClosed: | ||
317 | backend_switch_state(be, XenbusStateClosed); | ||
318 | break; | ||
319 | default: | ||
320 | BUG(); | ||
321 | } | ||
322 | break; | ||
323 | default: | ||
324 | BUG(); | ||
325 | } | ||
229 | } | 326 | } |
230 | } | 327 | } |
231 | 328 | ||
@@ -237,40 +334,33 @@ static void frontend_changed(struct xenbus_device *dev, | |||
237 | { | 334 | { |
238 | struct backend_info *be = dev_get_drvdata(&dev->dev); | 335 | struct backend_info *be = dev_get_drvdata(&dev->dev); |
239 | 336 | ||
240 | pr_debug("frontend state %s\n", xenbus_strstate(frontend_state)); | 337 | pr_debug("%s -> %s\n", dev->otherend, xenbus_strstate(frontend_state)); |
241 | 338 | ||
242 | be->frontend_state = frontend_state; | 339 | be->frontend_state = frontend_state; |
243 | 340 | ||
244 | switch (frontend_state) { | 341 | switch (frontend_state) { |
245 | case XenbusStateInitialising: | 342 | case XenbusStateInitialising: |
246 | if (dev->state == XenbusStateClosed) { | 343 | set_backend_state(be, XenbusStateInitWait); |
247 | pr_info("%s: prepare for reconnect\n", dev->nodename); | ||
248 | xenbus_switch_state(dev, XenbusStateInitWait); | ||
249 | } | ||
250 | break; | 344 | break; |
251 | 345 | ||
252 | case XenbusStateInitialised: | 346 | case XenbusStateInitialised: |
253 | break; | 347 | break; |
254 | 348 | ||
255 | case XenbusStateConnected: | 349 | case XenbusStateConnected: |
256 | if (dev->state == XenbusStateConnected) | 350 | set_backend_state(be, XenbusStateConnected); |
257 | break; | ||
258 | if (be->vif) | ||
259 | connect(be); | ||
260 | break; | 351 | break; |
261 | 352 | ||
262 | case XenbusStateClosing: | 353 | case XenbusStateClosing: |
263 | disconnect_backend(dev); | 354 | set_backend_state(be, XenbusStateClosing); |
264 | xenbus_switch_state(dev, XenbusStateClosing); | ||
265 | break; | 355 | break; |
266 | 356 | ||
267 | case XenbusStateClosed: | 357 | case XenbusStateClosed: |
268 | xenbus_switch_state(dev, XenbusStateClosed); | 358 | set_backend_state(be, XenbusStateClosed); |
269 | if (xenbus_dev_is_online(dev)) | 359 | if (xenbus_dev_is_online(dev)) |
270 | break; | 360 | break; |
271 | destroy_backend(dev); | ||
272 | /* fall through if not online */ | 361 | /* fall through if not online */ |
273 | case XenbusStateUnknown: | 362 | case XenbusStateUnknown: |
363 | set_backend_state(be, XenbusStateClosed); | ||
274 | device_unregister(&dev->dev); | 364 | device_unregister(&dev->dev); |
275 | break; | 365 | break; |
276 | 366 | ||
@@ -363,7 +453,9 @@ static void hotplug_status_changed(struct xenbus_watch *watch, | |||
363 | if (IS_ERR(str)) | 453 | if (IS_ERR(str)) |
364 | return; | 454 | return; |
365 | if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) { | 455 | if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) { |
366 | xenbus_switch_state(be->dev, XenbusStateConnected); | 456 | /* Complete any pending state change */ |
457 | xenbus_switch_state(be->dev, be->state); | ||
458 | |||
367 | /* Not interested in this watch anymore. */ | 459 | /* Not interested in this watch anymore. */ |
368 | unregister_hotplug_status_watch(be); | 460 | unregister_hotplug_status_watch(be); |
369 | } | 461 | } |
@@ -393,12 +485,8 @@ static void connect(struct backend_info *be) | |||
393 | err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, | 485 | err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, |
394 | hotplug_status_changed, | 486 | hotplug_status_changed, |
395 | "%s/%s", dev->nodename, "hotplug-status"); | 487 | "%s/%s", dev->nodename, "hotplug-status"); |
396 | if (err) { | 488 | if (!err) |
397 | /* Switch now, since we can't do a watch. */ | ||
398 | xenbus_switch_state(dev, XenbusStateConnected); | ||
399 | } else { | ||
400 | be->have_hotplug_status_watch = 1; | 489 | be->have_hotplug_status_watch = 1; |
401 | } | ||
402 | 490 | ||
403 | netif_wake_queue(be->vif->dev); | 491 | netif_wake_queue(be->vif->dev); |
404 | } | 492 | } |
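The xen-netback rework above centralises every xenbus transition in set_backend_state(), which loops taking one allowed hop at a time until the backend reaches the requested state; for example, a frontend reconnect while the backend sits in Closed walks Closed -> InitWait -> Connected. A compilable miniature of the same "one legal hop per iteration" loop (states and transitions are a simplified stand-in, not the driver's code):

	#include <assert.h>

	enum st { ST_CLOSED, ST_INITWAIT, ST_CONNECTED, ST_CLOSING };

	/* One legal hop from 'cur' toward 'target', mirroring the nested
	 * switches in set_backend_state(). */
	static enum st next_hop(enum st cur, enum st target)
	{
		switch (cur) {
		case ST_CLOSED:    return target == ST_CLOSING ? ST_CLOSING : ST_INITWAIT;
		case ST_INITWAIT:  return target == ST_CONNECTED ? ST_CONNECTED : ST_CLOSING;
		case ST_CONNECTED: return ST_CLOSING;
		case ST_CLOSING:   return ST_CLOSED;
		}
		return cur;
	}

	static enum st walk(enum st cur, enum st target)
	{
		while (cur != target)
			cur = next_hop(cur, target);
		return cur;
	}

	int main(void)
	{
		/* Closed -> Connected goes via InitWait, as in the driver */
		assert(walk(ST_CLOSED, ST_CONNECTED) == ST_CONNECTED);
		return 0;
	}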
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig index 9d2009a9004d..78cc76053328 100644 --- a/drivers/of/Kconfig +++ b/drivers/of/Kconfig | |||
@@ -74,10 +74,4 @@ config OF_MTD | |||
74 | depends on MTD | 74 | depends on MTD |
75 | def_bool y | 75 | def_bool y |
76 | 76 | ||
77 | config OF_RESERVED_MEM | ||
78 | depends on OF_FLATTREE && (DMA_CMA || (HAVE_GENERIC_DMA_COHERENT && HAVE_MEMBLOCK)) | ||
79 | def_bool y | ||
80 | help | ||
81 | Initialization code for DMA reserved memory | ||
82 | |||
83 | endmenu # OF | 77 | endmenu # OF |
diff --git a/drivers/of/Makefile b/drivers/of/Makefile index ed9660adad77..efd05102c405 100644 --- a/drivers/of/Makefile +++ b/drivers/of/Makefile | |||
@@ -9,4 +9,3 @@ obj-$(CONFIG_OF_MDIO) += of_mdio.o | |||
9 | obj-$(CONFIG_OF_PCI) += of_pci.o | 9 | obj-$(CONFIG_OF_PCI) += of_pci.o |
10 | obj-$(CONFIG_OF_PCI_IRQ) += of_pci_irq.o | 10 | obj-$(CONFIG_OF_PCI_IRQ) += of_pci_irq.o |
11 | obj-$(CONFIG_OF_MTD) += of_mtd.o | 11 | obj-$(CONFIG_OF_MTD) += of_mtd.o |
12 | obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o | ||
diff --git a/drivers/of/base.c b/drivers/of/base.c index 865d3f66c86b..7d4c70f859e3 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c | |||
@@ -303,10 +303,8 @@ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread) | |||
303 | struct device_node *cpun, *cpus; | 303 | struct device_node *cpun, *cpus; |
304 | 304 | ||
305 | cpus = of_find_node_by_path("/cpus"); | 305 | cpus = of_find_node_by_path("/cpus"); |
306 | if (!cpus) { | 306 | if (!cpus) |
307 | pr_warn("Missing cpus node, bailing out\n"); | ||
308 | return NULL; | 307 | return NULL; |
309 | } | ||
310 | 308 | ||
311 | for_each_child_of_node(cpus, cpun) { | 309 | for_each_child_of_node(cpus, cpun) { |
312 | if (of_node_cmp(cpun->type, "cpu")) | 310 | if (of_node_cmp(cpun->type, "cpu")) |
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 229dd9d69e18..a4fa9ad31b8f 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c | |||
@@ -18,7 +18,6 @@ | |||
18 | #include <linux/string.h> | 18 | #include <linux/string.h> |
19 | #include <linux/errno.h> | 19 | #include <linux/errno.h> |
20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
21 | #include <linux/random.h> | ||
22 | 21 | ||
23 | #include <asm/setup.h> /* for COMMAND_LINE_SIZE */ | 22 | #include <asm/setup.h> /* for COMMAND_LINE_SIZE */ |
24 | #ifdef CONFIG_PPC | 23 | #ifdef CONFIG_PPC |
@@ -803,14 +802,3 @@ void __init unflatten_device_tree(void) | |||
803 | } | 802 | } |
804 | 803 | ||
805 | #endif /* CONFIG_OF_EARLY_FLATTREE */ | 804 | #endif /* CONFIG_OF_EARLY_FLATTREE */ |
806 | |||
807 | /* Feed entire flattened device tree into the random pool */ | ||
808 | static int __init add_fdt_randomness(void) | ||
809 | { | ||
810 | if (initial_boot_params) | ||
811 | add_device_randomness(initial_boot_params, | ||
812 | be32_to_cpu(initial_boot_params->totalsize)); | ||
813 | |||
814 | return 0; | ||
815 | } | ||
816 | core_initcall(add_fdt_randomness); | ||
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c deleted file mode 100644 index 0fe40c7d6904..000000000000 --- a/drivers/of/of_reserved_mem.c +++ /dev/null | |||
@@ -1,173 +0,0 @@ | |||
1 | /* | ||
2 | * Device tree based initialization code for reserved memory. | ||
3 | * | ||
4 | * Copyright (c) 2013 Samsung Electronics Co., Ltd. | ||
5 | * http://www.samsung.com | ||
6 | * Author: Marek Szyprowski <m.szyprowski@samsung.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License as | ||
10 | * published by the Free Software Foundation; either version 2 of the | ||
11 | * License or (at your optional) any later version of the license. | ||
12 | */ | ||
13 | |||
14 | #include <linux/memblock.h> | ||
15 | #include <linux/err.h> | ||
16 | #include <linux/of.h> | ||
17 | #include <linux/of_fdt.h> | ||
18 | #include <linux/of_platform.h> | ||
19 | #include <linux/mm.h> | ||
20 | #include <linux/sizes.h> | ||
21 | #include <linux/mm_types.h> | ||
22 | #include <linux/dma-contiguous.h> | ||
23 | #include <linux/dma-mapping.h> | ||
24 | #include <linux/of_reserved_mem.h> | ||
25 | |||
26 | #define MAX_RESERVED_REGIONS 16 | ||
27 | struct reserved_mem { | ||
28 | phys_addr_t base; | ||
29 | unsigned long size; | ||
30 | struct cma *cma; | ||
31 | char name[32]; | ||
32 | }; | ||
33 | static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS]; | ||
34 | static int reserved_mem_count; | ||
35 | |||
36 | static int __init fdt_scan_reserved_mem(unsigned long node, const char *uname, | ||
37 | int depth, void *data) | ||
38 | { | ||
39 | struct reserved_mem *rmem = &reserved_mem[reserved_mem_count]; | ||
40 | phys_addr_t base, size; | ||
41 | int is_cma, is_reserved; | ||
42 | unsigned long len; | ||
43 | const char *status; | ||
44 | __be32 *prop; | ||
45 | |||
46 | is_cma = IS_ENABLED(CONFIG_DMA_CMA) && | ||
47 | of_flat_dt_is_compatible(node, "linux,contiguous-memory-region"); | ||
48 | is_reserved = of_flat_dt_is_compatible(node, "reserved-memory-region"); | ||
49 | |||
50 | if (!is_reserved && !is_cma) { | ||
51 | /* ignore node and scan next one */ | ||
52 | return 0; | ||
53 | } | ||
54 | |||
55 | status = of_get_flat_dt_prop(node, "status", &len); | ||
56 | if (status && strcmp(status, "okay") != 0) { | ||
57 | /* ignore disabled node nad scan next one */ | ||
58 | return 0; | ||
59 | } | ||
60 | |||
61 | prop = of_get_flat_dt_prop(node, "reg", &len); | ||
62 | if (!prop || (len < (dt_root_size_cells + dt_root_addr_cells) * | ||
63 | sizeof(__be32))) { | ||
64 | pr_err("Reserved mem: node %s, incorrect \"reg\" property\n", | ||
65 | uname); | ||
66 | /* ignore node and scan next one */ | ||
67 | return 0; | ||
68 | } | ||
69 | base = dt_mem_next_cell(dt_root_addr_cells, &prop); | ||
70 | size = dt_mem_next_cell(dt_root_size_cells, &prop); | ||
71 | |||
72 | if (!size) { | ||
73 | /* ignore node and scan next one */ | ||
74 | return 0; | ||
75 | } | ||
76 | |||
77 | pr_info("Reserved mem: found %s, memory base %lx, size %ld MiB\n", | ||
78 | uname, (unsigned long)base, (unsigned long)size / SZ_1M); | ||
79 | |||
80 | if (reserved_mem_count == ARRAY_SIZE(reserved_mem)) | ||
81 | return -ENOSPC; | ||
82 | |||
83 | rmem->base = base; | ||
84 | rmem->size = size; | ||
85 | strlcpy(rmem->name, uname, sizeof(rmem->name)); | ||
86 | |||
87 | if (is_cma) { | ||
88 | struct cma *cma; | ||
89 | if (dma_contiguous_reserve_area(size, base, 0, &cma) == 0) { | ||
90 | rmem->cma = cma; | ||
91 | reserved_mem_count++; | ||
92 | if (of_get_flat_dt_prop(node, | ||
93 | "linux,default-contiguous-region", | ||
94 | NULL)) | ||
95 | dma_contiguous_set_default(cma); | ||
96 | } | ||
97 | } else if (is_reserved) { | ||
98 | if (memblock_remove(base, size) == 0) | ||
99 | reserved_mem_count++; | ||
100 | else | ||
101 | pr_err("Failed to reserve memory for %s\n", uname); | ||
102 | } | ||
103 | |||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | static struct reserved_mem *get_dma_memory_region(struct device *dev) | ||
108 | { | ||
109 | struct device_node *node; | ||
110 | const char *name; | ||
111 | int i; | ||
112 | |||
113 | node = of_parse_phandle(dev->of_node, "memory-region", 0); | ||
114 | if (!node) | ||
115 | return NULL; | ||
116 | |||
117 | name = kbasename(node->full_name); | ||
118 | for (i = 0; i < reserved_mem_count; i++) | ||
119 | if (strcmp(name, reserved_mem[i].name) == 0) | ||
120 | return &reserved_mem[i]; | ||
121 | return NULL; | ||
122 | } | ||
123 | |||
124 | /** | ||
125 | * of_reserved_mem_device_init() - assign reserved memory region to given device | ||
126 | * | ||
127 | * This function assign memory region pointed by "memory-region" device tree | ||
128 | * property to the given device. | ||
129 | */ | ||
130 | void of_reserved_mem_device_init(struct device *dev) | ||
131 | { | ||
132 | struct reserved_mem *region = get_dma_memory_region(dev); | ||
133 | if (!region) | ||
134 | return; | ||
135 | |||
136 | if (region->cma) { | ||
137 | dev_set_cma_area(dev, region->cma); | ||
138 | pr_info("Assigned CMA %s to %s device\n", region->name, | ||
139 | dev_name(dev)); | ||
140 | } else { | ||
141 | if (dma_declare_coherent_memory(dev, region->base, region->base, | ||
142 | region->size, DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE) != 0) | ||
143 | pr_info("Declared reserved memory %s to %s device\n", | ||
144 | region->name, dev_name(dev)); | ||
145 | } | ||
146 | } | ||
147 | |||
148 | /** | ||
149 | * of_reserved_mem_device_release() - release reserved memory device structures | ||
150 | * | ||
151 | * This function releases structures allocated for memory region handling for | ||
152 | * the given device. | ||
153 | */ | ||
154 | void of_reserved_mem_device_release(struct device *dev) | ||
155 | { | ||
156 | struct reserved_mem *region = get_dma_memory_region(dev); | ||
157 | if (!region && !region->cma) | ||
158 | dma_release_declared_memory(dev); | ||
159 | } | ||
160 | |||
161 | /** | ||
162 | * early_init_dt_scan_reserved_mem() - create reserved memory regions | ||
163 | * | ||
164 | * This function grabs memory from early allocator for device exclusive use | ||
165 | * defined in device tree structures. It should be called by arch specific code | ||
166 | * once the early allocator (memblock) has been activated and all other | ||
167 | * subsystems have already allocated/reserved memory. | ||
168 | */ | ||
169 | void __init early_init_dt_scan_reserved_mem(void) | ||
170 | { | ||
171 | of_scan_flat_dt_by_path("/memory/reserved-memory", | ||
172 | fdt_scan_reserved_mem, NULL); | ||
173 | } | ||
diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 9b439ac63d8e..f6dcde220821 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c | |||
@@ -21,7 +21,6 @@ | |||
21 | #include <linux/of_device.h> | 21 | #include <linux/of_device.h> |
22 | #include <linux/of_irq.h> | 22 | #include <linux/of_irq.h> |
23 | #include <linux/of_platform.h> | 23 | #include <linux/of_platform.h> |
24 | #include <linux/of_reserved_mem.h> | ||
25 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
26 | 25 | ||
27 | const struct of_device_id of_default_bus_match_table[] = { | 26 | const struct of_device_id of_default_bus_match_table[] = { |
@@ -219,8 +218,6 @@ static struct platform_device *of_platform_device_create_pdata( | |||
219 | dev->dev.bus = &platform_bus_type; | 218 | dev->dev.bus = &platform_bus_type; |
220 | dev->dev.platform_data = platform_data; | 219 | dev->dev.platform_data = platform_data; |
221 | 220 | ||
222 | of_reserved_mem_device_init(&dev->dev); | ||
223 | |||
224 | /* We do not fill the DMA ops for platform devices by default. | 221 | /* We do not fill the DMA ops for platform devices by default. |
225 | * This is currently the responsibility of the platform code | 222 | * This is currently the responsibility of the platform code |
226 | * to do such, possibly using a device notifier | 223 | * to do such, possibly using a device notifier |
@@ -228,7 +225,6 @@ static struct platform_device *of_platform_device_create_pdata( | |||
228 | 225 | ||
229 | if (of_device_add(dev) != 0) { | 226 | if (of_device_add(dev) != 0) { |
230 | platform_device_put(dev); | 227 | platform_device_put(dev); |
231 | of_reserved_mem_device_release(&dev->dev); | ||
232 | return NULL; | 228 | return NULL; |
233 | } | 229 | } |
234 | 230 | ||
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 0b7d23b4ad95..be12fbfcae10 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c | |||
@@ -994,14 +994,16 @@ void acpiphp_enumerate_slots(struct pci_bus *bus) | |||
994 | 994 | ||
995 | /* | 995 | /* |
996 | * This bridge should have been registered as a hotplug function | 996 | * This bridge should have been registered as a hotplug function |
997 | * under its parent, so the context has to be there. If not, we | 997 | * under its parent, so the context should be there, unless the |
998 | * are in deep goo. | 998 | * parent is going to be handled by pciehp, in which case this |
999 | * bridge is not interesting to us either. | ||
999 | */ | 1000 | */ |
1000 | mutex_lock(&acpiphp_context_lock); | 1001 | mutex_lock(&acpiphp_context_lock); |
1001 | context = acpiphp_get_context(handle); | 1002 | context = acpiphp_get_context(handle); |
1002 | if (WARN_ON(!context)) { | 1003 | if (!context) { |
1003 | mutex_unlock(&acpiphp_context_lock); | 1004 | mutex_unlock(&acpiphp_context_lock); |
1004 | put_device(&bus->dev); | 1005 | put_device(&bus->dev); |
1006 | pci_dev_put(bridge->pci_dev); | ||
1005 | kfree(bridge); | 1007 | kfree(bridge); |
1006 | return; | 1008 | return; |
1007 | } | 1009 | } |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index e8ccf6c0f08a..bdd64b1b4817 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -1155,8 +1155,14 @@ static void pci_enable_bridge(struct pci_dev *dev) | |||
1155 | 1155 | ||
1156 | pci_enable_bridge(dev->bus->self); | 1156 | pci_enable_bridge(dev->bus->self); |
1157 | 1157 | ||
1158 | if (pci_is_enabled(dev)) | 1158 | if (pci_is_enabled(dev)) { |
1159 | if (!dev->is_busmaster) { | ||
1160 | dev_warn(&dev->dev, "driver skip pci_set_master, fix it!\n"); | ||
1161 | pci_set_master(dev); | ||
1162 | } | ||
1159 | return; | 1163 | return; |
1164 | } | ||
1165 | |||
1160 | retval = pci_enable_device(dev); | 1166 | retval = pci_enable_device(dev); |
1161 | if (retval) | 1167 | if (retval) |
1162 | dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n", | 1168 | dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n", |
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c index a138965c01cb..b8fcc38c0d11 100644 --- a/drivers/pinctrl/pinconf.c +++ b/drivers/pinctrl/pinconf.c | |||
@@ -490,7 +490,7 @@ exit: | |||
490 | * <devicename> <state> <pinname> are values that should match the pinctrl-maps | 490 | * <devicename> <state> <pinname> are values that should match the pinctrl-maps |
491 | * <newvalue> reflects the new config and is driver dependant | 491 | * <newvalue> reflects the new config and is driver dependant |
492 | */ | 492 | */ |
493 | static int pinconf_dbg_config_write(struct file *file, | 493 | static ssize_t pinconf_dbg_config_write(struct file *file, |
494 | const char __user *user_buf, size_t count, loff_t *ppos) | 494 | const char __user *user_buf, size_t count, loff_t *ppos) |
495 | { | 495 | { |
496 | struct pinctrl_maps *maps_node; | 496 | struct pinctrl_maps *maps_node; |
@@ -508,7 +508,7 @@ static int pinconf_dbg_config_write(struct file *file, | |||
508 | int i; | 508 | int i; |
509 | 509 | ||
510 | /* Get userspace string and assure termination */ | 510 | /* Get userspace string and assure termination */ |
511 | buf_size = min(count, (size_t)(sizeof(buf)-1)); | 511 | buf_size = min(count, sizeof(buf) - 1); |
512 | if (copy_from_user(buf, user_buf, buf_size)) | 512 | if (copy_from_user(buf, user_buf, buf_size)) |
513 | return -EFAULT; | 513 | return -EFAULT; |
514 | buf[buf_size] = 0; | 514 | buf[buf_size] = 0; |
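The pinconf.c hunk above makes two related changes: the debugfs write handler now returns ssize_t, which is what the file_operations .write prototype expects, and the (size_t) cast inside min() is dropped because both operands are already size_t. A minimal sketch of that handler shape, with hypothetical function and buffer names (not taken from this patch):

    #include <linux/fs.h>
    #include <linux/kernel.h>
    #include <linux/uaccess.h>

    static ssize_t example_dbg_write(struct file *file, const char __user *user_buf,
                                     size_t count, loff_t *ppos)
    {
            char buf[128];
            size_t buf_size;

            /* copy at most sizeof(buf) - 1 bytes, then NUL-terminate */
            buf_size = min(count, sizeof(buf) - 1);
            if (copy_from_user(buf, user_buf, buf_size))
                    return -EFAULT;
            buf[buf_size] = '\0';

            /* ... parse buf ... */

            return count;   /* report the whole write as consumed */
    }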
diff --git a/drivers/pinctrl/pinctrl-exynos.c b/drivers/pinctrl/pinctrl-exynos.c index 2689f8d01a1e..155b1b3a0e7a 100644 --- a/drivers/pinctrl/pinctrl-exynos.c +++ b/drivers/pinctrl/pinctrl-exynos.c | |||
@@ -663,18 +663,18 @@ static void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata) | |||
663 | /* pin banks of s5pv210 pin-controller */ | 663 | /* pin banks of s5pv210 pin-controller */ |
664 | static struct samsung_pin_bank s5pv210_pin_bank[] = { | 664 | static struct samsung_pin_bank s5pv210_pin_bank[] = { |
665 | EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), | 665 | EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), |
666 | EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04), | 666 | EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpa1", 0x04), |
667 | EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08), | 667 | EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08), |
668 | EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpc0", 0x0c), | 668 | EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpc0", 0x0c), |
669 | EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpc1", 0x10), | 669 | EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpc1", 0x10), |
670 | EXYNOS_PIN_BANK_EINTG(4, 0x0a0, "gpd0", 0x14), | 670 | EXYNOS_PIN_BANK_EINTG(4, 0x0a0, "gpd0", 0x14), |
671 | EXYNOS_PIN_BANK_EINTG(4, 0x0c0, "gpd1", 0x18), | 671 | EXYNOS_PIN_BANK_EINTG(6, 0x0c0, "gpd1", 0x18), |
672 | EXYNOS_PIN_BANK_EINTG(5, 0x0e0, "gpe0", 0x1c), | 672 | EXYNOS_PIN_BANK_EINTG(8, 0x0e0, "gpe0", 0x1c), |
673 | EXYNOS_PIN_BANK_EINTG(8, 0x100, "gpe1", 0x20), | 673 | EXYNOS_PIN_BANK_EINTG(5, 0x100, "gpe1", 0x20), |
674 | EXYNOS_PIN_BANK_EINTG(6, 0x120, "gpf0", 0x24), | 674 | EXYNOS_PIN_BANK_EINTG(8, 0x120, "gpf0", 0x24), |
675 | EXYNOS_PIN_BANK_EINTG(8, 0x140, "gpf1", 0x28), | 675 | EXYNOS_PIN_BANK_EINTG(8, 0x140, "gpf1", 0x28), |
676 | EXYNOS_PIN_BANK_EINTG(8, 0x160, "gpf2", 0x2c), | 676 | EXYNOS_PIN_BANK_EINTG(8, 0x160, "gpf2", 0x2c), |
677 | EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpf3", 0x30), | 677 | EXYNOS_PIN_BANK_EINTG(6, 0x180, "gpf3", 0x30), |
678 | EXYNOS_PIN_BANK_EINTG(7, 0x1a0, "gpg0", 0x34), | 678 | EXYNOS_PIN_BANK_EINTG(7, 0x1a0, "gpg0", 0x34), |
679 | EXYNOS_PIN_BANK_EINTG(7, 0x1c0, "gpg1", 0x38), | 679 | EXYNOS_PIN_BANK_EINTG(7, 0x1c0, "gpg1", 0x38), |
680 | EXYNOS_PIN_BANK_EINTG(7, 0x1e0, "gpg2", 0x3c), | 680 | EXYNOS_PIN_BANK_EINTG(7, 0x1e0, "gpg2", 0x3c), |
diff --git a/drivers/pinctrl/pinctrl-palmas.c b/drivers/pinctrl/pinctrl-palmas.c index 82638fac3cfa..30c4d356cb33 100644 --- a/drivers/pinctrl/pinctrl-palmas.c +++ b/drivers/pinctrl/pinctrl-palmas.c | |||
@@ -891,9 +891,10 @@ static int palmas_pinconf_set(struct pinctrl_dev *pctldev, | |||
891 | param = pinconf_to_config_param(configs[i]); | 891 | param = pinconf_to_config_param(configs[i]); |
892 | param_val = pinconf_to_config_argument(configs[i]); | 892 | param_val = pinconf_to_config_argument(configs[i]); |
893 | 893 | ||
894 | if (param == PIN_CONFIG_BIAS_PULL_PIN_DEFAULT) | ||
895 | continue; | ||
896 | |||
894 | switch (param) { | 897 | switch (param) { |
895 | case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT: | ||
896 | return 0; | ||
897 | case PIN_CONFIG_BIAS_DISABLE: | 898 | case PIN_CONFIG_BIAS_DISABLE: |
898 | case PIN_CONFIG_BIAS_PULL_UP: | 899 | case PIN_CONFIG_BIAS_PULL_UP: |
899 | case PIN_CONFIG_BIAS_PULL_DOWN: | 900 | case PIN_CONFIG_BIAS_PULL_DOWN: |
diff --git a/drivers/pinctrl/pinctrl-tegra114.c b/drivers/pinctrl/pinctrl-tegra114.c index 622c4854977e..93c9e3899d5e 100644 --- a/drivers/pinctrl/pinctrl-tegra114.c +++ b/drivers/pinctrl/pinctrl-tegra114.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved. | 4 | * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved. |
5 | * | 5 | * |
6 | * Arthur: Pritesh Raithatha <praithatha@nvidia.com> | 6 | * Author: Pritesh Raithatha <praithatha@nvidia.com> |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify it | 8 | * This program is free software; you can redistribute it and/or modify it |
9 | * under the terms and conditions of the GNU General Public License, | 9 | * under the terms and conditions of the GNU General Public License, |
@@ -2763,7 +2763,6 @@ static struct platform_driver tegra114_pinctrl_driver = { | |||
2763 | }; | 2763 | }; |
2764 | module_platform_driver(tegra114_pinctrl_driver); | 2764 | module_platform_driver(tegra114_pinctrl_driver); |
2765 | 2765 | ||
2766 | MODULE_ALIAS("platform:tegra114-pinctrl"); | ||
2767 | MODULE_AUTHOR("Pritesh Raithatha <praithatha@nvidia.com>"); | 2766 | MODULE_AUTHOR("Pritesh Raithatha <praithatha@nvidia.com>"); |
2768 | MODULE_DESCRIPTION("NVIDIA Tegra114 pincontrol driver"); | 2767 | MODULE_DESCRIPTION("NVIDIA Tegra114 pinctrl driver"); |
2769 | MODULE_LICENSE("GPL v2"); | 2768 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c index 1a7816390773..b9f2653e4ef9 100644 --- a/drivers/regulator/da9063-regulator.c +++ b/drivers/regulator/da9063-regulator.c | |||
@@ -709,7 +709,7 @@ static struct da9063_regulators_pdata *da9063_parse_regulators_dt( | |||
709 | struct of_regulator_match **da9063_reg_matches) | 709 | struct of_regulator_match **da9063_reg_matches) |
710 | { | 710 | { |
711 | da9063_reg_matches = NULL; | 711 | da9063_reg_matches = NULL; |
712 | return PTR_ERR(-ENODEV); | 712 | return ERR_PTR(-ENODEV); |
713 | } | 713 | } |
714 | #endif | 714 | #endif |
715 | 715 | ||
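The da9063 hunk swaps a pair of helpers that are easy to confuse: ERR_PTR() encodes a negative errno into a pointer return value, while PTR_ERR() decodes it back to an int on the caller side, so a pointer-returning stub has to use ERR_PTR(-ENODEV). A minimal sketch of the pairing; the foo names are hypothetical:

    #include <linux/err.h>

    struct foo { int id; };
    static struct foo foo_table[4];

    /* pointer-returning lookup: errors travel as ERR_PTR-encoded pointers */
    static struct foo *foo_lookup(int id)
    {
            if (id < 0 || id > 3)
                    return ERR_PTR(-EINVAL);
            return &foo_table[id];
    }

    /* int-returning caller: IS_ERR() detects the error, PTR_ERR() decodes it */
    static int foo_open(int id)
    {
            struct foo *f = foo_lookup(id);

            if (IS_ERR(f))
                    return PTR_ERR(f);
            f->id = id;
            return 0;
    }

The tty3270 hunk further down is the mirror image of the same rule: tty3270_alloc_screen() returns ERR_PTR-encoded errors, so the result must be tested with IS_ERR() rather than against NULL.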
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c index 488dfe7ce9a6..7e2b165972e6 100644 --- a/drivers/regulator/palmas-regulator.c +++ b/drivers/regulator/palmas-regulator.c | |||
@@ -201,13 +201,7 @@ static unsigned int palmas_smps_ramp_delay[4] = {0, 10000, 5000, 2500}; | |||
201 | #define SMPS_CTRL_MODE_ECO 0x02 | 201 | #define SMPS_CTRL_MODE_ECO 0x02 |
202 | #define SMPS_CTRL_MODE_PWM 0x03 | 202 | #define SMPS_CTRL_MODE_PWM 0x03 |
203 | 203 | ||
204 | /* These values are derived from the data sheet. And are the number of steps | 204 | #define PALMAS_SMPS_NUM_VOLTAGES 122 |
205 | * where there is a voltage change, the ranges at beginning and end of register | ||
206 | * max/min values where there are no change are ommitted. | ||
207 | * | ||
208 | * So they are basically (maxV-minV)/stepV | ||
209 | */ | ||
210 | #define PALMAS_SMPS_NUM_VOLTAGES 117 | ||
211 | #define PALMAS_SMPS10_NUM_VOLTAGES 2 | 205 | #define PALMAS_SMPS10_NUM_VOLTAGES 2 |
212 | #define PALMAS_LDO_NUM_VOLTAGES 50 | 206 | #define PALMAS_LDO_NUM_VOLTAGES 50 |
213 | 207 | ||
@@ -979,6 +973,7 @@ static int palmas_regulators_probe(struct platform_device *pdev) | |||
979 | pmic->desc[id].min_uV = 900000; | 973 | pmic->desc[id].min_uV = 900000; |
980 | pmic->desc[id].uV_step = 50000; | 974 | pmic->desc[id].uV_step = 50000; |
981 | pmic->desc[id].linear_min_sel = 1; | 975 | pmic->desc[id].linear_min_sel = 1; |
976 | pmic->desc[id].enable_time = 500; | ||
982 | pmic->desc[id].vsel_reg = | 977 | pmic->desc[id].vsel_reg = |
983 | PALMAS_BASE_TO_REG(PALMAS_LDO_BASE, | 978 | PALMAS_BASE_TO_REG(PALMAS_LDO_BASE, |
984 | palmas_regs_info[id].vsel_addr); | 979 | palmas_regs_info[id].vsel_addr); |
@@ -997,6 +992,11 @@ static int palmas_regulators_probe(struct platform_device *pdev) | |||
997 | pmic->desc[id].min_uV = 450000; | 992 | pmic->desc[id].min_uV = 450000; |
998 | pmic->desc[id].uV_step = 25000; | 993 | pmic->desc[id].uV_step = 25000; |
999 | } | 994 | } |
995 | |||
996 | /* LOD6 in vibrator mode will have enable time 2000us */ | ||
997 | if (pdata && pdata->ldo6_vibrator && | ||
998 | (id == PALMAS_REG_LDO6)) | ||
999 | pmic->desc[id].enable_time = 2000; | ||
1000 | } else { | 1000 | } else { |
1001 | pmic->desc[id].n_voltages = 1; | 1001 | pmic->desc[id].n_voltages = 1; |
1002 | pmic->desc[id].ops = &palmas_ops_extreg; | 1002 | pmic->desc[id].ops = &palmas_ops_extreg; |
diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c index d8e3e1262bc2..20c271d49dcb 100644 --- a/drivers/regulator/ti-abb-regulator.c +++ b/drivers/regulator/ti-abb-regulator.c | |||
@@ -279,8 +279,12 @@ static int ti_abb_set_opp(struct regulator_dev *rdev, struct ti_abb *abb, | |||
279 | ti_abb_rmw(regs->opp_sel_mask, info->opp_sel, regs->control_reg, | 279 | ti_abb_rmw(regs->opp_sel_mask, info->opp_sel, regs->control_reg, |
280 | abb->base); | 280 | abb->base); |
281 | 281 | ||
282 | /* program LDO VBB vset override if needed */ | 282 | /* |
283 | if (abb->ldo_base) | 283 | * program LDO VBB vset override if needed for !bypass mode |
284 | * XXX: Do not switch sequence - for !bypass, LDO override reset *must* | ||
285 | * be performed *before* switch to bias mode else VBB glitches. | ||
286 | */ | ||
287 | if (abb->ldo_base && info->opp_sel != TI_ABB_NOMINAL_OPP) | ||
284 | ti_abb_program_ldovbb(dev, abb, info); | 288 | ti_abb_program_ldovbb(dev, abb, info); |
285 | 289 | ||
286 | /* Initiate ABB ldo change */ | 290 | /* Initiate ABB ldo change */ |
@@ -295,6 +299,14 @@ static int ti_abb_set_opp(struct regulator_dev *rdev, struct ti_abb *abb, | |||
295 | if (ret) | 299 | if (ret) |
296 | goto out; | 300 | goto out; |
297 | 301 | ||
302 | /* | ||
303 | * Reset LDO VBB vset override bypass mode | ||
304 | * XXX: Do not switch sequence - for bypass, LDO override reset *must* | ||
305 | * be performed *after* switch to bypass else VBB glitches. | ||
306 | */ | ||
307 | if (abb->ldo_base && info->opp_sel == TI_ABB_NOMINAL_OPP) | ||
308 | ti_abb_program_ldovbb(dev, abb, info); | ||
309 | |||
298 | out: | 310 | out: |
299 | return ret; | 311 | return ret; |
300 | } | 312 | } |
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c index 1432b26ef2e9..2205fbc2c37b 100644 --- a/drivers/regulator/wm831x-ldo.c +++ b/drivers/regulator/wm831x-ldo.c | |||
@@ -63,7 +63,7 @@ static irqreturn_t wm831x_ldo_uv_irq(int irq, void *data) | |||
63 | */ | 63 | */ |
64 | 64 | ||
65 | static const struct regulator_linear_range wm831x_gp_ldo_ranges[] = { | 65 | static const struct regulator_linear_range wm831x_gp_ldo_ranges[] = { |
66 | { .min_uV = 900000, .max_uV = 1650000, .min_sel = 0, .max_sel = 14, | 66 | { .min_uV = 900000, .max_uV = 1600000, .min_sel = 0, .max_sel = 14, |
67 | .uV_step = 50000 }, | 67 | .uV_step = 50000 }, |
68 | { .min_uV = 1700000, .max_uV = 3300000, .min_sel = 15, .max_sel = 31, | 68 | { .min_uV = 1700000, .max_uV = 3300000, .min_sel = 15, .max_sel = 31, |
69 | .uV_step = 100000 }, | 69 | .uV_step = 100000 }, |
@@ -332,7 +332,7 @@ static struct platform_driver wm831x_gp_ldo_driver = { | |||
332 | */ | 332 | */ |
333 | 333 | ||
334 | static const struct regulator_linear_range wm831x_aldo_ranges[] = { | 334 | static const struct regulator_linear_range wm831x_aldo_ranges[] = { |
335 | { .min_uV = 1000000, .max_uV = 1650000, .min_sel = 0, .max_sel = 12, | 335 | { .min_uV = 1000000, .max_uV = 1600000, .min_sel = 0, .max_sel = 12, |
336 | .uV_step = 50000 }, | 336 | .uV_step = 50000 }, |
337 | { .min_uV = 1700000, .max_uV = 3500000, .min_sel = 13, .max_sel = 31, | 337 | { .min_uV = 1700000, .max_uV = 3500000, .min_sel = 13, .max_sel = 31, |
338 | .uV_step = 100000 }, | 338 | .uV_step = 100000 }, |
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c index 835b5f0f344e..61ca9292a429 100644 --- a/drivers/regulator/wm8350-regulator.c +++ b/drivers/regulator/wm8350-regulator.c | |||
@@ -543,7 +543,7 @@ static int wm8350_dcdc_set_suspend_mode(struct regulator_dev *rdev, | |||
543 | } | 543 | } |
544 | 544 | ||
545 | static const struct regulator_linear_range wm8350_ldo_ranges[] = { | 545 | static const struct regulator_linear_range wm8350_ldo_ranges[] = { |
546 | { .min_uV = 900000, .max_uV = 1750000, .min_sel = 0, .max_sel = 15, | 546 | { .min_uV = 900000, .max_uV = 1650000, .min_sel = 0, .max_sel = 15, |
547 | .uV_step = 50000 }, | 547 | .uV_step = 50000 }, |
548 | { .min_uV = 1800000, .max_uV = 3300000, .min_sel = 16, .max_sel = 31, | 548 | { .min_uV = 1800000, .max_uV = 3300000, .min_sel = 16, .max_sel = 31, |
549 | .uV_step = 100000 }, | 549 | .uV_step = 100000 }, |
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index 8cd34bf644b3..77df9cb00688 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c | |||
@@ -145,9 +145,11 @@ bool __init sclp_has_linemode(void) | |||
145 | 145 | ||
146 | if (sccb->header.response_code != 0x20) | 146 | if (sccb->header.response_code != 0x20) |
147 | return 0; | 147 | return 0; |
148 | if (sccb->sclp_send_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)) | 148 | if (!(sccb->sclp_send_mask & (EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK))) |
149 | return 1; | 149 | return 0; |
150 | return 0; | 150 | if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK))) |
151 | return 0; | ||
152 | return 1; | ||
151 | } | 153 | } |
152 | 154 | ||
153 | bool __init sclp_has_vt220(void) | 155 | bool __init sclp_has_vt220(void) |
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index a0f47c83fd62..3f4ca4e09a4c 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c | |||
@@ -810,7 +810,7 @@ static void tty3270_resize_work(struct work_struct *work) | |||
810 | struct winsize ws; | 810 | struct winsize ws; |
811 | 811 | ||
812 | screen = tty3270_alloc_screen(tp->n_rows, tp->n_cols); | 812 | screen = tty3270_alloc_screen(tp->n_rows, tp->n_cols); |
813 | if (!screen) | 813 | if (IS_ERR(screen)) |
814 | return; | 814 | return; |
815 | /* Switch to new output size */ | 815 | /* Switch to new output size */ |
816 | spin_lock_bh(&tp->view.lock); | 816 | spin_lock_bh(&tp->view.lock); |
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index fd7cc566095a..d4ac60b4a56e 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c | |||
@@ -1583,7 +1583,7 @@ static int atmel_spi_probe(struct platform_device *pdev) | |||
1583 | /* Initialize the hardware */ | 1583 | /* Initialize the hardware */ |
1584 | ret = clk_prepare_enable(clk); | 1584 | ret = clk_prepare_enable(clk); |
1585 | if (ret) | 1585 | if (ret) |
1586 | goto out_unmap_regs; | 1586 | goto out_free_irq; |
1587 | spi_writel(as, CR, SPI_BIT(SWRST)); | 1587 | spi_writel(as, CR, SPI_BIT(SWRST)); |
1588 | spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ | 1588 | spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ |
1589 | if (as->caps.has_wdrbt) { | 1589 | if (as->caps.has_wdrbt) { |
@@ -1614,6 +1614,7 @@ out_free_dma: | |||
1614 | spi_writel(as, CR, SPI_BIT(SWRST)); | 1614 | spi_writel(as, CR, SPI_BIT(SWRST)); |
1615 | spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ | 1615 | spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ |
1616 | clk_disable_unprepare(clk); | 1616 | clk_disable_unprepare(clk); |
1617 | out_free_irq: | ||
1617 | free_irq(irq, master); | 1618 | free_irq(irq, master); |
1618 | out_unmap_regs: | 1619 | out_unmap_regs: |
1619 | iounmap(as->regs); | 1620 | iounmap(as->regs); |
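The atmel hunk introduces an out_free_irq label so that a clk_prepare_enable() failure unwinds the request_irq() that already succeeded instead of jumping past it. The general shape of such an error ladder, sketched with hypothetical names (each label releases exactly what was acquired before the jump, in reverse order; the resource lookups are elided):

    #include <linux/clk.h>
    #include <linux/interrupt.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>

    static irqreturn_t example_isr(int irq, void *dev_id)
    {
            return IRQ_HANDLED;
    }

    static int example_probe(struct platform_device *pdev)
    {
            void __iomem *regs;
            struct clk *clk;
            int irq, ret;

            /* ... map regs, look up clk, obtain irq ... */

            ret = request_irq(irq, example_isr, 0, "example", pdev);
            if (ret)
                    goto out_unmap_regs;

            ret = clk_prepare_enable(clk);
            if (ret)
                    goto out_free_irq;      /* the IRQ is held, so release it on the way out */

            /* ... remaining setup ... */
            return 0;

    out_free_irq:
            free_irq(irq, pdev);
    out_unmap_regs:
            iounmap(regs);
            return ret;
    }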
diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c index 5655acf55bfe..6416798828e7 100644 --- a/drivers/spi/spi-clps711x.c +++ b/drivers/spi/spi-clps711x.c | |||
@@ -226,7 +226,6 @@ static int spi_clps711x_probe(struct platform_device *pdev) | |||
226 | dev_name(&pdev->dev), hw); | 226 | dev_name(&pdev->dev), hw); |
227 | if (ret) { | 227 | if (ret) { |
228 | dev_err(&pdev->dev, "Can't request IRQ\n"); | 228 | dev_err(&pdev->dev, "Can't request IRQ\n"); |
229 | clk_put(hw->spi_clk); | ||
230 | goto clk_out; | 229 | goto clk_out; |
231 | } | 230 | } |
232 | 231 | ||
@@ -247,7 +246,6 @@ err_out: | |||
247 | gpio_free(hw->chipselect[i]); | 246 | gpio_free(hw->chipselect[i]); |
248 | 247 | ||
249 | spi_master_put(master); | 248 | spi_master_put(master); |
250 | kfree(master); | ||
251 | 249 | ||
252 | return ret; | 250 | return ret; |
253 | } | 251 | } |
@@ -263,7 +261,6 @@ static int spi_clps711x_remove(struct platform_device *pdev) | |||
263 | gpio_free(hw->chipselect[i]); | 261 | gpio_free(hw->chipselect[i]); |
264 | 262 | ||
265 | spi_unregister_master(master); | 263 | spi_unregister_master(master); |
266 | kfree(master); | ||
267 | 264 | ||
268 | return 0; | 265 | return 0; |
269 | } | 266 | } |
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 6cd07d13ecab..4e44575bd87a 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c | |||
@@ -476,15 +476,9 @@ static int dspi_probe(struct platform_device *pdev) | |||
476 | master->bus_num = bus_num; | 476 | master->bus_num = bus_num; |
477 | 477 | ||
478 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 478 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
479 | if (!res) { | ||
480 | dev_err(&pdev->dev, "can't get platform resource\n"); | ||
481 | ret = -EINVAL; | ||
482 | goto out_master_put; | ||
483 | } | ||
484 | |||
485 | dspi->base = devm_ioremap_resource(&pdev->dev, res); | 479 | dspi->base = devm_ioremap_resource(&pdev->dev, res); |
486 | if (!dspi->base) { | 480 | if (IS_ERR(dspi->base)) { |
487 | ret = -EINVAL; | 481 | ret = PTR_ERR(dspi->base); |
488 | goto out_master_put; | 482 | goto out_master_put; |
489 | } | 483 | } |
490 | 484 | ||
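The dspi hunk drops the explicit NULL check on platform_get_resource() and tests the result of devm_ioremap_resource() with IS_ERR(): that helper already validates the resource, logs the failure, and returns an ERR_PTR-encoded code, so the probe only needs to propagate it. A minimal sketch of the pattern (driver name hypothetical):

    #include <linux/err.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            struct resource *res;
            void __iomem *base;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            /* devm_ioremap_resource() rejects a NULL res and prints its own error */
            base = devm_ioremap_resource(&pdev->dev, res);
            if (IS_ERR(base))
                    return PTR_ERR(base);

            /* ... use base ... */
            return 0;
    }

The mpc512x-psc hunk just below is the companion fix: when devm_clk_get() fails, ret has to be set to PTR_ERR(clk) before the goto, otherwise the probe returns a stale value.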
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c index dbc5e999a1f5..6adf4e35816d 100644 --- a/drivers/spi/spi-mpc512x-psc.c +++ b/drivers/spi/spi-mpc512x-psc.c | |||
@@ -522,8 +522,10 @@ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr, | |||
522 | psc_num = master->bus_num; | 522 | psc_num = master->bus_num; |
523 | snprintf(clk_name, sizeof(clk_name), "psc%d_mclk", psc_num); | 523 | snprintf(clk_name, sizeof(clk_name), "psc%d_mclk", psc_num); |
524 | clk = devm_clk_get(dev, clk_name); | 524 | clk = devm_clk_get(dev, clk_name); |
525 | if (IS_ERR(clk)) | 525 | if (IS_ERR(clk)) { |
526 | ret = PTR_ERR(clk); | ||
526 | goto free_irq; | 527 | goto free_irq; |
528 | } | ||
527 | ret = clk_prepare_enable(clk); | 529 | ret = clk_prepare_enable(clk); |
528 | if (ret) | 530 | if (ret) |
529 | goto free_irq; | 531 | goto free_irq; |
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 2eb06ee0b326..c1a50674c1e3 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c | |||
@@ -546,8 +546,17 @@ static irqreturn_t ssp_int(int irq, void *dev_id) | |||
546 | if (pm_runtime_suspended(&drv_data->pdev->dev)) | 546 | if (pm_runtime_suspended(&drv_data->pdev->dev)) |
547 | return IRQ_NONE; | 547 | return IRQ_NONE; |
548 | 548 | ||
549 | sccr1_reg = read_SSCR1(reg); | 549 | /* |
550 | * If the device is not yet in RPM suspended state and we get an | ||
551 | * interrupt that is meant for another device, check if status bits | ||
552 | * are all set to one. That means that the device is already | ||
553 | * powered off. | ||
554 | */ | ||
550 | status = read_SSSR(reg); | 555 | status = read_SSSR(reg); |
556 | if (status == ~0) | ||
557 | return IRQ_NONE; | ||
558 | |||
559 | sccr1_reg = read_SSCR1(reg); | ||
551 | 560 | ||
552 | /* Ignore possible writes if we don't need to write */ | 561 | /* Ignore possible writes if we don't need to write */ |
553 | if (!(sccr1_reg & SSCR1_TIE)) | 562 | if (!(sccr1_reg & SSCR1_TIE)) |
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index 512b8893893b..a80376dc3a10 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c | |||
@@ -1428,6 +1428,8 @@ static int s3c64xx_spi_probe(struct platform_device *pdev) | |||
1428 | S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN, | 1428 | S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN, |
1429 | sdd->regs + S3C64XX_SPI_INT_EN); | 1429 | sdd->regs + S3C64XX_SPI_INT_EN); |
1430 | 1430 | ||
1431 | pm_runtime_enable(&pdev->dev); | ||
1432 | |||
1431 | if (spi_register_master(master)) { | 1433 | if (spi_register_master(master)) { |
1432 | dev_err(&pdev->dev, "cannot register SPI master\n"); | 1434 | dev_err(&pdev->dev, "cannot register SPI master\n"); |
1433 | ret = -EBUSY; | 1435 | ret = -EBUSY; |
@@ -1440,8 +1442,6 @@ static int s3c64xx_spi_probe(struct platform_device *pdev) | |||
1440 | mem_res, | 1442 | mem_res, |
1441 | sdd->rx_dma.dmach, sdd->tx_dma.dmach); | 1443 | sdd->rx_dma.dmach, sdd->tx_dma.dmach); |
1442 | 1444 | ||
1443 | pm_runtime_enable(&pdev->dev); | ||
1444 | |||
1445 | return 0; | 1445 | return 0; |
1446 | 1446 | ||
1447 | err3: | 1447 | err3: |
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c index 0b68cb592fa4..e488a90a98b8 100644 --- a/drivers/spi/spi-sh-hspi.c +++ b/drivers/spi/spi-sh-hspi.c | |||
@@ -296,6 +296,8 @@ static int hspi_probe(struct platform_device *pdev) | |||
296 | goto error1; | 296 | goto error1; |
297 | } | 297 | } |
298 | 298 | ||
299 | pm_runtime_enable(&pdev->dev); | ||
300 | |||
299 | master->num_chipselect = 1; | 301 | master->num_chipselect = 1; |
300 | master->bus_num = pdev->id; | 302 | master->bus_num = pdev->id; |
301 | master->setup = hspi_setup; | 303 | master->setup = hspi_setup; |
@@ -309,8 +311,6 @@ static int hspi_probe(struct platform_device *pdev) | |||
309 | goto error1; | 311 | goto error1; |
310 | } | 312 | } |
311 | 313 | ||
312 | pm_runtime_enable(&pdev->dev); | ||
313 | |||
314 | return 0; | 314 | return 0; |
315 | 315 | ||
316 | error1: | 316 | error1: |
diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c index 3ba4c5712dff..853f62b2b1a9 100644 --- a/drivers/staging/comedi/drivers/ni_65xx.c +++ b/drivers/staging/comedi/drivers/ni_65xx.c | |||
@@ -369,28 +369,23 @@ static int ni_65xx_dio_insn_bits(struct comedi_device *dev, | |||
369 | { | 369 | { |
370 | const struct ni_65xx_board *board = comedi_board(dev); | 370 | const struct ni_65xx_board *board = comedi_board(dev); |
371 | struct ni_65xx_private *devpriv = dev->private; | 371 | struct ni_65xx_private *devpriv = dev->private; |
372 | unsigned base_bitfield_channel; | 372 | int base_bitfield_channel; |
373 | const unsigned max_ports_per_bitfield = 5; | ||
374 | unsigned read_bits = 0; | 373 | unsigned read_bits = 0; |
375 | unsigned j; | 374 | int last_port_offset = ni_65xx_port_by_channel(s->n_chan - 1); |
375 | int port_offset; | ||
376 | 376 | ||
377 | base_bitfield_channel = CR_CHAN(insn->chanspec); | 377 | base_bitfield_channel = CR_CHAN(insn->chanspec); |
378 | for (j = 0; j < max_ports_per_bitfield; ++j) { | 378 | for (port_offset = ni_65xx_port_by_channel(base_bitfield_channel); |
379 | const unsigned port_offset = | 379 | port_offset <= last_port_offset; port_offset++) { |
380 | ni_65xx_port_by_channel(base_bitfield_channel) + j; | 380 | unsigned port = sprivate(s)->base_port + port_offset; |
381 | const unsigned port = | 381 | int base_port_channel = port_offset * ni_65xx_channels_per_port; |
382 | sprivate(s)->base_port + port_offset; | ||
383 | unsigned base_port_channel; | ||
384 | unsigned port_mask, port_data, port_read_bits; | 382 | unsigned port_mask, port_data, port_read_bits; |
385 | int bitshift; | 383 | int bitshift = base_port_channel - base_bitfield_channel; |
386 | if (port >= ni_65xx_total_num_ports(board)) | 384 | |
385 | if (bitshift >= 32) | ||
387 | break; | 386 | break; |
388 | base_port_channel = port_offset * ni_65xx_channels_per_port; | ||
389 | port_mask = data[0]; | 387 | port_mask = data[0]; |
390 | port_data = data[1]; | 388 | port_data = data[1]; |
391 | bitshift = base_port_channel - base_bitfield_channel; | ||
392 | if (bitshift >= 32 || bitshift <= -32) | ||
393 | break; | ||
394 | if (bitshift > 0) { | 389 | if (bitshift > 0) { |
395 | port_mask >>= bitshift; | 390 | port_mask >>= bitshift; |
396 | port_data >>= bitshift; | 391 | port_data >>= bitshift; |
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c index 47c5888461ff..a2e52a0c53c9 100644 --- a/drivers/staging/imx-drm/imx-drm-core.c +++ b/drivers/staging/imx-drm/imx-drm-core.c | |||
@@ -41,7 +41,6 @@ struct imx_drm_device { | |||
41 | struct list_head encoder_list; | 41 | struct list_head encoder_list; |
42 | struct list_head connector_list; | 42 | struct list_head connector_list; |
43 | struct mutex mutex; | 43 | struct mutex mutex; |
44 | int references; | ||
45 | int pipes; | 44 | int pipes; |
46 | struct drm_fbdev_cma *fbhelper; | 45 | struct drm_fbdev_cma *fbhelper; |
47 | }; | 46 | }; |
@@ -241,8 +240,6 @@ struct drm_device *imx_drm_device_get(void) | |||
241 | } | 240 | } |
242 | } | 241 | } |
243 | 242 | ||
244 | imxdrm->references++; | ||
245 | |||
246 | return imxdrm->drm; | 243 | return imxdrm->drm; |
247 | 244 | ||
248 | unwind_crtc: | 245 | unwind_crtc: |
@@ -280,8 +277,6 @@ void imx_drm_device_put(void) | |||
280 | list_for_each_entry(enc, &imxdrm->encoder_list, list) | 277 | list_for_each_entry(enc, &imxdrm->encoder_list, list) |
281 | module_put(enc->owner); | 278 | module_put(enc->owner); |
282 | 279 | ||
283 | imxdrm->references--; | ||
284 | |||
285 | mutex_unlock(&imxdrm->mutex); | 280 | mutex_unlock(&imxdrm->mutex); |
286 | } | 281 | } |
287 | EXPORT_SYMBOL_GPL(imx_drm_device_put); | 282 | EXPORT_SYMBOL_GPL(imx_drm_device_put); |
@@ -485,7 +480,7 @@ int imx_drm_add_crtc(struct drm_crtc *crtc, | |||
485 | 480 | ||
486 | mutex_lock(&imxdrm->mutex); | 481 | mutex_lock(&imxdrm->mutex); |
487 | 482 | ||
488 | if (imxdrm->references) { | 483 | if (imxdrm->drm->open_count) { |
489 | ret = -EBUSY; | 484 | ret = -EBUSY; |
490 | goto err_busy; | 485 | goto err_busy; |
491 | } | 486 | } |
@@ -564,7 +559,7 @@ int imx_drm_add_encoder(struct drm_encoder *encoder, | |||
564 | 559 | ||
565 | mutex_lock(&imxdrm->mutex); | 560 | mutex_lock(&imxdrm->mutex); |
566 | 561 | ||
567 | if (imxdrm->references) { | 562 | if (imxdrm->drm->open_count) { |
568 | ret = -EBUSY; | 563 | ret = -EBUSY; |
569 | goto err_busy; | 564 | goto err_busy; |
570 | } | 565 | } |
@@ -709,7 +704,7 @@ int imx_drm_add_connector(struct drm_connector *connector, | |||
709 | 704 | ||
710 | mutex_lock(&imxdrm->mutex); | 705 | mutex_lock(&imxdrm->mutex); |
711 | 706 | ||
712 | if (imxdrm->references) { | 707 | if (imxdrm->drm->open_count) { |
713 | ret = -EBUSY; | 708 | ret = -EBUSY; |
714 | goto err_busy; | 709 | goto err_busy; |
715 | } | 710 | } |
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c index 2644edf438c1..c8b43442dc74 100644 --- a/drivers/staging/lustre/lustre/obdecho/echo_client.c +++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c | |||
@@ -1387,7 +1387,7 @@ echo_copyout_lsm (struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob) | |||
1387 | if (nob > ulsm_nob) | 1387 | if (nob > ulsm_nob) |
1388 | return (-EINVAL); | 1388 | return (-EINVAL); |
1389 | 1389 | ||
1390 | if (copy_to_user (ulsm, lsm, sizeof(ulsm))) | 1390 | if (copy_to_user (ulsm, lsm, sizeof(*ulsm))) |
1391 | return (-EFAULT); | 1391 | return (-EFAULT); |
1392 | 1392 | ||
1393 | for (i = 0; i < lsm->lsm_stripe_count; i++) { | 1393 | for (i = 0; i < lsm->lsm_stripe_count; i++) { |
diff --git a/drivers/staging/octeon-usb/cvmx-usb.c b/drivers/staging/octeon-usb/cvmx-usb.c index d7b3c82b5ead..45dfe94199ae 100644 --- a/drivers/staging/octeon-usb/cvmx-usb.c +++ b/drivers/staging/octeon-usb/cvmx-usb.c | |||
@@ -604,7 +604,7 @@ int cvmx_usb_initialize(struct cvmx_usb_state *state, int usb_port_number, | |||
604 | } | 604 | } |
605 | } | 605 | } |
606 | 606 | ||
607 | memset(usb, 0, sizeof(usb)); | 607 | memset(usb, 0, sizeof(*usb)); |
608 | usb->init_flags = flags; | 608 | usb->init_flags = flags; |
609 | 609 | ||
610 | /* Initialize the USB state structure */ | 610 | /* Initialize the USB state structure */ |
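The memset(usb, 0, sizeof(usb)) fixed above, and the matching _rtw_memset() fixes in the rtl8188eu hunks that follow, are all the same bug class: sizeof applied to a pointer clears only 4 or 8 bytes rather than the object it points to. A plain illustration with a hypothetical struct:

    #include <string.h>

    struct usb_state {
            int init_flags;
            char buf[256];
    };

    static void reset_state(struct usb_state *usb)
    {
            memset(usb, 0, sizeof(usb));    /* BUG: clears only sizeof(a pointer) bytes */
            memset(usb, 0, sizeof(*usb));   /* correct: clears the whole structure */
    }

The lustre copy_to_user hunk further down is the same mistake in the other direction, copying sizeof(ulsm) instead of sizeof(*ulsm) bytes out to userspace.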
diff --git a/drivers/staging/rtl8188eu/core/rtw_mp.c b/drivers/staging/rtl8188eu/core/rtw_mp.c index c7ff2e4d1f23..9832dcbbd07f 100644 --- a/drivers/staging/rtl8188eu/core/rtw_mp.c +++ b/drivers/staging/rtl8188eu/core/rtw_mp.c | |||
@@ -907,7 +907,7 @@ u32 mp_query_psd(struct adapter *pAdapter, u8 *data) | |||
907 | sscanf(data, "pts =%d, start =%d, stop =%d", &psd_pts, &psd_start, &psd_stop); | 907 | sscanf(data, "pts =%d, start =%d, stop =%d", &psd_pts, &psd_start, &psd_stop); |
908 | } | 908 | } |
909 | 909 | ||
910 | _rtw_memset(data, '\0', sizeof(data)); | 910 | _rtw_memset(data, '\0', sizeof(*data)); |
911 | 911 | ||
912 | i = psd_start; | 912 | i = psd_start; |
913 | while (i < psd_stop) { | 913 | while (i < psd_stop) { |
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c index 9c2e7a20c09e..ec0028d4e61a 100644 --- a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c +++ b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c | |||
@@ -57,7 +57,7 @@ static void Init_ODM_ComInfo_88E(struct adapter *Adapter) | |||
57 | u8 cut_ver, fab_ver; | 57 | u8 cut_ver, fab_ver; |
58 | 58 | ||
59 | /* Init Value */ | 59 | /* Init Value */ |
60 | _rtw_memset(dm_odm, 0, sizeof(dm_odm)); | 60 | _rtw_memset(dm_odm, 0, sizeof(*dm_odm)); |
61 | 61 | ||
62 | dm_odm->Adapter = Adapter; | 62 | dm_odm->Adapter = Adapter; |
63 | 63 | ||
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c index cd4100fb3645..95953ebc0279 100644 --- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c +++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c | |||
@@ -6973,7 +6973,7 @@ static int rtw_mp_ctx(struct net_device *dev, | |||
6973 | stop = strncmp(extra, "stop", 4); | 6973 | stop = strncmp(extra, "stop", 4); |
6974 | sscanf(extra, "count =%d, pkt", &count); | 6974 | sscanf(extra, "count =%d, pkt", &count); |
6975 | 6975 | ||
6976 | _rtw_memset(extra, '\0', sizeof(extra)); | 6976 | _rtw_memset(extra, '\0', sizeof(*extra)); |
6977 | 6977 | ||
6978 | if (stop == 0) { | 6978 | if (stop == 0) { |
6979 | bStartTest = 0; /* To set Stop */ | 6979 | bStartTest = 0; /* To set Stop */ |
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c index d3078d200e50..9ca3180ebaa0 100644 --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c | |||
@@ -54,6 +54,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = { | |||
54 | /*=== Customer ID ===*/ | 54 | /*=== Customer ID ===*/ |
55 | /****** 8188EUS ********/ | 55 | /****** 8188EUS ********/ |
56 | {USB_DEVICE(0x8179, 0x07B8)}, /* Abocom - Abocom */ | 56 | {USB_DEVICE(0x8179, 0x07B8)}, /* Abocom - Abocom */ |
57 | {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ | ||
57 | {} /* Terminating entry */ | 58 | {} /* Terminating entry */ |
58 | }; | 59 | }; |
59 | 60 | ||
diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.c b/drivers/staging/rtl8192u/r819xU_cmdpkt.c index 5bc361b16d4c..56144014b7c9 100644 --- a/drivers/staging/rtl8192u/r819xU_cmdpkt.c +++ b/drivers/staging/rtl8192u/r819xU_cmdpkt.c | |||
@@ -37,6 +37,8 @@ rt_status SendTxCommandPacket(struct net_device *dev, void *pData, u32 DataLen) | |||
37 | /* Get TCB and local buffer from common pool. | 37 | /* Get TCB and local buffer from common pool. |
38 | (It is shared by CmdQ, MgntQ, and USB coalesce DataQ) */ | 38 | (It is shared by CmdQ, MgntQ, and USB coalesce DataQ) */ |
39 | skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + DataLen + 4); | 39 | skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + DataLen + 4); |
40 | if (!skb) | ||
41 | return RT_STATUS_FAILURE; | ||
40 | memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev)); | 42 | memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev)); |
41 | tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); | 43 | tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); |
42 | tcb_desc->queue_index = TXCMD_QUEUE; | 44 | tcb_desc->queue_index = TXCMD_QUEUE; |
diff --git a/drivers/staging/vt6656/iwctl.c b/drivers/staging/vt6656/iwctl.c index d0cf7d8a20e5..8872e0f84f40 100644 --- a/drivers/staging/vt6656/iwctl.c +++ b/drivers/staging/vt6656/iwctl.c | |||
@@ -1634,6 +1634,9 @@ int iwctl_siwencodeext(struct net_device *dev, struct iw_request_info *info, | |||
1634 | if (pMgmt == NULL) | 1634 | if (pMgmt == NULL) |
1635 | return -EFAULT; | 1635 | return -EFAULT; |
1636 | 1636 | ||
1637 | if (!(pDevice->flags & DEVICE_FLAGS_OPENED)) | ||
1638 | return -ENODEV; | ||
1639 | |||
1637 | buf = kzalloc(sizeof(struct viawget_wpa_param), GFP_KERNEL); | 1640 | buf = kzalloc(sizeof(struct viawget_wpa_param), GFP_KERNEL); |
1638 | if (buf == NULL) | 1641 | if (buf == NULL) |
1639 | return -ENOMEM; | 1642 | return -ENOMEM; |
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c index 536971786ae8..6f9d28182445 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c | |||
@@ -1098,6 +1098,8 @@ static int device_close(struct net_device *dev) | |||
1098 | memset(pMgmt->abyCurrBSSID, 0, 6); | 1098 | memset(pMgmt->abyCurrBSSID, 0, 6); |
1099 | pMgmt->eCurrState = WMAC_STATE_IDLE; | 1099 | pMgmt->eCurrState = WMAC_STATE_IDLE; |
1100 | 1100 | ||
1101 | pDevice->flags &= ~DEVICE_FLAGS_OPENED; | ||
1102 | |||
1101 | device_free_tx_bufs(pDevice); | 1103 | device_free_tx_bufs(pDevice); |
1102 | device_free_rx_bufs(pDevice); | 1104 | device_free_rx_bufs(pDevice); |
1103 | device_free_int_bufs(pDevice); | 1105 | device_free_int_bufs(pDevice); |
@@ -1109,7 +1111,6 @@ static int device_close(struct net_device *dev) | |||
1109 | usb_free_urb(pDevice->pInterruptURB); | 1111 | usb_free_urb(pDevice->pInterruptURB); |
1110 | 1112 | ||
1111 | BSSvClearNodeDBTable(pDevice, 0); | 1113 | BSSvClearNodeDBTable(pDevice, 0); |
1112 | pDevice->flags &=(~DEVICE_FLAGS_OPENED); | ||
1113 | 1114 | ||
1114 | DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_close2 \n"); | 1115 | DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_close2 \n"); |
1115 | 1116 | ||
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c index fb743a8811bb..14f3e852215d 100644 --- a/drivers/staging/vt6656/rxtx.c +++ b/drivers/staging/vt6656/rxtx.c | |||
@@ -148,6 +148,8 @@ static void *s_vGetFreeContext(struct vnt_private *pDevice) | |||
148 | DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"GetFreeContext()\n"); | 148 | DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"GetFreeContext()\n"); |
149 | 149 | ||
150 | for (ii = 0; ii < pDevice->cbTD; ii++) { | 150 | for (ii = 0; ii < pDevice->cbTD; ii++) { |
151 | if (!pDevice->apTD[ii]) | ||
152 | return NULL; | ||
151 | pContext = pDevice->apTD[ii]; | 153 | pContext = pDevice->apTD[ii]; |
152 | if (pContext->bBoolInUse == false) { | 154 | if (pContext->bBoolInUse == false) { |
153 | pContext->bBoolInUse = true; | 155 | pContext->bBoolInUse = true; |
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 35b61f7d6c63..38e44b9abf0f 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c | |||
@@ -753,7 +753,8 @@ static void iscsit_unmap_iovec(struct iscsi_cmd *cmd) | |||
753 | 753 | ||
754 | static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn) | 754 | static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn) |
755 | { | 755 | { |
756 | struct iscsi_cmd *cmd; | 756 | LIST_HEAD(ack_list); |
757 | struct iscsi_cmd *cmd, *cmd_p; | ||
757 | 758 | ||
758 | conn->exp_statsn = exp_statsn; | 759 | conn->exp_statsn = exp_statsn; |
759 | 760 | ||
@@ -761,19 +762,23 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn) | |||
761 | return; | 762 | return; |
762 | 763 | ||
763 | spin_lock_bh(&conn->cmd_lock); | 764 | spin_lock_bh(&conn->cmd_lock); |
764 | list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) { | 765 | list_for_each_entry_safe(cmd, cmd_p, &conn->conn_cmd_list, i_conn_node) { |
765 | spin_lock(&cmd->istate_lock); | 766 | spin_lock(&cmd->istate_lock); |
766 | if ((cmd->i_state == ISTATE_SENT_STATUS) && | 767 | if ((cmd->i_state == ISTATE_SENT_STATUS) && |
767 | iscsi_sna_lt(cmd->stat_sn, exp_statsn)) { | 768 | iscsi_sna_lt(cmd->stat_sn, exp_statsn)) { |
768 | cmd->i_state = ISTATE_REMOVE; | 769 | cmd->i_state = ISTATE_REMOVE; |
769 | spin_unlock(&cmd->istate_lock); | 770 | spin_unlock(&cmd->istate_lock); |
770 | iscsit_add_cmd_to_immediate_queue(cmd, conn, | 771 | list_move_tail(&cmd->i_conn_node, &ack_list); |
771 | cmd->i_state); | ||
772 | continue; | 772 | continue; |
773 | } | 773 | } |
774 | spin_unlock(&cmd->istate_lock); | 774 | spin_unlock(&cmd->istate_lock); |
775 | } | 775 | } |
776 | spin_unlock_bh(&conn->cmd_lock); | 776 | spin_unlock_bh(&conn->cmd_lock); |
777 | |||
778 | list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) { | ||
779 | list_del(&cmd->i_conn_node); | ||
780 | iscsit_free_cmd(cmd, false); | ||
781 | } | ||
777 | } | 782 | } |
778 | 783 | ||
779 | static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd) | 784 | static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd) |
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 14d1aed5af1d..ef6d836a4d09 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c | |||
@@ -1192,7 +1192,7 @@ get_target: | |||
1192 | */ | 1192 | */ |
1193 | alloc_tags: | 1193 | alloc_tags: |
1194 | tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth); | 1194 | tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth); |
1195 | tag_num += ISCSIT_EXTRA_TAGS; | 1195 | tag_num += (tag_num / 2) + ISCSIT_EXTRA_TAGS; |
1196 | tag_size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size; | 1196 | tag_size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size; |
1197 | 1197 | ||
1198 | ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size); | 1198 | ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size); |
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index f2de28e178fd..b0cac0c342e1 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c | |||
@@ -736,7 +736,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown) | |||
736 | * Fallthrough | 736 | * Fallthrough |
737 | */ | 737 | */ |
738 | case ISCSI_OP_SCSI_TMFUNC: | 738 | case ISCSI_OP_SCSI_TMFUNC: |
739 | rc = transport_generic_free_cmd(&cmd->se_cmd, 1); | 739 | rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown); |
740 | if (!rc && shutdown && se_cmd && se_cmd->se_sess) { | 740 | if (!rc && shutdown && se_cmd && se_cmd->se_sess) { |
741 | __iscsit_free_cmd(cmd, true, shutdown); | 741 | __iscsit_free_cmd(cmd, true, shutdown); |
742 | target_put_sess_cmd(se_cmd->se_sess, se_cmd); | 742 | target_put_sess_cmd(se_cmd->se_sess, se_cmd); |
@@ -752,7 +752,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown) | |||
752 | se_cmd = &cmd->se_cmd; | 752 | se_cmd = &cmd->se_cmd; |
753 | __iscsit_free_cmd(cmd, true, shutdown); | 753 | __iscsit_free_cmd(cmd, true, shutdown); |
754 | 754 | ||
755 | rc = transport_generic_free_cmd(&cmd->se_cmd, 1); | 755 | rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown); |
756 | if (!rc && shutdown && se_cmd->se_sess) { | 756 | if (!rc && shutdown && se_cmd->se_sess) { |
757 | __iscsit_free_cmd(cmd, true, shutdown); | 757 | __iscsit_free_cmd(cmd, true, shutdown); |
758 | target_put_sess_cmd(se_cmd->se_sess, se_cmd); | 758 | target_put_sess_cmd(se_cmd->se_sess, se_cmd); |
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 6c17295e8d7c..4714c6f8da4b 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c | |||
@@ -349,7 +349,16 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd) | |||
349 | { | 349 | { |
350 | struct se_device *dev = cmd->se_dev; | 350 | struct se_device *dev = cmd->se_dev; |
351 | 351 | ||
352 | cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST; | 352 | /* |
353 | * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through | ||
354 | * within target_complete_ok_work() if the command was successfully | ||
355 | * sent to the backend driver. | ||
356 | */ | ||
357 | spin_lock_irq(&cmd->t_state_lock); | ||
358 | if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) | ||
359 | cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST; | ||
360 | spin_unlock_irq(&cmd->t_state_lock); | ||
361 | |||
353 | /* | 362 | /* |
354 | * Unlock ->caw_sem originally obtained during sbc_compare_and_write() | 363 | * Unlock ->caw_sem originally obtained during sbc_compare_and_write() |
355 | * before the original READ I/O submission. | 364 | * before the original READ I/O submission. |
@@ -363,7 +372,7 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd) | |||
363 | { | 372 | { |
364 | struct se_device *dev = cmd->se_dev; | 373 | struct se_device *dev = cmd->se_dev; |
365 | struct scatterlist *write_sg = NULL, *sg; | 374 | struct scatterlist *write_sg = NULL, *sg; |
366 | unsigned char *buf, *addr; | 375 | unsigned char *buf = NULL, *addr; |
367 | struct sg_mapping_iter m; | 376 | struct sg_mapping_iter m; |
368 | unsigned int offset = 0, len; | 377 | unsigned int offset = 0, len; |
369 | unsigned int nlbas = cmd->t_task_nolb; | 378 | unsigned int nlbas = cmd->t_task_nolb; |
@@ -378,6 +387,15 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd) | |||
378 | */ | 387 | */ |
379 | if (!cmd->t_data_sg || !cmd->t_bidi_data_sg) | 388 | if (!cmd->t_data_sg || !cmd->t_bidi_data_sg) |
380 | return TCM_NO_SENSE; | 389 | return TCM_NO_SENSE; |
390 | /* | ||
391 | * Immediately exit + release dev->caw_sem if command has already | ||
392 | * been failed with a non-zero SCSI status. | ||
393 | */ | ||
394 | if (cmd->scsi_status) { | ||
395 | pr_err("compare_and_write_callback: non zero scsi_status:" | ||
396 | " 0x%02x\n", cmd->scsi_status); | ||
397 | goto out; | ||
398 | } | ||
381 | 399 | ||
382 | buf = kzalloc(cmd->data_length, GFP_KERNEL); | 400 | buf = kzalloc(cmd->data_length, GFP_KERNEL); |
383 | if (!buf) { | 401 | if (!buf) { |
@@ -508,6 +526,12 @@ sbc_compare_and_write(struct se_cmd *cmd) | |||
508 | cmd->transport_complete_callback = NULL; | 526 | cmd->transport_complete_callback = NULL; |
509 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 527 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
510 | } | 528 | } |
529 | /* | ||
530 | * Reset cmd->data_length to individual block_size in order to not | ||
531 | * confuse backend drivers that depend on this value matching the | ||
532 | * size of the I/O being submitted. | ||
533 | */ | ||
534 | cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size; | ||
511 | 535 | ||
512 | ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents, | 536 | ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents, |
513 | DMA_FROM_DEVICE); | 537 | DMA_FROM_DEVICE); |
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 84747cc1aac0..81e945eefbbd 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -236,17 +236,24 @@ int transport_alloc_session_tags(struct se_session *se_sess, | |||
236 | { | 236 | { |
237 | int rc; | 237 | int rc; |
238 | 238 | ||
239 | se_sess->sess_cmd_map = kzalloc(tag_num * tag_size, GFP_KERNEL); | 239 | se_sess->sess_cmd_map = kzalloc(tag_num * tag_size, |
240 | GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); | ||
240 | if (!se_sess->sess_cmd_map) { | 241 | if (!se_sess->sess_cmd_map) { |
241 | pr_err("Unable to allocate se_sess->sess_cmd_map\n"); | 242 | se_sess->sess_cmd_map = vzalloc(tag_num * tag_size); |
242 | return -ENOMEM; | 243 | if (!se_sess->sess_cmd_map) { |
244 | pr_err("Unable to allocate se_sess->sess_cmd_map\n"); | ||
245 | return -ENOMEM; | ||
246 | } | ||
243 | } | 247 | } |
244 | 248 | ||
245 | rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num); | 249 | rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num); |
246 | if (rc < 0) { | 250 | if (rc < 0) { |
247 | pr_err("Unable to init se_sess->sess_tag_pool," | 251 | pr_err("Unable to init se_sess->sess_tag_pool," |
248 | " tag_num: %u\n", tag_num); | 252 | " tag_num: %u\n", tag_num); |
249 | kfree(se_sess->sess_cmd_map); | 253 | if (is_vmalloc_addr(se_sess->sess_cmd_map)) |
254 | vfree(se_sess->sess_cmd_map); | ||
255 | else | ||
256 | kfree(se_sess->sess_cmd_map); | ||
250 | se_sess->sess_cmd_map = NULL; | 257 | se_sess->sess_cmd_map = NULL; |
251 | return -ENOMEM; | 258 | return -ENOMEM; |
252 | } | 259 | } |
@@ -412,7 +419,10 @@ void transport_free_session(struct se_session *se_sess) | |||
412 | { | 419 | { |
413 | if (se_sess->sess_cmd_map) { | 420 | if (se_sess->sess_cmd_map) { |
414 | percpu_ida_destroy(&se_sess->sess_tag_pool); | 421 | percpu_ida_destroy(&se_sess->sess_tag_pool); |
415 | kfree(se_sess->sess_cmd_map); | 422 | if (is_vmalloc_addr(se_sess->sess_cmd_map)) |
423 | vfree(se_sess->sess_cmd_map); | ||
424 | else | ||
425 | kfree(se_sess->sess_cmd_map); | ||
416 | } | 426 | } |
417 | kmem_cache_free(se_sess_cache, se_sess); | 427 | kmem_cache_free(se_sess_cache, se_sess); |
418 | } | 428 | } |
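The transport_alloc_session_tags() hunk retries a failed contiguous kzalloc() with vzalloc(), since the per-session tag map can be large, and both free paths pick vfree() or kfree() via is_vmalloc_addr(). A minimal sketch of that allocate/free pairing (helper names hypothetical; later kernels wrap the same idea as kvzalloc()/kvfree()):

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    static void *tag_map_alloc(size_t size)
    {
            void *p;

            /* try physically contiguous memory first, without the failure warning */
            p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
            if (!p)
                    p = vzalloc(size);      /* fall back to virtually contiguous memory */
            return p;
    }

    static void tag_map_free(void *p)
    {
            if (is_vmalloc_addr(p))
                    vfree(p);
            else
                    kfree(p);
    }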
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c index 4d22e7d2adca..3da4fd10b9f8 100644 --- a/drivers/target/target_core_xcopy.c +++ b/drivers/target/target_core_xcopy.c | |||
@@ -298,8 +298,8 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op | |||
298 | (unsigned long long)xop->dst_lba); | 298 | (unsigned long long)xop->dst_lba); |
299 | 299 | ||
300 | if (dc != 0) { | 300 | if (dc != 0) { |
301 | xop->dbl = (desc[29] << 16) & 0xff; | 301 | xop->dbl = (desc[29] & 0xff) << 16; |
302 | xop->dbl |= (desc[30] << 8) & 0xff; | 302 | xop->dbl |= (desc[30] & 0xff) << 8; |
303 | xop->dbl |= desc[31] & 0xff; | 303 | xop->dbl |= desc[31] & 0xff; |
304 | 304 | ||
305 | pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl); | 305 | pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl); |
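The xcopy hunk fixes an operator-ordering bug: (desc[29] << 16) & 0xff masks after shifting, so every bit that was shifted up is discarded and the field always reads as zero; masking (or widening) before the shift keeps the byte. Assembling a 24-bit big-endian field from three descriptor bytes looks like this, as a plain illustration (function name hypothetical):

    #include <stdint.h>

    static uint32_t get_be24(const unsigned char *desc)
    {
            return ((uint32_t)desc[0] << 16) |      /* widen before shifting */
                   ((uint32_t)desc[1] << 8)  |
                    (uint32_t)desc[2];
    }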
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c index e61c36cbb866..c193af6a628f 100644 --- a/drivers/tty/hvc/hvc_xen.c +++ b/drivers/tty/hvc/hvc_xen.c | |||
@@ -636,6 +636,7 @@ struct console xenboot_console = { | |||
636 | .name = "xenboot", | 636 | .name = "xenboot", |
637 | .write = xenboot_write_console, | 637 | .write = xenboot_write_console, |
638 | .flags = CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME, | 638 | .flags = CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME, |
639 | .index = -1, | ||
639 | }; | 640 | }; |
640 | #endif /* CONFIG_EARLY_PRINTK */ | 641 | #endif /* CONFIG_EARLY_PRINTK */ |
641 | 642 | ||
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index c9a9ddd1d0bc..7a744b69c3d1 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c | |||
@@ -1758,8 +1758,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old) | |||
1758 | canon_change = (old->c_lflag ^ tty->termios.c_lflag) & ICANON; | 1758 | canon_change = (old->c_lflag ^ tty->termios.c_lflag) & ICANON; |
1759 | if (canon_change) { | 1759 | if (canon_change) { |
1760 | bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE); | 1760 | bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE); |
1761 | ldata->line_start = 0; | 1761 | ldata->line_start = ldata->canon_head = ldata->read_tail; |
1762 | ldata->canon_head = ldata->read_tail; | ||
1763 | ldata->erasing = 0; | 1762 | ldata->erasing = 0; |
1764 | ldata->lnext = 0; | 1763 | ldata->lnext = 0; |
1765 | } | 1764 | } |
@@ -2184,28 +2183,34 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, | |||
2184 | 2183 | ||
2185 | if (!input_available_p(tty, 0)) { | 2184 | if (!input_available_p(tty, 0)) { |
2186 | if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) { | 2185 | if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) { |
2187 | retval = -EIO; | 2186 | up_read(&tty->termios_rwsem); |
2188 | break; | 2187 | tty_flush_to_ldisc(tty); |
2189 | } | 2188 | down_read(&tty->termios_rwsem); |
2190 | if (tty_hung_up_p(file)) | 2189 | if (!input_available_p(tty, 0)) { |
2191 | break; | 2190 | retval = -EIO; |
2192 | if (!timeout) | 2191 | break; |
2193 | break; | 2192 | } |
2194 | if (file->f_flags & O_NONBLOCK) { | 2193 | } else { |
2195 | retval = -EAGAIN; | 2194 | if (tty_hung_up_p(file)) |
2196 | break; | 2195 | break; |
2197 | } | 2196 | if (!timeout) |
2198 | if (signal_pending(current)) { | 2197 | break; |
2199 | retval = -ERESTARTSYS; | 2198 | if (file->f_flags & O_NONBLOCK) { |
2200 | break; | 2199 | retval = -EAGAIN; |
2201 | } | 2200 | break; |
2202 | n_tty_set_room(tty); | 2201 | } |
2203 | up_read(&tty->termios_rwsem); | 2202 | if (signal_pending(current)) { |
2203 | retval = -ERESTARTSYS; | ||
2204 | break; | ||
2205 | } | ||
2206 | n_tty_set_room(tty); | ||
2207 | up_read(&tty->termios_rwsem); | ||
2204 | 2208 | ||
2205 | timeout = schedule_timeout(timeout); | 2209 | timeout = schedule_timeout(timeout); |
2206 | 2210 | ||
2207 | down_read(&tty->termios_rwsem); | 2211 | down_read(&tty->termios_rwsem); |
2208 | continue; | 2212 | continue; |
2213 | } | ||
2209 | } | 2214 | } |
2210 | __set_current_state(TASK_RUNNING); | 2215 | __set_current_state(TASK_RUNNING); |
2211 | 2216 | ||
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c index 52379e56a31e..44077c0b7670 100644 --- a/drivers/tty/serial/pch_uart.c +++ b/drivers/tty/serial/pch_uart.c | |||
@@ -667,30 +667,21 @@ static int pop_tx_x(struct eg20t_port *priv, unsigned char *buf) | |||
667 | 667 | ||
668 | static int dma_push_rx(struct eg20t_port *priv, int size) | 668 | static int dma_push_rx(struct eg20t_port *priv, int size) |
669 | { | 669 | { |
670 | struct tty_struct *tty; | ||
671 | int room; | 670 | int room; |
672 | struct uart_port *port = &priv->port; | 671 | struct uart_port *port = &priv->port; |
673 | struct tty_port *tport = &port->state->port; | 672 | struct tty_port *tport = &port->state->port; |
674 | 673 | ||
675 | port = &priv->port; | ||
676 | tty = tty_port_tty_get(tport); | ||
677 | if (!tty) { | ||
678 | dev_dbg(priv->port.dev, "%s:tty is busy now", __func__); | ||
679 | return 0; | ||
680 | } | ||
681 | |||
682 | room = tty_buffer_request_room(tport, size); | 674 | room = tty_buffer_request_room(tport, size); |
683 | 675 | ||
684 | if (room < size) | 676 | if (room < size) |
685 | dev_warn(port->dev, "Rx overrun: dropping %u bytes\n", | 677 | dev_warn(port->dev, "Rx overrun: dropping %u bytes\n", |
686 | size - room); | 678 | size - room); |
687 | if (!room) | 679 | if (!room) |
688 | return room; | 680 | return 0; |
689 | 681 | ||
690 | tty_insert_flip_string(tport, sg_virt(&priv->sg_rx), size); | 682 | tty_insert_flip_string(tport, sg_virt(&priv->sg_rx), size); |
691 | 683 | ||
692 | port->icount.rx += room; | 684 | port->icount.rx += room; |
693 | tty_kref_put(tty); | ||
694 | 685 | ||
695 | return room; | 686 | return room; |
696 | } | 687 | } |
@@ -1098,6 +1089,8 @@ static void pch_uart_err_ir(struct eg20t_port *priv, unsigned int lsr) | |||
1098 | if (tty == NULL) { | 1089 | if (tty == NULL) { |
1099 | for (i = 0; error_msg[i] != NULL; i++) | 1090 | for (i = 0; error_msg[i] != NULL; i++) |
1100 | dev_err(&priv->pdev->dev, error_msg[i]); | 1091 | dev_err(&priv->pdev->dev, error_msg[i]); |
1092 | } else { | ||
1093 | tty_kref_put(tty); | ||
1101 | } | 1094 | } |
1102 | } | 1095 | } |
1103 | 1096 | ||
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c index d0d972f7e43e..0489a2bdcdf9 100644 --- a/drivers/tty/serial/serial-tegra.c +++ b/drivers/tty/serial/serial-tegra.c | |||
@@ -732,7 +732,7 @@ static irqreturn_t tegra_uart_isr(int irq, void *data) | |||
732 | static void tegra_uart_stop_rx(struct uart_port *u) | 732 | static void tegra_uart_stop_rx(struct uart_port *u) |
733 | { | 733 | { |
734 | struct tegra_uart_port *tup = to_tegra_uport(u); | 734 | struct tegra_uart_port *tup = to_tegra_uport(u); |
735 | struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port); | 735 | struct tty_struct *tty; |
736 | struct tty_port *port = &u->state->port; | 736 | struct tty_port *port = &u->state->port; |
737 | struct dma_tx_state state; | 737 | struct dma_tx_state state; |
738 | unsigned long ier; | 738 | unsigned long ier; |
@@ -744,6 +744,8 @@ static void tegra_uart_stop_rx(struct uart_port *u) | |||
744 | if (!tup->rx_in_progress) | 744 | if (!tup->rx_in_progress) |
745 | return; | 745 | return; |
746 | 746 | ||
747 | tty = tty_port_tty_get(&tup->uport.state->port); | ||
748 | |||
747 | tegra_uart_wait_sym_time(tup, 1); /* wait a character interval */ | 749 | tegra_uart_wait_sym_time(tup, 1); /* wait a character interval */ |
748 | 750 | ||
749 | ier = tup->ier_shadow; | 751 | ier = tup->ier_shadow; |
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c index 03ba081c5772..6fd60fece6b4 100644 --- a/drivers/tty/tty_ioctl.c +++ b/drivers/tty/tty_ioctl.c | |||
@@ -1201,6 +1201,9 @@ int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file, | |||
1201 | } | 1201 | } |
1202 | return 0; | 1202 | return 0; |
1203 | case TCFLSH: | 1203 | case TCFLSH: |
1204 | retval = tty_check_change(tty); | ||
1205 | if (retval) | ||
1206 | return retval; | ||
1204 | return __tty_perform_flush(tty, arg); | 1207 | return __tty_perform_flush(tty, arg); |
1205 | default: | 1208 | default: |
1206 | /* Try the mode commands */ | 1209 | /* Try the mode commands */ |
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig index 4a851e15e58c..77b47d82c9a6 100644 --- a/drivers/usb/chipidea/Kconfig +++ b/drivers/usb/chipidea/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config USB_CHIPIDEA | 1 | config USB_CHIPIDEA |
2 | tristate "ChipIdea Highspeed Dual Role Controller" | 2 | tristate "ChipIdea Highspeed Dual Role Controller" |
3 | depends on (USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET) | 3 | depends on ((USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET)) && HAS_DMA |
4 | help | 4 | help |
5 | Say Y here if your system has a dual role high speed USB | 5 | Say Y here if your system has a dual role high speed USB |
6 | controller based on ChipIdea silicon IP. Currently, only the | 6 | controller based on ChipIdea silicon IP. Currently, only the |
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c index 74d998d9b45b..be822a2c1776 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.c +++ b/drivers/usb/chipidea/ci_hdrc_imx.c | |||
@@ -131,7 +131,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) | |||
131 | if (ret) { | 131 | if (ret) { |
132 | dev_err(&pdev->dev, "usbmisc init failed, ret=%d\n", | 132 | dev_err(&pdev->dev, "usbmisc init failed, ret=%d\n", |
133 | ret); | 133 | ret); |
134 | goto err_clk; | 134 | goto err_phy; |
135 | } | 135 | } |
136 | } | 136 | } |
137 | 137 | ||
@@ -143,7 +143,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) | |||
143 | dev_err(&pdev->dev, | 143 | dev_err(&pdev->dev, |
144 | "Can't register ci_hdrc platform device, err=%d\n", | 144 | "Can't register ci_hdrc platform device, err=%d\n", |
145 | ret); | 145 | ret); |
146 | goto err_clk; | 146 | goto err_phy; |
147 | } | 147 | } |
148 | 148 | ||
149 | if (data->usbmisc_data) { | 149 | if (data->usbmisc_data) { |
@@ -164,6 +164,9 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) | |||
164 | 164 | ||
165 | disable_device: | 165 | disable_device: |
166 | ci_hdrc_remove_device(data->ci_pdev); | 166 | ci_hdrc_remove_device(data->ci_pdev); |
167 | err_phy: | ||
168 | if (data->phy) | ||
169 | usb_phy_shutdown(data->phy); | ||
167 | err_clk: | 170 | err_clk: |
168 | clk_disable_unprepare(data->clk); | 171 | clk_disable_unprepare(data->clk); |
169 | return ret; | 172 | return ret; |
diff --git a/drivers/usb/chipidea/ci_hdrc_pci.c b/drivers/usb/chipidea/ci_hdrc_pci.c index 042320a6c6c7..d514332ac081 100644 --- a/drivers/usb/chipidea/ci_hdrc_pci.c +++ b/drivers/usb/chipidea/ci_hdrc_pci.c | |||
@@ -129,7 +129,12 @@ static DEFINE_PCI_DEVICE_TABLE(ci_hdrc_pci_id_table) = { | |||
129 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0829), | 129 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0829), |
130 | .driver_data = (kernel_ulong_t)&penwell_pci_platdata, | 130 | .driver_data = (kernel_ulong_t)&penwell_pci_platdata, |
131 | }, | 131 | }, |
132 | { 0, 0, 0, 0, 0, 0, 0 /* end: all zeroes */ } | 132 | { |
133 | /* Intel Clovertrail */ | ||
134 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe006), | ||
135 | .driver_data = (kernel_ulong_t)&penwell_pci_platdata, | ||
136 | }, | ||
137 | { 0 } /* end: all zeroes */ | ||
133 | }; | 138 | }; |
134 | MODULE_DEVICE_TABLE(pci, ci_hdrc_pci_id_table); | 139 | MODULE_DEVICE_TABLE(pci, ci_hdrc_pci_id_table); |
135 | 140 | ||
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 94626409559a..23763dcec069 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c | |||
@@ -605,6 +605,7 @@ static int ci_hdrc_remove(struct platform_device *pdev) | |||
605 | dbg_remove_files(ci); | 605 | dbg_remove_files(ci); |
606 | free_irq(ci->irq, ci); | 606 | free_irq(ci->irq, ci); |
607 | ci_role_destroy(ci); | 607 | ci_role_destroy(ci); |
608 | kfree(ci->hw_bank.regmap); | ||
608 | 609 | ||
609 | return 0; | 610 | return 0; |
610 | } | 611 | } |
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index 6b4c2f2eb946..9333083dd111 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c | |||
@@ -1600,6 +1600,8 @@ static void destroy_eps(struct ci_hdrc *ci) | |||
1600 | for (i = 0; i < ci->hw_ep_max; i++) { | 1600 | for (i = 0; i < ci->hw_ep_max; i++) { |
1601 | struct ci_hw_ep *hwep = &ci->ci_hw_ep[i]; | 1601 | struct ci_hw_ep *hwep = &ci->ci_hw_ep[i]; |
1602 | 1602 | ||
1603 | if (hwep->pending_td) | ||
1604 | free_pending_td(hwep); | ||
1603 | dma_pool_free(ci->qh_pool, hwep->qh.ptr, hwep->qh.dma); | 1605 | dma_pool_free(ci->qh_pool, hwep->qh.ptr, hwep->qh.dma); |
1604 | } | 1606 | } |
1605 | } | 1607 | } |
@@ -1667,13 +1669,13 @@ static int ci_udc_stop(struct usb_gadget *gadget, | |||
1667 | if (ci->platdata->notify_event) | 1669 | if (ci->platdata->notify_event) |
1668 | ci->platdata->notify_event(ci, | 1670 | ci->platdata->notify_event(ci, |
1669 | CI_HDRC_CONTROLLER_STOPPED_EVENT); | 1671 | CI_HDRC_CONTROLLER_STOPPED_EVENT); |
1670 | ci->driver = NULL; | ||
1671 | spin_unlock_irqrestore(&ci->lock, flags); | 1672 | spin_unlock_irqrestore(&ci->lock, flags); |
1672 | _gadget_stop_activity(&ci->gadget); | 1673 | _gadget_stop_activity(&ci->gadget); |
1673 | spin_lock_irqsave(&ci->lock, flags); | 1674 | spin_lock_irqsave(&ci->lock, flags); |
1674 | pm_runtime_put(&ci->gadget.dev); | 1675 | pm_runtime_put(&ci->gadget.dev); |
1675 | } | 1676 | } |
1676 | 1677 | ||
1678 | ci->driver = NULL; | ||
1677 | spin_unlock_irqrestore(&ci->lock, flags); | 1679 | spin_unlock_irqrestore(&ci->lock, flags); |
1678 | 1680 | ||
1679 | return 0; | 1681 | return 0; |
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 737e3c19967b..71dc5d768fa5 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c | |||
@@ -742,6 +742,22 @@ static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype, | |||
742 | if ((index & ~USB_DIR_IN) == 0) | 742 | if ((index & ~USB_DIR_IN) == 0) |
743 | return 0; | 743 | return 0; |
744 | ret = findintfep(ps->dev, index); | 744 | ret = findintfep(ps->dev, index); |
745 | if (ret < 0) { | ||
746 | /* | ||
747 | * Some not fully compliant Win apps seem to get | ||
748 | * index wrong and have the endpoint number here | ||
749 | * rather than the endpoint address (with the | ||
750 | * correct direction). Win does let this through, | ||
751 | * so we'll not reject it here but leave it to | ||
752 | * the device to not break KVM. But we warn. | ||
753 | */ | ||
754 | ret = findintfep(ps->dev, index ^ 0x80); | ||
755 | if (ret >= 0) | ||
756 | dev_info(&ps->dev->dev, | ||
757 | "%s: process %i (%s) requesting ep %02x but needs %02x\n", | ||
758 | __func__, task_pid_nr(current), | ||
759 | current->comm, index, index ^ 0x80); | ||
760 | } | ||
745 | if (ret >= 0) | 761 | if (ret >= 0) |
746 | ret = checkintf(ps, ret); | 762 | ret = checkintf(ps, ret); |
747 | break; | 763 | break; |
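Note on the retry in the hunk above: a USB endpoint address is the endpoint number with bit 7 (USB_DIR_IN, 0x80) set for IN endpoints, so XOR-ing the user-supplied index with 0x80 turns a bare endpoint number into the address the lookup expects. A minimal user-space sketch of the same idea — the find_ep() helper and the sample addresses are hypothetical, not kernel APIs:

#include <stdio.h>

#define USB_DIR_IN 0x80

/* Hypothetical lookup: succeeds only for addresses this device exposes. */
static int find_ep(unsigned int addr)
{
        return (addr == 0x81 || addr == 0x02) ? 0 : -1;
}

int main(void)
{
        unsigned int index = 0x01;      /* caller passed the endpoint number */
        int ret = find_ep(index);

        if (ret < 0) {
                /* retry with the direction bit set, as the hunk above does */
                ret = find_ep(index ^ USB_DIR_IN);
                if (ret >= 0)
                        printf("ep %02x recovered as %02x\n",
                               index, index ^ USB_DIR_IN);
        }
        return ret < 0 ? 1 : 0;
}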
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index dde4c83516a1..e6b682c6c236 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
@@ -3426,6 +3426,9 @@ static int usb_req_set_sel(struct usb_device *udev, enum usb3_link_state state) | |||
3426 | unsigned long long u2_pel; | 3426 | unsigned long long u2_pel; |
3427 | int ret; | 3427 | int ret; |
3428 | 3428 | ||
3429 | if (udev->state != USB_STATE_CONFIGURED) | ||
3430 | return 0; | ||
3431 | |||
3429 | /* Convert SEL and PEL stored in ns to us */ | 3432 | /* Convert SEL and PEL stored in ns to us */ |
3430 | u1_sel = DIV_ROUND_UP(udev->u1_params.sel, 1000); | 3433 | u1_sel = DIV_ROUND_UP(udev->u1_params.sel, 1000); |
3431 | u1_pel = DIV_ROUND_UP(udev->u1_params.pel, 1000); | 3434 | u1_pel = DIV_ROUND_UP(udev->u1_params.pel, 1000); |
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 997ebe420bc9..2e252aae51ca 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #define PCI_VENDOR_ID_SYNOPSYS 0x16c3 | 29 | #define PCI_VENDOR_ID_SYNOPSYS 0x16c3 |
30 | #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd | 30 | #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd |
31 | #define PCI_DEVICE_ID_INTEL_BYT 0x0f37 | 31 | #define PCI_DEVICE_ID_INTEL_BYT 0x0f37 |
32 | #define PCI_DEVICE_ID_INTEL_MRFLD 0x119e | ||
32 | 33 | ||
33 | struct dwc3_pci { | 34 | struct dwc3_pci { |
34 | struct device *dev; | 35 | struct device *dev; |
@@ -189,6 +190,7 @@ static DEFINE_PCI_DEVICE_TABLE(dwc3_pci_id_table) = { | |||
189 | PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3), | 190 | PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3), |
190 | }, | 191 | }, |
191 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), }, | 192 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), }, |
193 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), }, | ||
192 | { } /* Terminating Entry */ | 194 | { } /* Terminating Entry */ |
193 | }; | 195 | }; |
194 | MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table); | 196 | MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table); |
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c index 1a66c5baa0d1..44cf775a8627 100644 --- a/drivers/usb/gadget/f_fs.c +++ b/drivers/usb/gadget/f_fs.c | |||
@@ -1034,37 +1034,19 @@ struct ffs_sb_fill_data { | |||
1034 | struct ffs_file_perms perms; | 1034 | struct ffs_file_perms perms; |
1035 | umode_t root_mode; | 1035 | umode_t root_mode; |
1036 | const char *dev_name; | 1036 | const char *dev_name; |
1037 | union { | 1037 | struct ffs_data *ffs_data; |
1038 | /* set by ffs_fs_mount(), read by ffs_sb_fill() */ | ||
1039 | void *private_data; | ||
1040 | /* set by ffs_sb_fill(), read by ffs_fs_mount */ | ||
1041 | struct ffs_data *ffs_data; | ||
1042 | }; | ||
1043 | }; | 1038 | }; |
1044 | 1039 | ||
1045 | static int ffs_sb_fill(struct super_block *sb, void *_data, int silent) | 1040 | static int ffs_sb_fill(struct super_block *sb, void *_data, int silent) |
1046 | { | 1041 | { |
1047 | struct ffs_sb_fill_data *data = _data; | 1042 | struct ffs_sb_fill_data *data = _data; |
1048 | struct inode *inode; | 1043 | struct inode *inode; |
1049 | struct ffs_data *ffs; | 1044 | struct ffs_data *ffs = data->ffs_data; |
1050 | 1045 | ||
1051 | ENTER(); | 1046 | ENTER(); |
1052 | 1047 | ||
1053 | /* Initialise data */ | ||
1054 | ffs = ffs_data_new(); | ||
1055 | if (unlikely(!ffs)) | ||
1056 | goto Enomem; | ||
1057 | |||
1058 | ffs->sb = sb; | 1048 | ffs->sb = sb; |
1059 | ffs->dev_name = kstrdup(data->dev_name, GFP_KERNEL); | 1049 | data->ffs_data = NULL; |
1060 | if (unlikely(!ffs->dev_name)) | ||
1061 | goto Enomem; | ||
1062 | ffs->file_perms = data->perms; | ||
1063 | ffs->private_data = data->private_data; | ||
1064 | |||
1065 | /* used by the caller of this function */ | ||
1066 | data->ffs_data = ffs; | ||
1067 | |||
1068 | sb->s_fs_info = ffs; | 1050 | sb->s_fs_info = ffs; |
1069 | sb->s_blocksize = PAGE_CACHE_SIZE; | 1051 | sb->s_blocksize = PAGE_CACHE_SIZE; |
1070 | sb->s_blocksize_bits = PAGE_CACHE_SHIFT; | 1052 | sb->s_blocksize_bits = PAGE_CACHE_SHIFT; |
@@ -1080,17 +1062,14 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent) | |||
1080 | &data->perms); | 1062 | &data->perms); |
1081 | sb->s_root = d_make_root(inode); | 1063 | sb->s_root = d_make_root(inode); |
1082 | if (unlikely(!sb->s_root)) | 1064 | if (unlikely(!sb->s_root)) |
1083 | goto Enomem; | 1065 | return -ENOMEM; |
1084 | 1066 | ||
1085 | /* EP0 file */ | 1067 | /* EP0 file */ |
1086 | if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs, | 1068 | if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs, |
1087 | &ffs_ep0_operations, NULL))) | 1069 | &ffs_ep0_operations, NULL))) |
1088 | goto Enomem; | 1070 | return -ENOMEM; |
1089 | 1071 | ||
1090 | return 0; | 1072 | return 0; |
1091 | |||
1092 | Enomem: | ||
1093 | return -ENOMEM; | ||
1094 | } | 1073 | } |
1095 | 1074 | ||
1096 | static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts) | 1075 | static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts) |
@@ -1193,6 +1172,7 @@ ffs_fs_mount(struct file_system_type *t, int flags, | |||
1193 | struct dentry *rv; | 1172 | struct dentry *rv; |
1194 | int ret; | 1173 | int ret; |
1195 | void *ffs_dev; | 1174 | void *ffs_dev; |
1175 | struct ffs_data *ffs; | ||
1196 | 1176 | ||
1197 | ENTER(); | 1177 | ENTER(); |
1198 | 1178 | ||
@@ -1200,18 +1180,30 @@ ffs_fs_mount(struct file_system_type *t, int flags, | |||
1200 | if (unlikely(ret < 0)) | 1180 | if (unlikely(ret < 0)) |
1201 | return ERR_PTR(ret); | 1181 | return ERR_PTR(ret); |
1202 | 1182 | ||
1183 | ffs = ffs_data_new(); | ||
1184 | if (unlikely(!ffs)) | ||
1185 | return ERR_PTR(-ENOMEM); | ||
1186 | ffs->file_perms = data.perms; | ||
1187 | |||
1188 | ffs->dev_name = kstrdup(dev_name, GFP_KERNEL); | ||
1189 | if (unlikely(!ffs->dev_name)) { | ||
1190 | ffs_data_put(ffs); | ||
1191 | return ERR_PTR(-ENOMEM); | ||
1192 | } | ||
1193 | |||
1203 | ffs_dev = functionfs_acquire_dev_callback(dev_name); | 1194 | ffs_dev = functionfs_acquire_dev_callback(dev_name); |
1204 | if (IS_ERR(ffs_dev)) | 1195 | if (IS_ERR(ffs_dev)) { |
1205 | return ffs_dev; | 1196 | ffs_data_put(ffs); |
1197 | return ERR_CAST(ffs_dev); | ||
1198 | } | ||
1199 | ffs->private_data = ffs_dev; | ||
1200 | data.ffs_data = ffs; | ||
1206 | 1201 | ||
1207 | data.dev_name = dev_name; | ||
1208 | data.private_data = ffs_dev; | ||
1209 | rv = mount_nodev(t, flags, &data, ffs_sb_fill); | 1202 | rv = mount_nodev(t, flags, &data, ffs_sb_fill); |
1210 | 1203 | if (IS_ERR(rv) && data.ffs_data) { | |
1211 | /* data.ffs_data is set by ffs_sb_fill */ | ||
1212 | if (IS_ERR(rv)) | ||
1213 | functionfs_release_dev_callback(data.ffs_data); | 1204 | functionfs_release_dev_callback(data.ffs_data); |
1214 | 1205 | ffs_data_put(data.ffs_data); | |
1206 | } | ||
1215 | return rv; | 1207 | return rv; |
1216 | } | 1208 | } |
1217 | 1209 | ||
@@ -2264,6 +2256,8 @@ static int ffs_func_bind(struct usb_configuration *c, | |||
2264 | data->raw_descs + ret, | 2256 | data->raw_descs + ret, |
2265 | (sizeof data->raw_descs) - ret, | 2257 | (sizeof data->raw_descs) - ret, |
2266 | __ffs_func_bind_do_descs, func); | 2258 | __ffs_func_bind_do_descs, func); |
2259 | if (unlikely(ret < 0)) | ||
2260 | goto error; | ||
2267 | } | 2261 | } |
2268 | 2262 | ||
2269 | /* | 2263 | /* |
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c index cc9207473dbc..0ac6064aa3b8 100644 --- a/drivers/usb/gadget/pxa25x_udc.c +++ b/drivers/usb/gadget/pxa25x_udc.c | |||
@@ -2054,7 +2054,7 @@ static struct pxa25x_udc memory = { | |||
2054 | /* | 2054 | /* |
2055 | * probe - binds to the platform device | 2055 | * probe - binds to the platform device |
2056 | */ | 2056 | */ |
2057 | static int __init pxa25x_udc_probe(struct platform_device *pdev) | 2057 | static int pxa25x_udc_probe(struct platform_device *pdev) |
2058 | { | 2058 | { |
2059 | struct pxa25x_udc *dev = &memory; | 2059 | struct pxa25x_udc *dev = &memory; |
2060 | int retval, irq; | 2060 | int retval, irq; |
@@ -2203,7 +2203,7 @@ static void pxa25x_udc_shutdown(struct platform_device *_dev) | |||
2203 | pullup_off(); | 2203 | pullup_off(); |
2204 | } | 2204 | } |
2205 | 2205 | ||
2206 | static int __exit pxa25x_udc_remove(struct platform_device *pdev) | 2206 | static int pxa25x_udc_remove(struct platform_device *pdev) |
2207 | { | 2207 | { |
2208 | struct pxa25x_udc *dev = platform_get_drvdata(pdev); | 2208 | struct pxa25x_udc *dev = platform_get_drvdata(pdev); |
2209 | 2209 | ||
@@ -2294,7 +2294,8 @@ static int pxa25x_udc_resume(struct platform_device *dev) | |||
2294 | 2294 | ||
2295 | static struct platform_driver udc_driver = { | 2295 | static struct platform_driver udc_driver = { |
2296 | .shutdown = pxa25x_udc_shutdown, | 2296 | .shutdown = pxa25x_udc_shutdown, |
2297 | .remove = __exit_p(pxa25x_udc_remove), | 2297 | .probe = pxa25x_udc_probe, |
2298 | .remove = pxa25x_udc_remove, | ||
2298 | .suspend = pxa25x_udc_suspend, | 2299 | .suspend = pxa25x_udc_suspend, |
2299 | .resume = pxa25x_udc_resume, | 2300 | .resume = pxa25x_udc_resume, |
2300 | .driver = { | 2301 | .driver = { |
@@ -2303,7 +2304,7 @@ static struct platform_driver udc_driver = { | |||
2303 | }, | 2304 | }, |
2304 | }; | 2305 | }; |
2305 | 2306 | ||
2306 | module_platform_driver_probe(udc_driver, pxa25x_udc_probe); | 2307 | module_platform_driver(udc_driver); |
2307 | 2308 | ||
2308 | MODULE_DESCRIPTION(DRIVER_DESC); | 2309 | MODULE_DESCRIPTION(DRIVER_DESC); |
2309 | MODULE_AUTHOR("Frank Becker, Robert Schwebel, David Brownell"); | 2310 | MODULE_AUTHOR("Frank Becker, Robert Schwebel, David Brownell"); |
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c index 6bddf1aa2347..a8a99e4748d5 100644 --- a/drivers/usb/gadget/s3c-hsotg.c +++ b/drivers/usb/gadget/s3c-hsotg.c | |||
@@ -543,7 +543,7 @@ static int s3c_hsotg_write_fifo(struct s3c_hsotg *hsotg, | |||
543 | * FIFO, requests of >512 cause the endpoint to get stuck with a | 543 | * FIFO, requests of >512 cause the endpoint to get stuck with a |
544 | * fragment of the end of the transfer in it. | 544 | * fragment of the end of the transfer in it. |
545 | */ | 545 | */ |
546 | if (can_write > 512) | 546 | if (can_write > 512 && !periodic) |
547 | can_write = 512; | 547 | can_write = 512; |
548 | 548 | ||
549 | /* | 549 | /* |
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c index 4449f565d6c6..f2407b2e8a99 100644 --- a/drivers/usb/host/ehci-fsl.c +++ b/drivers/usb/host/ehci-fsl.c | |||
@@ -130,7 +130,7 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver, | |||
130 | } | 130 | } |
131 | 131 | ||
132 | /* Enable USB controller, 83xx or 8536 */ | 132 | /* Enable USB controller, 83xx or 8536 */ |
133 | if (pdata->have_sysif_regs) | 133 | if (pdata->have_sysif_regs && pdata->controller_ver < FSL_USB_VER_1_6) |
134 | setbits32(hcd->regs + FSL_SOC_USB_CTRL, 0x4); | 134 | setbits32(hcd->regs + FSL_SOC_USB_CTRL, 0x4); |
135 | 135 | ||
136 | /* Don't need to set host mode here. It will be done by tdi_reset() */ | 136 | /* Don't need to set host mode here. It will be done by tdi_reset() */ |
@@ -232,15 +232,9 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd, | |||
232 | case FSL_USB2_PHY_ULPI: | 232 | case FSL_USB2_PHY_ULPI: |
233 | if (pdata->have_sysif_regs && pdata->controller_ver) { | 233 | if (pdata->have_sysif_regs && pdata->controller_ver) { |
234 | /* controller version 1.6 or above */ | 234 | /* controller version 1.6 or above */ |
235 | clrbits32(non_ehci + FSL_SOC_USB_CTRL, UTMI_PHY_EN); | ||
235 | setbits32(non_ehci + FSL_SOC_USB_CTRL, | 236 | setbits32(non_ehci + FSL_SOC_USB_CTRL, |
236 | ULPI_PHY_CLK_SEL); | 237 | ULPI_PHY_CLK_SEL | USB_CTRL_USB_EN); |
237 | /* | ||
238 | * Due to controller issue of PHY_CLK_VALID in ULPI | ||
239 | * mode, we set USB_CTRL_USB_EN before checking | ||
240 | * PHY_CLK_VALID, otherwise PHY_CLK_VALID doesn't work. | ||
241 | */ | ||
242 | clrsetbits_be32(non_ehci + FSL_SOC_USB_CTRL, | ||
243 | UTMI_PHY_EN, USB_CTRL_USB_EN); | ||
244 | } | 238 | } |
245 | portsc |= PORT_PTS_ULPI; | 239 | portsc |= PORT_PTS_ULPI; |
246 | break; | 240 | break; |
@@ -270,8 +264,9 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd, | |||
270 | if (pdata->have_sysif_regs && pdata->controller_ver && | 264 | if (pdata->have_sysif_regs && pdata->controller_ver && |
271 | (phy_mode == FSL_USB2_PHY_ULPI)) { | 265 | (phy_mode == FSL_USB2_PHY_ULPI)) { |
272 | /* check PHY_CLK_VALID to get phy clk valid */ | 266 | /* check PHY_CLK_VALID to get phy clk valid */ |
273 | if (!spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) & | 267 | if (!(spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) & |
274 | PHY_CLK_VALID, FSL_USB_PHY_CLK_TIMEOUT, 0)) { | 268 | PHY_CLK_VALID, FSL_USB_PHY_CLK_TIMEOUT, 0) || |
269 | in_be32(non_ehci + FSL_SOC_USB_PRICTRL))) { | ||
275 | printk(KERN_WARNING "fsl-ehci: USB PHY clock invalid\n"); | 270 | printk(KERN_WARNING "fsl-ehci: USB PHY clock invalid\n"); |
276 | return -EINVAL; | 271 | return -EINVAL; |
277 | } | 272 | } |
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index 6bd299e61f58..854c2ec7b699 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c | |||
@@ -361,7 +361,7 @@ static struct pci_driver ehci_pci_driver = { | |||
361 | .remove = usb_hcd_pci_remove, | 361 | .remove = usb_hcd_pci_remove, |
362 | .shutdown = usb_hcd_pci_shutdown, | 362 | .shutdown = usb_hcd_pci_shutdown, |
363 | 363 | ||
364 | #ifdef CONFIG_PM_SLEEP | 364 | #ifdef CONFIG_PM |
365 | .driver = { | 365 | .driver = { |
366 | .pm = &usb_hcd_pci_pm_ops | 366 | .pm = &usb_hcd_pci_pm_ops |
367 | }, | 367 | }, |
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c index 60a5de505ca1..adb01d950a16 100644 --- a/drivers/usb/host/imx21-hcd.c +++ b/drivers/usb/host/imx21-hcd.c | |||
@@ -824,13 +824,13 @@ static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd, | |||
824 | i = DIV_ROUND_UP(wrap_frame( | 824 | i = DIV_ROUND_UP(wrap_frame( |
825 | cur_frame - urb->start_frame), | 825 | cur_frame - urb->start_frame), |
826 | urb->interval); | 826 | urb->interval); |
827 | if (urb->transfer_flags & URB_ISO_ASAP) { | 827 | |
828 | /* Treat underruns as if URB_ISO_ASAP was set */ | ||
829 | if ((urb->transfer_flags & URB_ISO_ASAP) || | ||
830 | i >= urb->number_of_packets) { | ||
828 | urb->start_frame = wrap_frame(urb->start_frame | 831 | urb->start_frame = wrap_frame(urb->start_frame |
829 | + i * urb->interval); | 832 | + i * urb->interval); |
830 | i = 0; | 833 | i = 0; |
831 | } else if (i >= urb->number_of_packets) { | ||
832 | ret = -EXDEV; | ||
833 | goto alloc_dmem_failed; | ||
834 | } | 834 | } |
835 | } | 835 | } |
836 | } | 836 | } |
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 8f6b695af6a4..604cad1bcf9c 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c | |||
@@ -216,31 +216,26 @@ static int ohci_urb_enqueue ( | |||
216 | frame &= ~(ed->interval - 1); | 216 | frame &= ~(ed->interval - 1); |
217 | frame |= ed->branch; | 217 | frame |= ed->branch; |
218 | urb->start_frame = frame; | 218 | urb->start_frame = frame; |
219 | ed->last_iso = frame + ed->interval * (size - 1); | ||
219 | } | 220 | } |
220 | } else if (ed->type == PIPE_ISOCHRONOUS) { | 221 | } else if (ed->type == PIPE_ISOCHRONOUS) { |
221 | u16 next = ohci_frame_no(ohci) + 1; | 222 | u16 next = ohci_frame_no(ohci) + 1; |
222 | u16 frame = ed->last_iso + ed->interval; | 223 | u16 frame = ed->last_iso + ed->interval; |
224 | u16 length = ed->interval * (size - 1); | ||
223 | 225 | ||
224 | /* Behind the scheduling threshold? */ | 226 | /* Behind the scheduling threshold? */ |
225 | if (unlikely(tick_before(frame, next))) { | 227 | if (unlikely(tick_before(frame, next))) { |
226 | 228 | ||
227 | /* USB_ISO_ASAP: Round up to the first available slot */ | 229 | /* URB_ISO_ASAP: Round up to the first available slot */ |
228 | if (urb->transfer_flags & URB_ISO_ASAP) { | 230 | if (urb->transfer_flags & URB_ISO_ASAP) { |
229 | frame += (next - frame + ed->interval - 1) & | 231 | frame += (next - frame + ed->interval - 1) & |
230 | -ed->interval; | 232 | -ed->interval; |
231 | 233 | ||
232 | /* | 234 | /* |
233 | * Not ASAP: Use the next slot in the stream. If | 235 | * Not ASAP: Use the next slot in the stream, |
234 | * the entire URB falls before the threshold, fail. | 236 | * no matter what. |
235 | */ | 237 | */ |
236 | } else { | 238 | } else { |
237 | if (tick_before(frame + ed->interval * | ||
238 | (urb->number_of_packets - 1), next)) { | ||
239 | retval = -EXDEV; | ||
240 | usb_hcd_unlink_urb_from_ep(hcd, urb); | ||
241 | goto fail; | ||
242 | } | ||
243 | |||
244 | /* | 239 | /* |
245 | * Some OHCI hardware doesn't handle late TDs | 240 | * Some OHCI hardware doesn't handle late TDs |
246 | * correctly. After retiring them it proceeds | 241 | * correctly. After retiring them it proceeds |
@@ -251,9 +246,16 @@ static int ohci_urb_enqueue ( | |||
251 | urb_priv->td_cnt = DIV_ROUND_UP( | 246 | urb_priv->td_cnt = DIV_ROUND_UP( |
252 | (u16) (next - frame), | 247 | (u16) (next - frame), |
253 | ed->interval); | 248 | ed->interval); |
249 | if (urb_priv->td_cnt >= urb_priv->length) { | ||
250 | ++urb_priv->td_cnt; /* Mark it */ | ||
251 | ohci_dbg(ohci, "iso underrun %p (%u+%u < %u)\n", | ||
252 | urb, frame, length, | ||
253 | next); | ||
254 | } | ||
254 | } | 255 | } |
255 | } | 256 | } |
256 | urb->start_frame = frame; | 257 | urb->start_frame = frame; |
258 | ed->last_iso = frame + length; | ||
257 | } | 259 | } |
258 | 260 | ||
259 | /* fill the TDs and link them to the ed; and | 261 | /* fill the TDs and link them to the ed; and |
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c index df4a6707322d..e7f577e63624 100644 --- a/drivers/usb/host/ohci-q.c +++ b/drivers/usb/host/ohci-q.c | |||
@@ -41,9 +41,13 @@ finish_urb(struct ohci_hcd *ohci, struct urb *urb, int status) | |||
41 | __releases(ohci->lock) | 41 | __releases(ohci->lock) |
42 | __acquires(ohci->lock) | 42 | __acquires(ohci->lock) |
43 | { | 43 | { |
44 | struct device *dev = ohci_to_hcd(ohci)->self.controller; | 44 | struct device *dev = ohci_to_hcd(ohci)->self.controller; |
45 | struct usb_host_endpoint *ep = urb->ep; | ||
46 | struct urb_priv *urb_priv; | ||
47 | |||
45 | // ASSERT (urb->hcpriv != 0); | 48 | // ASSERT (urb->hcpriv != 0); |
46 | 49 | ||
50 | restart: | ||
47 | urb_free_priv (ohci, urb->hcpriv); | 51 | urb_free_priv (ohci, urb->hcpriv); |
48 | urb->hcpriv = NULL; | 52 | urb->hcpriv = NULL; |
49 | if (likely(status == -EINPROGRESS)) | 53 | if (likely(status == -EINPROGRESS)) |
@@ -80,6 +84,21 @@ __acquires(ohci->lock) | |||
80 | ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE); | 84 | ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE); |
81 | ohci_writel (ohci, ohci->hc_control, &ohci->regs->control); | 85 | ohci_writel (ohci, ohci->hc_control, &ohci->regs->control); |
82 | } | 86 | } |
87 | |||
88 | /* | ||
89 | * An isochronous URB that is submitted too late won't have any TDs | ||
90 | * (marked by the fact that the td_cnt value is larger than the | ||
91 | * actual number of TDs). If the next URB on this endpoint is like | ||
92 | * that, give it back now. | ||
93 | */ | ||
94 | if (!list_empty(&ep->urb_list)) { | ||
95 | urb = list_first_entry(&ep->urb_list, struct urb, urb_list); | ||
96 | urb_priv = urb->hcpriv; | ||
97 | if (urb_priv->td_cnt > urb_priv->length) { | ||
98 | status = 0; | ||
99 | goto restart; | ||
100 | } | ||
101 | } | ||
83 | } | 102 | } |
84 | 103 | ||
85 | 104 | ||
@@ -546,7 +565,6 @@ td_fill (struct ohci_hcd *ohci, u32 info, | |||
546 | td->hwCBP = cpu_to_hc32 (ohci, data & 0xFFFFF000); | 565 | td->hwCBP = cpu_to_hc32 (ohci, data & 0xFFFFF000); |
547 | *ohci_hwPSWp(ohci, td, 0) = cpu_to_hc16 (ohci, | 566 | *ohci_hwPSWp(ohci, td, 0) = cpu_to_hc16 (ohci, |
548 | (data & 0x0FFF) | 0xE000); | 567 | (data & 0x0FFF) | 0xE000); |
549 | td->ed->last_iso = info & 0xffff; | ||
550 | } else { | 568 | } else { |
551 | td->hwCBP = cpu_to_hc32 (ohci, data); | 569 | td->hwCBP = cpu_to_hc32 (ohci, data); |
552 | } | 570 | } |
@@ -996,7 +1014,7 @@ rescan_this: | |||
996 | urb_priv->td_cnt++; | 1014 | urb_priv->td_cnt++; |
997 | 1015 | ||
998 | /* if URB is done, clean up */ | 1016 | /* if URB is done, clean up */ |
999 | if (urb_priv->td_cnt == urb_priv->length) { | 1017 | if (urb_priv->td_cnt >= urb_priv->length) { |
1000 | modified = completed = 1; | 1018 | modified = completed = 1; |
1001 | finish_urb(ohci, urb, 0); | 1019 | finish_urb(ohci, urb, 0); |
1002 | } | 1020 | } |
@@ -1086,7 +1104,7 @@ static void takeback_td(struct ohci_hcd *ohci, struct td *td) | |||
1086 | urb_priv->td_cnt++; | 1104 | urb_priv->td_cnt++; |
1087 | 1105 | ||
1088 | /* If all this urb's TDs are done, call complete() */ | 1106 | /* If all this urb's TDs are done, call complete() */ |
1089 | if (urb_priv->td_cnt == urb_priv->length) | 1107 | if (urb_priv->td_cnt >= urb_priv->length) |
1090 | finish_urb(ohci, urb, status); | 1108 | finish_urb(ohci, urb, status); |
1091 | 1109 | ||
1092 | /* clean schedule: unlink EDs that are no longer busy */ | 1110 | /* clean schedule: unlink EDs that are no longer busy */ |
diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c index c300bd2f7d1c..0f228c46eeda 100644 --- a/drivers/usb/host/uhci-pci.c +++ b/drivers/usb/host/uhci-pci.c | |||
@@ -293,7 +293,7 @@ static struct pci_driver uhci_pci_driver = { | |||
293 | .remove = usb_hcd_pci_remove, | 293 | .remove = usb_hcd_pci_remove, |
294 | .shutdown = uhci_shutdown, | 294 | .shutdown = uhci_shutdown, |
295 | 295 | ||
296 | #ifdef CONFIG_PM_SLEEP | 296 | #ifdef CONFIG_PM |
297 | .driver = { | 297 | .driver = { |
298 | .pm = &usb_hcd_pci_pm_ops | 298 | .pm = &usb_hcd_pci_pm_ops |
299 | }, | 299 | }, |
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c index 041c6ddb695c..da6f56d996ce 100644 --- a/drivers/usb/host/uhci-q.c +++ b/drivers/usb/host/uhci-q.c | |||
@@ -1303,7 +1303,7 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb, | |||
1303 | } | 1303 | } |
1304 | 1304 | ||
1305 | /* Fell behind? */ | 1305 | /* Fell behind? */ |
1306 | if (uhci_frame_before_eq(frame, next)) { | 1306 | if (!uhci_frame_before_eq(next, frame)) { |
1307 | 1307 | ||
1308 | /* USB_ISO_ASAP: Round up to the first available slot */ | 1308 | /* USB_ISO_ASAP: Round up to the first available slot */ |
1309 | if (urb->transfer_flags & URB_ISO_ASAP) | 1309 | if (urb->transfer_flags & URB_ISO_ASAP) |
@@ -1311,13 +1311,17 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb, | |||
1311 | -qh->period; | 1311 | -qh->period; |
1312 | 1312 | ||
1313 | /* | 1313 | /* |
1314 | * Not ASAP: Use the next slot in the stream. If | 1314 | * Not ASAP: Use the next slot in the stream, |
1315 | * the entire URB falls before the threshold, fail. | 1315 | * no matter what. |
1316 | */ | 1316 | */ |
1317 | else if (!uhci_frame_before_eq(next, | 1317 | else if (!uhci_frame_before_eq(next, |
1318 | frame + (urb->number_of_packets - 1) * | 1318 | frame + (urb->number_of_packets - 1) * |
1319 | qh->period)) | 1319 | qh->period)) |
1320 | return -EXDEV; | 1320 | dev_dbg(uhci_dev(uhci), "iso underrun %p (%u+%u < %u)\n", |
1321 | urb, frame, | ||
1322 | (urb->number_of_packets - 1) * | ||
1323 | qh->period, | ||
1324 | next); | ||
1321 | } | 1325 | } |
1322 | } | 1326 | } |
1323 | 1327 | ||
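The OHCI and UHCI hunks above both compare frame numbers that wrap around (tick_before() and uhci_frame_before_eq()). The usual way to make such comparisons wrap-safe is to subtract in the counter's width and test the sign; a stand-alone sketch, assuming a 16-bit frame counter as OHCI uses (the helper name is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "t1 is strictly before t2" for a 16-bit frame counter. */
static int frame_before(uint16_t t1, uint16_t t2)
{
        return (int16_t)(t1 - t2) < 0;
}

int main(void)
{
        /* 0xfffe is "before" 0x0003 even though it is numerically larger. */
        printf("%d %d\n", frame_before(0xfffe, 0x0003),
               frame_before(0x0003, 0xfffe));
        return 0;
}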
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index fae697ed0b70..773a6b28c4f1 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c | |||
@@ -287,7 +287,7 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend) | |||
287 | if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue) | 287 | if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue) |
288 | xhci_queue_stop_endpoint(xhci, slot_id, i, suspend); | 288 | xhci_queue_stop_endpoint(xhci, slot_id, i, suspend); |
289 | } | 289 | } |
290 | cmd->command_trb = xhci->cmd_ring->enqueue; | 290 | cmd->command_trb = xhci_find_next_enqueue(xhci->cmd_ring); |
291 | list_add_tail(&cmd->cmd_list, &virt_dev->cmd_list); | 291 | list_add_tail(&cmd->cmd_list, &virt_dev->cmd_list); |
292 | xhci_queue_stop_endpoint(xhci, slot_id, 0, suspend); | 292 | xhci_queue_stop_endpoint(xhci, slot_id, 0, suspend); |
293 | xhci_ring_cmd_db(xhci); | 293 | xhci_ring_cmd_db(xhci); |
@@ -552,11 +552,15 @@ void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status, u16 wIndex) | |||
552 | * - Mark a port as being done with device resume, | 552 | * - Mark a port as being done with device resume, |
553 | * and ring the endpoint doorbells. | 553 | * and ring the endpoint doorbells. |
554 | * - Stop the Synopsys redriver Compliance Mode polling. | 554 | * - Stop the Synopsys redriver Compliance Mode polling. |
555 | * - Drop and reacquire the xHCI lock, in order to wait for port resume. | ||
555 | */ | 556 | */ |
556 | static u32 xhci_get_port_status(struct usb_hcd *hcd, | 557 | static u32 xhci_get_port_status(struct usb_hcd *hcd, |
557 | struct xhci_bus_state *bus_state, | 558 | struct xhci_bus_state *bus_state, |
558 | __le32 __iomem **port_array, | 559 | __le32 __iomem **port_array, |
559 | u16 wIndex, u32 raw_port_status) | 560 | u16 wIndex, u32 raw_port_status, |
561 | unsigned long flags) | ||
562 | __releases(&xhci->lock) | ||
563 | __acquires(&xhci->lock) | ||
560 | { | 564 | { |
561 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | 565 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
562 | u32 status = 0; | 566 | u32 status = 0; |
@@ -591,21 +595,42 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, | |||
591 | return 0xffffffff; | 595 | return 0xffffffff; |
592 | if (time_after_eq(jiffies, | 596 | if (time_after_eq(jiffies, |
593 | bus_state->resume_done[wIndex])) { | 597 | bus_state->resume_done[wIndex])) { |
598 | int time_left; | ||
599 | |||
594 | xhci_dbg(xhci, "Resume USB2 port %d\n", | 600 | xhci_dbg(xhci, "Resume USB2 port %d\n", |
595 | wIndex + 1); | 601 | wIndex + 1); |
596 | bus_state->resume_done[wIndex] = 0; | 602 | bus_state->resume_done[wIndex] = 0; |
597 | clear_bit(wIndex, &bus_state->resuming_ports); | 603 | clear_bit(wIndex, &bus_state->resuming_ports); |
604 | |||
605 | set_bit(wIndex, &bus_state->rexit_ports); | ||
598 | xhci_set_link_state(xhci, port_array, wIndex, | 606 | xhci_set_link_state(xhci, port_array, wIndex, |
599 | XDEV_U0); | 607 | XDEV_U0); |
600 | xhci_dbg(xhci, "set port %d resume\n", | 608 | |
601 | wIndex + 1); | 609 | spin_unlock_irqrestore(&xhci->lock, flags); |
602 | slot_id = xhci_find_slot_id_by_port(hcd, xhci, | 610 | time_left = wait_for_completion_timeout( |
603 | wIndex + 1); | 611 | &bus_state->rexit_done[wIndex], |
604 | if (!slot_id) { | 612 | msecs_to_jiffies( |
605 | xhci_dbg(xhci, "slot_id is zero\n"); | 613 | XHCI_MAX_REXIT_TIMEOUT)); |
606 | return 0xffffffff; | 614 | spin_lock_irqsave(&xhci->lock, flags); |
615 | |||
616 | if (time_left) { | ||
617 | slot_id = xhci_find_slot_id_by_port(hcd, | ||
618 | xhci, wIndex + 1); | ||
619 | if (!slot_id) { | ||
620 | xhci_dbg(xhci, "slot_id is zero\n"); | ||
621 | return 0xffffffff; | ||
622 | } | ||
623 | xhci_ring_device(xhci, slot_id); | ||
624 | } else { | ||
625 | int port_status = xhci_readl(xhci, | ||
626 | port_array[wIndex]); | ||
627 | xhci_warn(xhci, "Port resume took longer than %i msec, port status = 0x%x\n", | ||
628 | XHCI_MAX_REXIT_TIMEOUT, | ||
629 | port_status); | ||
630 | status |= USB_PORT_STAT_SUSPEND; | ||
631 | clear_bit(wIndex, &bus_state->rexit_ports); | ||
607 | } | 632 | } |
608 | xhci_ring_device(xhci, slot_id); | 633 | |
609 | bus_state->port_c_suspend |= 1 << wIndex; | 634 | bus_state->port_c_suspend |= 1 << wIndex; |
610 | bus_state->suspended_ports &= ~(1 << wIndex); | 635 | bus_state->suspended_ports &= ~(1 << wIndex); |
611 | } else { | 636 | } else { |
@@ -728,7 +753,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
728 | break; | 753 | break; |
729 | } | 754 | } |
730 | status = xhci_get_port_status(hcd, bus_state, port_array, | 755 | status = xhci_get_port_status(hcd, bus_state, port_array, |
731 | wIndex, temp); | 756 | wIndex, temp, flags); |
732 | if (status == 0xffffffff) | 757 | if (status == 0xffffffff) |
733 | goto error; | 758 | goto error; |
734 | 759 | ||
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 53b972c2a09f..83bcd13622c3 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c | |||
@@ -2428,6 +2428,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
2428 | for (i = 0; i < USB_MAXCHILDREN; ++i) { | 2428 | for (i = 0; i < USB_MAXCHILDREN; ++i) { |
2429 | xhci->bus_state[0].resume_done[i] = 0; | 2429 | xhci->bus_state[0].resume_done[i] = 0; |
2430 | xhci->bus_state[1].resume_done[i] = 0; | 2430 | xhci->bus_state[1].resume_done[i] = 0; |
2431 | /* Only the USB 2.0 completions will ever be used. */ | ||
2432 | init_completion(&xhci->bus_state[1].rexit_done[i]); | ||
2431 | } | 2433 | } |
2432 | 2434 | ||
2433 | if (scratchpad_alloc(xhci, flags)) | 2435 | if (scratchpad_alloc(xhci, flags)) |
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index c2d495057eb5..236c3aabe940 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
@@ -351,7 +351,7 @@ static struct pci_driver xhci_pci_driver = { | |||
351 | /* suspend and resume implemented later */ | 351 | /* suspend and resume implemented later */ |
352 | 352 | ||
353 | .shutdown = usb_hcd_pci_shutdown, | 353 | .shutdown = usb_hcd_pci_shutdown, |
354 | #ifdef CONFIG_PM_SLEEP | 354 | #ifdef CONFIG_PM |
355 | .driver = { | 355 | .driver = { |
356 | .pm = &usb_hcd_pci_pm_ops | 356 | .pm = &usb_hcd_pci_pm_ops |
357 | }, | 357 | }, |
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 411da1fc7ae8..6bfbd80ec2b9 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
@@ -123,6 +123,16 @@ static int enqueue_is_link_trb(struct xhci_ring *ring) | |||
123 | return TRB_TYPE_LINK_LE32(link->control); | 123 | return TRB_TYPE_LINK_LE32(link->control); |
124 | } | 124 | } |
125 | 125 | ||
126 | union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring) | ||
127 | { | ||
128 | /* Enqueue pointer can be left pointing to the link TRB, | ||
129 | * we must handle that | ||
130 | */ | ||
131 | if (TRB_TYPE_LINK_LE32(ring->enqueue->link.control)) | ||
132 | return ring->enq_seg->next->trbs; | ||
133 | return ring->enqueue; | ||
134 | } | ||
135 | |||
126 | /* Updates trb to point to the next TRB in the ring, and updates seg if the next | 136 | /* Updates trb to point to the next TRB in the ring, and updates seg if the next |
127 | * TRB is in a new segment. This does not skip over link TRBs, and it does not | 137 | * TRB is in a new segment. This does not skip over link TRBs, and it does not |
128 | * effect the ring dequeue or enqueue pointers. | 138 | * effect the ring dequeue or enqueue pointers. |
@@ -859,8 +869,12 @@ remove_finished_td: | |||
859 | /* Otherwise ring the doorbell(s) to restart queued transfers */ | 869 | /* Otherwise ring the doorbell(s) to restart queued transfers */ |
860 | ring_doorbell_for_active_rings(xhci, slot_id, ep_index); | 870 | ring_doorbell_for_active_rings(xhci, slot_id, ep_index); |
861 | } | 871 | } |
862 | ep->stopped_td = NULL; | 872 | |
863 | ep->stopped_trb = NULL; | 873 | /* Clear stopped_td and stopped_trb if endpoint is not halted */ |
874 | if (!(ep->ep_state & EP_HALTED)) { | ||
875 | ep->stopped_td = NULL; | ||
876 | ep->stopped_trb = NULL; | ||
877 | } | ||
864 | 878 | ||
865 | /* | 879 | /* |
866 | * Drop the lock and complete the URBs in the cancelled TD list. | 880 | * Drop the lock and complete the URBs in the cancelled TD list. |
@@ -1414,6 +1428,12 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, | |||
1414 | inc_deq(xhci, xhci->cmd_ring); | 1428 | inc_deq(xhci, xhci->cmd_ring); |
1415 | return; | 1429 | return; |
1416 | } | 1430 | } |
1431 | /* There is no command to handle if we get a stop event when the | ||
1432 | * command ring is empty, event->cmd_trb points to the next | ||
1433 | * unset command | ||
1434 | */ | ||
1435 | if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue) | ||
1436 | return; | ||
1417 | } | 1437 | } |
1418 | 1438 | ||
1419 | switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]) | 1439 | switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]) |
@@ -1743,6 +1763,19 @@ static void handle_port_status(struct xhci_hcd *xhci, | |||
1743 | } | 1763 | } |
1744 | } | 1764 | } |
1745 | 1765 | ||
1766 | /* | ||
1767 | * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or | ||
1768 | * RExit to a disconnect state). If so, let the the driver know it's | ||
1769 | * out of the RExit state. | ||
1770 | */ | ||
1771 | if (!DEV_SUPERSPEED(temp) && | ||
1772 | test_and_clear_bit(faked_port_index, | ||
1773 | &bus_state->rexit_ports)) { | ||
1774 | complete(&bus_state->rexit_done[faked_port_index]); | ||
1775 | bogus_port_status = true; | ||
1776 | goto cleanup; | ||
1777 | } | ||
1778 | |||
1746 | if (hcd->speed != HCD_USB3) | 1779 | if (hcd->speed != HCD_USB3) |
1747 | xhci_test_and_clear_bit(xhci, port_array, faked_port_index, | 1780 | xhci_test_and_clear_bit(xhci, port_array, faked_port_index, |
1748 | PORT_PLC); | 1781 | PORT_PLC); |
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 49b6edb84a79..1e36dbb48366 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
@@ -2598,15 +2598,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, | |||
2598 | if (command) { | 2598 | if (command) { |
2599 | cmd_completion = command->completion; | 2599 | cmd_completion = command->completion; |
2600 | cmd_status = &command->status; | 2600 | cmd_status = &command->status; |
2601 | command->command_trb = xhci->cmd_ring->enqueue; | 2601 | command->command_trb = xhci_find_next_enqueue(xhci->cmd_ring); |
2602 | |||
2603 | /* Enqueue pointer can be left pointing to the link TRB, | ||
2604 | * we must handle that | ||
2605 | */ | ||
2606 | if (TRB_TYPE_LINK_LE32(command->command_trb->link.control)) | ||
2607 | command->command_trb = | ||
2608 | xhci->cmd_ring->enq_seg->next->trbs; | ||
2609 | |||
2610 | list_add_tail(&command->cmd_list, &virt_dev->cmd_list); | 2602 | list_add_tail(&command->cmd_list, &virt_dev->cmd_list); |
2611 | } else { | 2603 | } else { |
2612 | cmd_completion = &virt_dev->cmd_completion; | 2604 | cmd_completion = &virt_dev->cmd_completion; |
@@ -2614,7 +2606,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, | |||
2614 | } | 2606 | } |
2615 | init_completion(cmd_completion); | 2607 | init_completion(cmd_completion); |
2616 | 2608 | ||
2617 | cmd_trb = xhci->cmd_ring->dequeue; | 2609 | cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring); |
2618 | if (!ctx_change) | 2610 | if (!ctx_change) |
2619 | ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma, | 2611 | ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma, |
2620 | udev->slot_id, must_succeed); | 2612 | udev->slot_id, must_succeed); |
@@ -3439,14 +3431,7 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev) | |||
3439 | 3431 | ||
3440 | /* Attempt to submit the Reset Device command to the command ring */ | 3432 | /* Attempt to submit the Reset Device command to the command ring */ |
3441 | spin_lock_irqsave(&xhci->lock, flags); | 3433 | spin_lock_irqsave(&xhci->lock, flags); |
3442 | reset_device_cmd->command_trb = xhci->cmd_ring->enqueue; | 3434 | reset_device_cmd->command_trb = xhci_find_next_enqueue(xhci->cmd_ring); |
3443 | |||
3444 | /* Enqueue pointer can be left pointing to the link TRB, | ||
3445 | * we must handle that | ||
3446 | */ | ||
3447 | if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control)) | ||
3448 | reset_device_cmd->command_trb = | ||
3449 | xhci->cmd_ring->enq_seg->next->trbs; | ||
3450 | 3435 | ||
3451 | list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list); | 3436 | list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list); |
3452 | ret = xhci_queue_reset_device(xhci, slot_id); | 3437 | ret = xhci_queue_reset_device(xhci, slot_id); |
@@ -3650,7 +3635,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) | |||
3650 | union xhci_trb *cmd_trb; | 3635 | union xhci_trb *cmd_trb; |
3651 | 3636 | ||
3652 | spin_lock_irqsave(&xhci->lock, flags); | 3637 | spin_lock_irqsave(&xhci->lock, flags); |
3653 | cmd_trb = xhci->cmd_ring->dequeue; | 3638 | cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring); |
3654 | ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0); | 3639 | ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0); |
3655 | if (ret) { | 3640 | if (ret) { |
3656 | spin_unlock_irqrestore(&xhci->lock, flags); | 3641 | spin_unlock_irqrestore(&xhci->lock, flags); |
@@ -3785,7 +3770,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) | |||
3785 | slot_ctx->dev_info >> 27); | 3770 | slot_ctx->dev_info >> 27); |
3786 | 3771 | ||
3787 | spin_lock_irqsave(&xhci->lock, flags); | 3772 | spin_lock_irqsave(&xhci->lock, flags); |
3788 | cmd_trb = xhci->cmd_ring->dequeue; | 3773 | cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring); |
3789 | ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma, | 3774 | ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma, |
3790 | udev->slot_id); | 3775 | udev->slot_id); |
3791 | if (ret) { | 3776 | if (ret) { |
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 46aa14894148..289fbfbae746 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h | |||
@@ -1412,8 +1412,18 @@ struct xhci_bus_state { | |||
1412 | unsigned long resume_done[USB_MAXCHILDREN]; | 1412 | unsigned long resume_done[USB_MAXCHILDREN]; |
1413 | /* which ports have started to resume */ | 1413 | /* which ports have started to resume */ |
1414 | unsigned long resuming_ports; | 1414 | unsigned long resuming_ports; |
1415 | /* Which ports are waiting on RExit to U0 transition. */ | ||
1416 | unsigned long rexit_ports; | ||
1417 | struct completion rexit_done[USB_MAXCHILDREN]; | ||
1415 | }; | 1418 | }; |
1416 | 1419 | ||
1420 | |||
1421 | /* | ||
1422 | * It can take up to 20 ms to transition from RExit to U0 on the | ||
1423 | * Intel Lynx Point LP xHCI host. | ||
1424 | */ | ||
1425 | #define XHCI_MAX_REXIT_TIMEOUT (20 * 1000) | ||
1426 | |||
1417 | static inline unsigned int hcd_index(struct usb_hcd *hcd) | 1427 | static inline unsigned int hcd_index(struct usb_hcd *hcd) |
1418 | { | 1428 | { |
1419 | if (hcd->speed == HCD_USB3) | 1429 | if (hcd->speed == HCD_USB3) |
@@ -1840,6 +1850,7 @@ int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command, | |||
1840 | union xhci_trb *cmd_trb); | 1850 | union xhci_trb *cmd_trb); |
1841 | void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id, | 1851 | void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id, |
1842 | unsigned int ep_index, unsigned int stream_id); | 1852 | unsigned int ep_index, unsigned int stream_id); |
1853 | union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring); | ||
1843 | 1854 | ||
1844 | /* xHCI roothub code */ | 1855 | /* xHCI roothub code */ |
1845 | void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array, | 1856 | void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array, |
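The rexit_ports/rexit_done additions above form a standard completion handshake: xhci_get_port_status() drops the lock and waits on a per-port completion with a timeout, while handle_port_status() signals it from the interrupt path. A reduced sketch of that pattern using the same kernel completion primitives — everything other than the completion/jiffies API is an illustrative name, and this is not the actual xHCI code:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

static struct completion port_rexit_done;

static void hub_side_wait(void)
{
        unsigned long left;

        init_completion(&port_rexit_done);
        /* the real code drops xhci->lock before waiting, as in the hunk above */
        left = wait_for_completion_timeout(&port_rexit_done,
                                           msecs_to_jiffies(20));
        if (!left)
                pr_warn("port resume timed out\n");     /* fall-back path */
}

static void irq_side_signal(void)
{
        complete(&port_rexit_done);     /* port left RExit; wake the waiter */
}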
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index 4047cbb91bac..bd4138d80a48 100644 --- a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c | |||
@@ -535,6 +535,9 @@ static int dsps_probe(struct platform_device *pdev) | |||
535 | struct dsps_glue *glue; | 535 | struct dsps_glue *glue; |
536 | int ret; | 536 | int ret; |
537 | 537 | ||
538 | if (!strcmp(pdev->name, "musb-hdrc")) | ||
539 | return -ENODEV; | ||
540 | |||
538 | match = of_match_node(musb_dsps_of_match, pdev->dev.of_node); | 541 | match = of_match_node(musb_dsps_of_match, pdev->dev.of_node); |
539 | if (!match) { | 542 | if (!match) { |
540 | dev_err(&pdev->dev, "fail to get matching of_match struct\n"); | 543 | dev_err(&pdev->dev, "fail to get matching of_match struct\n"); |
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index 9a08679d204d..b19ed213ab85 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c | |||
@@ -1790,6 +1790,10 @@ int musb_gadget_setup(struct musb *musb) | |||
1790 | musb->g.max_speed = USB_SPEED_HIGH; | 1790 | musb->g.max_speed = USB_SPEED_HIGH; |
1791 | musb->g.speed = USB_SPEED_UNKNOWN; | 1791 | musb->g.speed = USB_SPEED_UNKNOWN; |
1792 | 1792 | ||
1793 | MUSB_DEV_MODE(musb); | ||
1794 | musb->xceiv->otg->default_a = 0; | ||
1795 | musb->xceiv->state = OTG_STATE_B_IDLE; | ||
1796 | |||
1793 | /* this "gadget" abstracts/virtualizes the controller */ | 1797 | /* this "gadget" abstracts/virtualizes the controller */ |
1794 | musb->g.name = musb_driver_name; | 1798 | musb->g.name = musb_driver_name; |
1795 | musb->g.is_otg = 1; | 1799 | musb->g.is_otg = 1; |
@@ -1849,7 +1853,6 @@ static int musb_gadget_start(struct usb_gadget *g, | |||
1849 | musb->gadget_driver = driver; | 1853 | musb->gadget_driver = driver; |
1850 | 1854 | ||
1851 | spin_lock_irqsave(&musb->lock, flags); | 1855 | spin_lock_irqsave(&musb->lock, flags); |
1852 | musb->is_active = 1; | ||
1853 | 1856 | ||
1854 | otg_set_peripheral(otg, &musb->g); | 1857 | otg_set_peripheral(otg, &musb->g); |
1855 | musb->xceiv->state = OTG_STATE_B_IDLE; | 1858 | musb->xceiv->state = OTG_STATE_B_IDLE; |
diff --git a/drivers/usb/phy/phy-gpio-vbus-usb.c b/drivers/usb/phy/phy-gpio-vbus-usb.c index b2f29c9aebbf..02799a5efcd4 100644 --- a/drivers/usb/phy/phy-gpio-vbus-usb.c +++ b/drivers/usb/phy/phy-gpio-vbus-usb.c | |||
@@ -241,7 +241,7 @@ static int gpio_vbus_set_suspend(struct usb_phy *phy, int suspend) | |||
241 | 241 | ||
242 | /* platform driver interface */ | 242 | /* platform driver interface */ |
243 | 243 | ||
244 | static int __init gpio_vbus_probe(struct platform_device *pdev) | 244 | static int gpio_vbus_probe(struct platform_device *pdev) |
245 | { | 245 | { |
246 | struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev); | 246 | struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev); |
247 | struct gpio_vbus_data *gpio_vbus; | 247 | struct gpio_vbus_data *gpio_vbus; |
@@ -349,7 +349,7 @@ err_gpio: | |||
349 | return err; | 349 | return err; |
350 | } | 350 | } |
351 | 351 | ||
352 | static int __exit gpio_vbus_remove(struct platform_device *pdev) | 352 | static int gpio_vbus_remove(struct platform_device *pdev) |
353 | { | 353 | { |
354 | struct gpio_vbus_data *gpio_vbus = platform_get_drvdata(pdev); | 354 | struct gpio_vbus_data *gpio_vbus = platform_get_drvdata(pdev); |
355 | struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev); | 355 | struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev); |
@@ -398,8 +398,6 @@ static const struct dev_pm_ops gpio_vbus_dev_pm_ops = { | |||
398 | }; | 398 | }; |
399 | #endif | 399 | #endif |
400 | 400 | ||
401 | /* NOTE: the gpio-vbus device may *NOT* be hotplugged */ | ||
402 | |||
403 | MODULE_ALIAS("platform:gpio-vbus"); | 401 | MODULE_ALIAS("platform:gpio-vbus"); |
404 | 402 | ||
405 | static struct platform_driver gpio_vbus_driver = { | 403 | static struct platform_driver gpio_vbus_driver = { |
@@ -410,10 +408,11 @@ static struct platform_driver gpio_vbus_driver = { | |||
410 | .pm = &gpio_vbus_dev_pm_ops, | 408 | .pm = &gpio_vbus_dev_pm_ops, |
411 | #endif | 409 | #endif |
412 | }, | 410 | }, |
413 | .remove = __exit_p(gpio_vbus_remove), | 411 | .probe = gpio_vbus_probe, |
412 | .remove = gpio_vbus_remove, | ||
414 | }; | 413 | }; |
415 | 414 | ||
416 | module_platform_driver_probe(gpio_vbus_driver, gpio_vbus_probe); | 415 | module_platform_driver(gpio_vbus_driver); |
417 | 416 | ||
418 | MODULE_DESCRIPTION("simple GPIO controlled OTG transceiver driver"); | 417 | MODULE_DESCRIPTION("simple GPIO controlled OTG transceiver driver"); |
419 | MODULE_AUTHOR("Philipp Zabel"); | 418 | MODULE_AUTHOR("Philipp Zabel"); |
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 1cf6f125f5f0..80a7104d5ddb 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -81,6 +81,7 @@ static void option_instat_callback(struct urb *urb); | |||
81 | 81 | ||
82 | #define HUAWEI_VENDOR_ID 0x12D1 | 82 | #define HUAWEI_VENDOR_ID 0x12D1 |
83 | #define HUAWEI_PRODUCT_E173 0x140C | 83 | #define HUAWEI_PRODUCT_E173 0x140C |
84 | #define HUAWEI_PRODUCT_E1750 0x1406 | ||
84 | #define HUAWEI_PRODUCT_K4505 0x1464 | 85 | #define HUAWEI_PRODUCT_K4505 0x1464 |
85 | #define HUAWEI_PRODUCT_K3765 0x1465 | 86 | #define HUAWEI_PRODUCT_K3765 0x1465 |
86 | #define HUAWEI_PRODUCT_K4605 0x14C6 | 87 | #define HUAWEI_PRODUCT_K4605 0x14C6 |
@@ -567,6 +568,8 @@ static const struct usb_device_id option_ids[] = { | |||
567 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) }, | 568 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) }, |
568 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff), | 569 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff), |
569 | .driver_info = (kernel_ulong_t) &net_intf1_blacklist }, | 570 | .driver_info = (kernel_ulong_t) &net_intf1_blacklist }, |
571 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff), | ||
572 | .driver_info = (kernel_ulong_t) &net_intf2_blacklist }, | ||
570 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) }, | 573 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) }, |
571 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) }, | 574 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) }, |
572 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff), | 575 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff), |
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index a9807dea3887..4fb7a8f83c8a 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c | |||
@@ -545,6 +545,8 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu, | |||
545 | long npage; | 545 | long npage; |
546 | int ret = 0, prot = 0; | 546 | int ret = 0, prot = 0; |
547 | uint64_t mask; | 547 | uint64_t mask; |
548 | struct vfio_dma *dma = NULL; | ||
549 | unsigned long pfn; | ||
548 | 550 | ||
549 | end = map->iova + map->size; | 551 | end = map->iova + map->size; |
550 | 552 | ||
@@ -587,8 +589,6 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu, | |||
587 | } | 589 | } |
588 | 590 | ||
589 | for (iova = map->iova; iova < end; iova += size, vaddr += size) { | 591 | for (iova = map->iova; iova < end; iova += size, vaddr += size) { |
590 | struct vfio_dma *dma = NULL; | ||
591 | unsigned long pfn; | ||
592 | long i; | 592 | long i; |
593 | 593 | ||
594 | /* Pin a contiguous chunk of memory */ | 594 | /* Pin a contiguous chunk of memory */ |
@@ -597,16 +597,15 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu, | |||
597 | if (npage <= 0) { | 597 | if (npage <= 0) { |
598 | WARN_ON(!npage); | 598 | WARN_ON(!npage); |
599 | ret = (int)npage; | 599 | ret = (int)npage; |
600 | break; | 600 | goto out; |
601 | } | 601 | } |
602 | 602 | ||
603 | /* Verify pages are not already mapped */ | 603 | /* Verify pages are not already mapped */ |
604 | for (i = 0; i < npage; i++) { | 604 | for (i = 0; i < npage; i++) { |
605 | if (iommu_iova_to_phys(iommu->domain, | 605 | if (iommu_iova_to_phys(iommu->domain, |
606 | iova + (i << PAGE_SHIFT))) { | 606 | iova + (i << PAGE_SHIFT))) { |
607 | vfio_unpin_pages(pfn, npage, prot, true); | ||
608 | ret = -EBUSY; | 607 | ret = -EBUSY; |
609 | break; | 608 | goto out_unpin; |
610 | } | 609 | } |
611 | } | 610 | } |
612 | 611 | ||
@@ -616,8 +615,7 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu, | |||
616 | if (ret) { | 615 | if (ret) { |
617 | if (ret != -EBUSY || | 616 | if (ret != -EBUSY || |
618 | map_try_harder(iommu, iova, pfn, npage, prot)) { | 617 | map_try_harder(iommu, iova, pfn, npage, prot)) { |
619 | vfio_unpin_pages(pfn, npage, prot, true); | 618 | goto out_unpin; |
620 | break; | ||
621 | } | 619 | } |
622 | } | 620 | } |
623 | 621 | ||
@@ -672,9 +670,8 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu, | |||
672 | dma = kzalloc(sizeof(*dma), GFP_KERNEL); | 670 | dma = kzalloc(sizeof(*dma), GFP_KERNEL); |
673 | if (!dma) { | 671 | if (!dma) { |
674 | iommu_unmap(iommu->domain, iova, size); | 672 | iommu_unmap(iommu->domain, iova, size); |
675 | vfio_unpin_pages(pfn, npage, prot, true); | ||
676 | ret = -ENOMEM; | 673 | ret = -ENOMEM; |
677 | break; | 674 | goto out_unpin; |
678 | } | 675 | } |
679 | 676 | ||
680 | dma->size = size; | 677 | dma->size = size; |
@@ -685,16 +682,21 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu, | |||
685 | } | 682 | } |
686 | } | 683 | } |
687 | 684 | ||
688 | if (ret) { | 685 | WARN_ON(ret); |
689 | struct vfio_dma *tmp; | 686 | mutex_unlock(&iommu->lock); |
690 | iova = map->iova; | 687 | return ret; |
691 | size = map->size; | 688 | |
692 | while ((tmp = vfio_find_dma(iommu, iova, size))) { | 689 | out_unpin: |
693 | int r = vfio_remove_dma_overlap(iommu, iova, | 690 | vfio_unpin_pages(pfn, npage, prot, true); |
694 | &size, tmp); | 691 | |
695 | if (WARN_ON(r || !size)) | 692 | out: |
696 | break; | 693 | iova = map->iova; |
697 | } | 694 | size = map->size; |
695 | while ((dma = vfio_find_dma(iommu, iova, size))) { | ||
696 | int r = vfio_remove_dma_overlap(iommu, iova, | ||
697 | &size, dma); | ||
698 | if (WARN_ON(r || !size)) | ||
699 | break; | ||
698 | } | 700 | } |
699 | 701 | ||
700 | mutex_unlock(&iommu->lock); | 702 | mutex_unlock(&iommu->lock); |
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 592b31698fc8..ce5221fa393a 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c | |||
@@ -728,7 +728,12 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, | |||
728 | } | 728 | } |
729 | se_sess = tv_nexus->tvn_se_sess; | 729 | se_sess = tv_nexus->tvn_se_sess; |
730 | 730 | ||
731 | tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_KERNEL); | 731 | tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC); |
732 | if (tag < 0) { | ||
733 | pr_err("Unable to obtain tag for tcm_vhost_cmd\n"); | ||
734 | return ERR_PTR(-ENOMEM); | ||
735 | } | ||
736 | |||
732 | cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag]; | 737 | cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag]; |
733 | sg = cmd->tvc_sgl; | 738 | sg = cmd->tvc_sgl; |
734 | pages = cmd->tvc_upages; | 739 | pages = cmd->tvc_upages; |
diff --git a/drivers/video/mmp/hw/mmp_ctrl.c b/drivers/video/mmp/hw/mmp_ctrl.c index 75dca19bf214..6ac755270ab4 100644 --- a/drivers/video/mmp/hw/mmp_ctrl.c +++ b/drivers/video/mmp/hw/mmp_ctrl.c | |||
@@ -514,7 +514,7 @@ static int mmphw_probe(struct platform_device *pdev) | |||
514 | if (IS_ERR(ctrl->clk)) { | 514 | if (IS_ERR(ctrl->clk)) { |
515 | dev_err(ctrl->dev, "unable to get clk %s\n", mi->clk_name); | 515 | dev_err(ctrl->dev, "unable to get clk %s\n", mi->clk_name); |
516 | ret = -ENOENT; | 516 | ret = -ENOENT; |
517 | goto failed_get_clk; | 517 | goto failed; |
518 | } | 518 | } |
519 | clk_prepare_enable(ctrl->clk); | 519 | clk_prepare_enable(ctrl->clk); |
520 | 520 | ||
@@ -551,21 +551,8 @@ failed_path_init: | |||
551 | path_deinit(path_plat); | 551 | path_deinit(path_plat); |
552 | } | 552 | } |
553 | 553 | ||
554 | if (ctrl->clk) { | 554 | clk_disable_unprepare(ctrl->clk); |
555 | devm_clk_put(ctrl->dev, ctrl->clk); | ||
556 | clk_disable_unprepare(ctrl->clk); | ||
557 | } | ||
558 | failed_get_clk: | ||
559 | devm_free_irq(ctrl->dev, ctrl->irq, ctrl); | ||
560 | failed: | 555 | failed: |
561 | if (ctrl) { | ||
562 | if (ctrl->reg_base) | ||
563 | devm_iounmap(ctrl->dev, ctrl->reg_base); | ||
564 | devm_release_mem_region(ctrl->dev, res->start, | ||
565 | resource_size(res)); | ||
566 | devm_kfree(ctrl->dev, ctrl); | ||
567 | } | ||
568 | |||
569 | dev_err(&pdev->dev, "device init failed\n"); | 556 | dev_err(&pdev->dev, "device init failed\n"); |
570 | 557 | ||
571 | return ret; | 558 | return ret; |
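Editorial note: the mmphw_probe() cleanup above drops the explicit devm_clk_put/devm_free_irq/devm_iounmap/devm_release_mem_region/devm_kfree calls because devm-managed resources are released automatically by the driver core when probe() fails; only clk_prepare_enable(), which devm does not track here, still needs a manual unwind. A sketch of that convention under those assumptions (sketch_register is hypothetical):

	#include <linux/platform_device.h>
	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/io.h>

	static int sketch_register(struct platform_device *pdev) { return 0; }	/* hypothetical */

	static int sketch_probe(struct platform_device *pdev)
	{
		struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		void __iomem *base;
		struct clk *clk;
		int ret;

		base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(base))
			return PTR_ERR(base);		/* devm unmaps and releases the region for us */

		clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(clk))
			return PTR_ERR(clk);		/* devm drops the clk reference on failure */

		ret = clk_prepare_enable(clk);
		if (ret)
			return ret;

		ret = sketch_register(pdev);
		if (ret)
			clk_disable_unprepare(clk);	/* the only manual unwind devm does not cover */
		return ret;
	}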
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c index d250ed0f806d..27197a8048c0 100644 --- a/drivers/video/mxsfb.c +++ b/drivers/video/mxsfb.c | |||
@@ -620,6 +620,7 @@ static int mxsfb_restore_mode(struct mxsfb_info *host) | |||
620 | break; | 620 | break; |
621 | case 3: | 621 | case 3: |
622 | bits_per_pixel = 32; | 622 | bits_per_pixel = 32; |
623 | break; | ||
623 | case 1: | 624 | case 1: |
624 | default: | 625 | default: |
625 | return -EINVAL; | 626 | return -EINVAL; |
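Editorial note: the mxsfb fix adds the break that the 32-bpp case was missing; without it, control fell through into the case-1/default branch and returned -EINVAL even though a valid depth had been decoded. A minimal illustration of the same fall-through hazard, with hypothetical values:

	#include <errno.h>

	static int decode_depth(unsigned int ctrl_word)
	{
		int bpp;

		switch (ctrl_word) {
		case 3:
			bpp = 32;
			break;		/* without this break, execution falls into the error case below */
		case 1:
		default:
			return -EINVAL;
		}
		return bpp;
	}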
diff --git a/drivers/video/neofb.c b/drivers/video/neofb.c index 7ef079c146e7..c172a5281f9e 100644 --- a/drivers/video/neofb.c +++ b/drivers/video/neofb.c | |||
@@ -2075,6 +2075,7 @@ static int neofb_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
2075 | if (!fb_find_mode(&info->var, info, mode_option, NULL, 0, | 2075 | if (!fb_find_mode(&info->var, info, mode_option, NULL, 0, |
2076 | info->monspecs.modedb, 16)) { | 2076 | info->monspecs.modedb, 16)) { |
2077 | printk(KERN_ERR "neofb: Unable to find usable video mode.\n"); | 2077 | printk(KERN_ERR "neofb: Unable to find usable video mode.\n"); |
2078 | err = -EINVAL; | ||
2078 | goto err_map_video; | 2079 | goto err_map_video; |
2079 | } | 2080 | } |
2080 | 2081 | ||
@@ -2097,7 +2098,8 @@ static int neofb_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
2097 | info->fix.smem_len >> 10, info->var.xres, | 2098 | info->fix.smem_len >> 10, info->var.xres, |
2098 | info->var.yres, h_sync / 1000, h_sync % 1000, v_sync); | 2099 | info->var.yres, h_sync / 1000, h_sync % 1000, v_sync); |
2099 | 2100 | ||
2100 | if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) | 2101 | err = fb_alloc_cmap(&info->cmap, 256, 0); |
2102 | if (err < 0) | ||
2101 | goto err_map_video; | 2103 | goto err_map_video; |
2102 | 2104 | ||
2103 | err = register_framebuffer(info); | 2105 | err = register_framebuffer(info); |
diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c index 171821ddd78d..ba5b40f581f6 100644 --- a/drivers/video/of_display_timing.c +++ b/drivers/video/of_display_timing.c | |||
@@ -120,7 +120,7 @@ int of_get_display_timing(struct device_node *np, const char *name, | |||
120 | return -EINVAL; | 120 | return -EINVAL; |
121 | } | 121 | } |
122 | 122 | ||
123 | timing_np = of_find_node_by_name(np, name); | 123 | timing_np = of_get_child_by_name(np, name); |
124 | if (!timing_np) { | 124 | if (!timing_np) { |
125 | pr_err("%s: could not find node '%s'\n", | 125 | pr_err("%s: could not find node '%s'\n", |
126 | of_node_full_name(np), name); | 126 | of_node_full_name(np), name); |
@@ -143,11 +143,11 @@ struct display_timings *of_get_display_timings(struct device_node *np) | |||
143 | struct display_timings *disp; | 143 | struct display_timings *disp; |
144 | 144 | ||
145 | if (!np) { | 145 | if (!np) { |
146 | pr_err("%s: no devicenode given\n", of_node_full_name(np)); | 146 | pr_err("%s: no device node given\n", of_node_full_name(np)); |
147 | return NULL; | 147 | return NULL; |
148 | } | 148 | } |
149 | 149 | ||
150 | timings_np = of_find_node_by_name(np, "display-timings"); | 150 | timings_np = of_get_child_by_name(np, "display-timings"); |
151 | if (!timings_np) { | 151 | if (!timings_np) { |
152 | pr_err("%s: could not find display-timings node\n", | 152 | pr_err("%s: could not find display-timings node\n", |
153 | of_node_full_name(np)); | 153 | of_node_full_name(np)); |
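Editorial note: the of_display_timing change matters because of_find_node_by_name() walks the flattened device tree onward from the given node (and drops a reference on it), so it can match an unrelated node elsewhere in the tree, whereas of_get_child_by_name() looks only at direct children of np and leaves np's refcount alone. A small sketch of the intended lookup:

	#include <linux/of.h>
	#include <linux/errno.h>

	static int parse_timings(struct device_node *panel)
	{
		/* Look only among the direct children of panel, not anywhere later in the tree. */
		struct device_node *child = of_get_child_by_name(panel, "display-timings");

		if (!child)
			return -ENOENT;
		/* ... parse properties of child here ... */
		of_node_put(child);	/* of_get_child_by_name() returns a counted reference */
		return 0;
	}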
diff --git a/drivers/video/omap2/displays-new/Kconfig b/drivers/video/omap2/displays-new/Kconfig index 6c90885b0940..10b25e7cd878 100644 --- a/drivers/video/omap2/displays-new/Kconfig +++ b/drivers/video/omap2/displays-new/Kconfig | |||
@@ -35,6 +35,7 @@ config DISPLAY_PANEL_DPI | |||
35 | 35 | ||
36 | config DISPLAY_PANEL_DSI_CM | 36 | config DISPLAY_PANEL_DSI_CM |
37 | tristate "Generic DSI Command Mode Panel" | 37 | tristate "Generic DSI Command Mode Panel" |
38 | depends on BACKLIGHT_CLASS_DEVICE | ||
38 | help | 39 | help |
39 | Driver for generic DSI command mode panels. | 40 | Driver for generic DSI command mode panels. |
40 | 41 | ||
diff --git a/drivers/video/omap2/displays-new/connector-analog-tv.c b/drivers/video/omap2/displays-new/connector-analog-tv.c index 1b60698f141e..ccd9073f706f 100644 --- a/drivers/video/omap2/displays-new/connector-analog-tv.c +++ b/drivers/video/omap2/displays-new/connector-analog-tv.c | |||
@@ -191,7 +191,7 @@ static int tvc_probe_pdata(struct platform_device *pdev) | |||
191 | in = omap_dss_find_output(pdata->source); | 191 | in = omap_dss_find_output(pdata->source); |
192 | if (in == NULL) { | 192 | if (in == NULL) { |
193 | dev_err(&pdev->dev, "Failed to find video source\n"); | 193 | dev_err(&pdev->dev, "Failed to find video source\n"); |
194 | return -ENODEV; | 194 | return -EPROBE_DEFER; |
195 | } | 195 | } |
196 | 196 | ||
197 | ddata->in = in; | 197 | ddata->in = in; |
diff --git a/drivers/video/omap2/displays-new/connector-dvi.c b/drivers/video/omap2/displays-new/connector-dvi.c index bc5f8ceda371..63d88ee6dfe4 100644 --- a/drivers/video/omap2/displays-new/connector-dvi.c +++ b/drivers/video/omap2/displays-new/connector-dvi.c | |||
@@ -263,7 +263,7 @@ static int dvic_probe_pdata(struct platform_device *pdev) | |||
263 | in = omap_dss_find_output(pdata->source); | 263 | in = omap_dss_find_output(pdata->source); |
264 | if (in == NULL) { | 264 | if (in == NULL) { |
265 | dev_err(&pdev->dev, "Failed to find video source\n"); | 265 | dev_err(&pdev->dev, "Failed to find video source\n"); |
266 | return -ENODEV; | 266 | return -EPROBE_DEFER; |
267 | } | 267 | } |
268 | 268 | ||
269 | ddata->in = in; | 269 | ddata->in = in; |
diff --git a/drivers/video/omap2/displays-new/connector-hdmi.c b/drivers/video/omap2/displays-new/connector-hdmi.c index c5826716d6ab..9abe2c039ae9 100644 --- a/drivers/video/omap2/displays-new/connector-hdmi.c +++ b/drivers/video/omap2/displays-new/connector-hdmi.c | |||
@@ -290,7 +290,7 @@ static int hdmic_probe_pdata(struct platform_device *pdev) | |||
290 | in = omap_dss_find_output(pdata->source); | 290 | in = omap_dss_find_output(pdata->source); |
291 | if (in == NULL) { | 291 | if (in == NULL) { |
292 | dev_err(&pdev->dev, "Failed to find video source\n"); | 292 | dev_err(&pdev->dev, "Failed to find video source\n"); |
293 | return -ENODEV; | 293 | return -EPROBE_DEFER; |
294 | } | 294 | } |
295 | 295 | ||
296 | ddata->in = in; | 296 | ddata->in = in; |
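Editorial note: the three connector drivers above now return -EPROBE_DEFER instead of -ENODEV when their video source cannot be found, so the driver core retries the probe later, after the source driver has registered, rather than failing permanently on probe ordering. A short sketch of the convention; find_video_source() is a hypothetical stand-in for a lookup such as omap_dss_find_output():

	#include <linux/errno.h>
	#include <linux/platform_device.h>

	struct video_source;							/* opaque, hypothetical */
	static struct video_source *find_video_source(struct platform_device *pdev);	/* hypothetical lookup */

	static int connector_probe_sketch(struct platform_device *pdev)
	{
		struct video_source *src = find_video_source(pdev);

		if (!src)
			return -EPROBE_DEFER;	/* driver core re-probes later, once the source has registered */

		/* ... bind to src and register the connector ... */
		return 0;
	}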
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c index 02a7340111df..477975009eee 100644 --- a/drivers/video/omap2/dss/dispc.c +++ b/drivers/video/omap2/dss/dispc.c | |||
@@ -3691,6 +3691,7 @@ static int __init omap_dispchw_probe(struct platform_device *pdev) | |||
3691 | } | 3691 | } |
3692 | 3692 | ||
3693 | pm_runtime_enable(&pdev->dev); | 3693 | pm_runtime_enable(&pdev->dev); |
3694 | pm_runtime_irq_safe(&pdev->dev); | ||
3694 | 3695 | ||
3695 | r = dispc_runtime_get(); | 3696 | r = dispc_runtime_get(); |
3696 | if (r) | 3697 | if (r) |
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c index 47ca86c5c6c0..d838ba829459 100644 --- a/drivers/video/s3fb.c +++ b/drivers/video/s3fb.c | |||
@@ -1336,14 +1336,7 @@ static int s3_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
1336 | (info->var.bits_per_pixel * info->var.xres_virtual); | 1336 | (info->var.bits_per_pixel * info->var.xres_virtual); |
1337 | if (info->var.yres_virtual < info->var.yres) { | 1337 | if (info->var.yres_virtual < info->var.yres) { |
1338 | dev_err(info->device, "virtual vertical size smaller than real\n"); | 1338 | dev_err(info->device, "virtual vertical size smaller than real\n"); |
1339 | goto err_find_mode; | 1339 | rc = -EINVAL; |
1340 | } | ||
1341 | |||
1342 | /* maximize virtual vertical size for fast scrolling */ | ||
1343 | info->var.yres_virtual = info->fix.smem_len * 8 / | ||
1344 | (info->var.bits_per_pixel * info->var.xres_virtual); | ||
1345 | if (info->var.yres_virtual < info->var.yres) { | ||
1346 | dev_err(info->device, "virtual vertical size smaller than real\n"); | ||
1347 | goto err_find_mode; | 1340 | goto err_find_mode; |
1348 | } | 1341 | } |
1349 | 1342 | ||
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index 5be5e3d14f79..19f3c3fc65f4 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c | |||
@@ -802,6 +802,12 @@ static int hpwdt_init_one(struct pci_dev *dev, | |||
802 | return -ENODEV; | 802 | return -ENODEV; |
803 | } | 803 | } |
804 | 804 | ||
805 | /* | ||
806 | * Ignore all auxiliary iLO devices with the following PCI ID | ||
807 | */ | ||
808 | if (dev->subsystem_device == 0x1979) | ||
809 | return -ENODEV; | ||
810 | |||
805 | if (pci_enable_device(dev)) { | 811 | if (pci_enable_device(dev)) { |
806 | dev_warn(&dev->dev, | 812 | dev_warn(&dev->dev, |
807 | "Not possible to enable PCI Device: 0x%x:0x%x.\n", | 813 | "Not possible to enable PCI Device: 0x%x:0x%x.\n", |
diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c index 491419e0772a..5c3d4df63e68 100644 --- a/drivers/watchdog/kempld_wdt.c +++ b/drivers/watchdog/kempld_wdt.c | |||
@@ -35,7 +35,7 @@ | |||
35 | #define KEMPLD_WDT_STAGE_TIMEOUT(x) (0x1b + (x) * 4) | 35 | #define KEMPLD_WDT_STAGE_TIMEOUT(x) (0x1b + (x) * 4) |
36 | #define KEMPLD_WDT_STAGE_CFG(x) (0x18 + (x)) | 36 | #define KEMPLD_WDT_STAGE_CFG(x) (0x18 + (x)) |
37 | #define STAGE_CFG_GET_PRESCALER(x) (((x) & 0x30) >> 4) | 37 | #define STAGE_CFG_GET_PRESCALER(x) (((x) & 0x30) >> 4) |
38 | #define STAGE_CFG_SET_PRESCALER(x) (((x) & 0x30) << 4) | 38 | #define STAGE_CFG_SET_PRESCALER(x) (((x) & 0x3) << 4) |
39 | #define STAGE_CFG_PRESCALER_MASK 0x30 | 39 | #define STAGE_CFG_PRESCALER_MASK 0x30 |
40 | #define STAGE_CFG_ACTION_MASK 0x7 | 40 | #define STAGE_CFG_ACTION_MASK 0x7 |
41 | #define STAGE_CFG_ASSERT (1 << 3) | 41 | #define STAGE_CFG_ASSERT (1 << 3) |
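Editorial note on the kempld_wdt macro fix: the prescaler occupies bits 5:4 of the stage config register (mask 0x30). GET extracts the 2-bit field by masking 0x30 and shifting right by 4; SET must mask a 2-bit value with 0x3 and shift it left by 4. The old SET macro masked with 0x30 before shifting, so any legal value 0..3 was cleared to 0 and the field was never set. Illustrative macros (sketch names, not the driver's):

	/* Prescaler field: bits 5:4, mask 0x30. */
	#define SKETCH_PRESCALER_MASK		0x30
	#define SKETCH_GET_PRESCALER(reg)	(((reg) & 0x30) >> 4)
	#define SKETCH_SET_PRESCALER(val)	(((val) & 0x3) << 4)	/* mask the 2-bit value, then shift into place */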
diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c index 1f94b42764aa..f6caa77151c7 100644 --- a/drivers/watchdog/sunxi_wdt.c +++ b/drivers/watchdog/sunxi_wdt.c | |||
@@ -146,7 +146,7 @@ static const struct watchdog_ops sunxi_wdt_ops = { | |||
146 | .set_timeout = sunxi_wdt_set_timeout, | 146 | .set_timeout = sunxi_wdt_set_timeout, |
147 | }; | 147 | }; |
148 | 148 | ||
149 | static int __init sunxi_wdt_probe(struct platform_device *pdev) | 149 | static int sunxi_wdt_probe(struct platform_device *pdev) |
150 | { | 150 | { |
151 | struct sunxi_wdt_dev *sunxi_wdt; | 151 | struct sunxi_wdt_dev *sunxi_wdt; |
152 | struct resource *res; | 152 | struct resource *res; |
@@ -187,7 +187,7 @@ static int __init sunxi_wdt_probe(struct platform_device *pdev) | |||
187 | return 0; | 187 | return 0; |
188 | } | 188 | } |
189 | 189 | ||
190 | static int __exit sunxi_wdt_remove(struct platform_device *pdev) | 190 | static int sunxi_wdt_remove(struct platform_device *pdev) |
191 | { | 191 | { |
192 | struct sunxi_wdt_dev *sunxi_wdt = platform_get_drvdata(pdev); | 192 | struct sunxi_wdt_dev *sunxi_wdt = platform_get_drvdata(pdev); |
193 | 193 | ||
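Editorial note: dropping __init from sunxi_wdt_probe() and __exit from sunxi_wdt_remove() is required because probe can run long after boot (deferred probing, sysfs bind), when .init.text has already been discarded, and remove must remain available for unbinding even when the driver is built in. A sketch of the annotation-free registration, with hypothetical names:

	#include <linux/module.h>
	#include <linux/platform_device.h>

	static int sketch_wdt_probe(struct platform_device *pdev)  { return 0; }	/* not __init */
	static int sketch_wdt_remove(struct platform_device *pdev) { return 0; }	/* not __exit */

	static struct platform_driver sketch_wdt_driver = {
		.probe	= sketch_wdt_probe,
		.remove	= sketch_wdt_remove,
		.driver	= {
			.name = "sketch-wdt",
		},
	};
	module_platform_driver(sketch_wdt_driver);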
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c index 42913f131dc2..c9b0c627fe7e 100644 --- a/drivers/watchdog/ts72xx_wdt.c +++ b/drivers/watchdog/ts72xx_wdt.c | |||
@@ -310,7 +310,8 @@ static long ts72xx_wdt_ioctl(struct file *file, unsigned int cmd, | |||
310 | 310 | ||
311 | case WDIOC_GETSTATUS: | 311 | case WDIOC_GETSTATUS: |
312 | case WDIOC_GETBOOTSTATUS: | 312 | case WDIOC_GETBOOTSTATUS: |
313 | return put_user(0, p); | 313 | error = put_user(0, p); |
314 | break; | ||
314 | 315 | ||
315 | case WDIOC_KEEPALIVE: | 316 | case WDIOC_KEEPALIVE: |
316 | ts72xx_wdt_kick(wdt); | 317 | ts72xx_wdt_kick(wdt); |
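Editorial note: the ts72xx_wdt ioctl change stores put_user()'s result in error and breaks instead of returning from inside the switch, so every case funnels through the function's single exit path. A small sketch of that single-exit ioctl shape (hypothetical function name):

	#include <linux/errno.h>
	#include <linux/uaccess.h>
	#include <linux/watchdog.h>

	static long sketch_ioctl(unsigned int cmd, int __user *p)
	{
		long error;

		switch (cmd) {
		case WDIOC_GETSTATUS:
		case WDIOC_GETBOOTSTATUS:
			error = put_user(0, p);	/* record the result, then fall out below */
			break;
		default:
			error = -ENOTTY;
			break;
		}
		/* single exit point: any locking or cleanup added later covers every case */
		return error;
	}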
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index a50c6e3a7cc4..b232908a6192 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c | |||
@@ -398,8 +398,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) | |||
398 | if (nr_pages > ARRAY_SIZE(frame_list)) | 398 | if (nr_pages > ARRAY_SIZE(frame_list)) |
399 | nr_pages = ARRAY_SIZE(frame_list); | 399 | nr_pages = ARRAY_SIZE(frame_list); |
400 | 400 | ||
401 | scratch_page = get_balloon_scratch_page(); | ||
402 | |||
403 | for (i = 0; i < nr_pages; i++) { | 401 | for (i = 0; i < nr_pages; i++) { |
404 | page = alloc_page(gfp); | 402 | page = alloc_page(gfp); |
405 | if (page == NULL) { | 403 | if (page == NULL) { |
@@ -413,6 +411,12 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) | |||
413 | 411 | ||
414 | scrub_page(page); | 412 | scrub_page(page); |
415 | 413 | ||
414 | /* | ||
415 | * Ballooned out frames are effectively replaced with | ||
416 | * a scratch frame. Ensure direct mappings and the | ||
417 | * p2m are consistent. | ||
418 | */ | ||
419 | scratch_page = get_balloon_scratch_page(); | ||
416 | #ifdef CONFIG_XEN_HAVE_PVMMU | 420 | #ifdef CONFIG_XEN_HAVE_PVMMU |
417 | if (xen_pv_domain() && !PageHighMem(page)) { | 421 | if (xen_pv_domain() && !PageHighMem(page)) { |
418 | ret = HYPERVISOR_update_va_mapping( | 422 | ret = HYPERVISOR_update_va_mapping( |
@@ -422,24 +426,19 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) | |||
422 | BUG_ON(ret); | 426 | BUG_ON(ret); |
423 | } | 427 | } |
424 | #endif | 428 | #endif |
425 | } | ||
426 | |||
427 | /* Ensure that ballooned highmem pages don't have kmaps. */ | ||
428 | kmap_flush_unused(); | ||
429 | flush_tlb_all(); | ||
430 | |||
431 | /* No more mappings: invalidate P2M and add to balloon. */ | ||
432 | for (i = 0; i < nr_pages; i++) { | ||
433 | pfn = mfn_to_pfn(frame_list[i]); | ||
434 | if (!xen_feature(XENFEAT_auto_translated_physmap)) { | 429 | if (!xen_feature(XENFEAT_auto_translated_physmap)) { |
435 | unsigned long p; | 430 | unsigned long p; |
436 | p = page_to_pfn(scratch_page); | 431 | p = page_to_pfn(scratch_page); |
437 | __set_phys_to_machine(pfn, pfn_to_mfn(p)); | 432 | __set_phys_to_machine(pfn, pfn_to_mfn(p)); |
438 | } | 433 | } |
434 | put_balloon_scratch_page(); | ||
435 | |||
439 | balloon_append(pfn_to_page(pfn)); | 436 | balloon_append(pfn_to_page(pfn)); |
440 | } | 437 | } |
441 | 438 | ||
442 | put_balloon_scratch_page(); | 439 | /* Ensure that ballooned highmem pages don't have kmaps. */ |
440 | kmap_flush_unused(); | ||
441 | flush_tlb_all(); | ||
443 | 442 | ||
444 | set_xen_guest_handle(reservation.extent_start, frame_list); | 443 | set_xen_guest_handle(reservation.extent_start, frame_list); |
445 | reservation.nr_extents = nr_pages; | 444 | reservation.nr_extents = nr_pages; |