Diffstat (limited to 'drivers')
273 files changed, 3724 insertions, 2402 deletions
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 657964e8ab7e..37fb19047603 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -65,6 +65,7 @@ struct lpss_private_data;
 
 struct lpss_device_desc {
         unsigned int flags;
+        const char *clk_con_id;
         unsigned int prv_offset;
         size_t prv_size_override;
         void (*setup)(struct lpss_private_data *pdata);
@@ -140,6 +141,7 @@ static struct lpss_device_desc lpt_i2c_dev_desc = {
 
 static struct lpss_device_desc lpt_uart_dev_desc = {
         .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
+        .clk_con_id = "baudclk",
         .prv_offset = 0x800,
         .setup = lpss_uart_setup,
 };
@@ -156,6 +158,7 @@ static struct lpss_device_desc byt_pwm_dev_desc = {
 
 static struct lpss_device_desc byt_uart_dev_desc = {
         .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
+        .clk_con_id = "baudclk",
         .prv_offset = 0x800,
         .setup = lpss_uart_setup,
 };
@@ -313,7 +316,7 @@ out:
                 return PTR_ERR(clk);
 
         pdata->clk = clk;
-        clk_register_clkdev(clk, NULL, devname);
+        clk_register_clkdev(clk, dev_desc->clk_con_id, devname);
         return 0;
 }
 
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index c723668e3e27..5589a6e2a023 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -42,8 +42,10 @@ static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io)
          * CHECKME: len might be required to check versus a minimum
          * length as well. 1 for io is fine, but for memory it does
          * not make any sense at all.
+         * Note: some BIOSes report incorrect length for ACPI address space
+         * descriptor, so remove check of 'reslen == len' to avoid regression.
          */
-        if (len && reslen && reslen == len && start <= end)
+        if (len && reslen && start <= end)
                 return true;
 
         pr_debug("ACPI: invalid or unassigned resource %s [%016llx - %016llx] length [%016llx]\n",
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index debd30917010..26eb70c8f518 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -2110,7 +2110,8 @@ static int __init intel_opregion_present(void)
 
 int acpi_video_register(void)
 {
-        int result = 0;
+        int ret;
+
         if (register_count) {
                 /*
                  * if the function of acpi_video_register is already called,
@@ -2122,9 +2123,9 @@ int acpi_video_register(void)
         mutex_init(&video_list_lock);
         INIT_LIST_HEAD(&video_bus_head);
 
-        result = acpi_bus_register_driver(&acpi_video_bus);
-        if (result < 0)
-                return -ENODEV;
+        ret = acpi_bus_register_driver(&acpi_video_bus);
+        if (ret)
+                return ret;
 
         /*
          * When the acpi_video_bus is loaded successfully, increase
@@ -2176,6 +2177,17 @@ EXPORT_SYMBOL(acpi_video_unregister_backlight);
 
 static int __init acpi_video_init(void)
 {
+        /*
+         * Let the module load even if ACPI is disabled (e.g. due to
+         * a broken BIOS) so that i915.ko can still be loaded on such
+         * old systems without an AcpiOpRegion.
+         *
+         * acpi_video_register() will report -ENODEV later as well due
+         * to acpi_disabled when i915.ko tries to register itself afterwards.
+         */
+        if (acpi_disabled)
+                return 0;
+
         dmi_check_system(video_dmi_table);
 
         if (intel_opregion_present())
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 33b09b6568a4..6607f3c6ace1 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -551,7 +551,6 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
 {
         void *page_addr;
         unsigned long user_page_addr;
-        struct vm_struct tmp_area;
         struct page **page;
         struct mm_struct *mm;
 
@@ -600,10 +599,11 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
                                 proc->pid, page_addr);
                         goto err_alloc_page_failed;
                 }
-                tmp_area.addr = page_addr;
-                tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
-                ret = map_vm_area(&tmp_area, PAGE_KERNEL, page);
-                if (ret) {
+                ret = map_kernel_range_noflush((unsigned long)page_addr,
+                                               PAGE_SIZE, PAGE_KERNEL, page);
+                flush_cache_vmap((unsigned long)page_addr,
+                                 (unsigned long)page_addr + PAGE_SIZE);
+                if (ret != 1) {
                         pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
                                proc->pid, page_addr);
                         goto err_map_kernel_failed;
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index f9054cd36a72..5389579c5120 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -869,6 +869,8 @@ try_offline_again:
          */
         ata_msleep(ap, 1);
 
+        sata_set_spd(link);
+
         /*
          * Now, bring the host controller online again, this can take time
          * as PHY reset and communication establishment, 1st D2H FIS and
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index ba4abbe4693c..45937f88e77c 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2242,7 +2242,7 @@ static void rtpm_status_str(struct seq_file *s, struct device *dev)
 }
 
 static int pm_genpd_summary_one(struct seq_file *s,
-                                struct generic_pm_domain *gpd)
+                                struct generic_pm_domain *genpd)
 {
         static const char * const status_lookup[] = {
                 [GPD_STATE_ACTIVE] = "on",
@@ -2256,26 +2256,26 @@ static int pm_genpd_summary_one(struct seq_file *s,
         struct gpd_link *link;
         int ret;
 
-        ret = mutex_lock_interruptible(&gpd->lock);
+        ret = mutex_lock_interruptible(&genpd->lock);
         if (ret)
                 return -ERESTARTSYS;
 
-        if (WARN_ON(gpd->status >= ARRAY_SIZE(status_lookup)))
+        if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
                 goto exit;
-        seq_printf(s, "%-30s %-15s ", gpd->name, status_lookup[gpd->status]);
+        seq_printf(s, "%-30s %-15s ", genpd->name, status_lookup[genpd->status]);
 
         /*
          * Modifications on the list require holding locks on both
          * master and slave, so we are safe.
-         * Also gpd->name is immutable.
+         * Also genpd->name is immutable.
          */
-        list_for_each_entry(link, &gpd->master_links, master_node) {
+        list_for_each_entry(link, &genpd->master_links, master_node) {
                 seq_printf(s, "%s", link->slave->name);
-                if (!list_is_last(&link->master_node, &gpd->master_links))
+                if (!list_is_last(&link->master_node, &genpd->master_links))
                         seq_puts(s, ", ");
         }
 
-        list_for_each_entry(pm_data, &gpd->dev_list, list_node) {
+        list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
                 kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
                 if (kobj_path == NULL)
                         continue;
@@ -2287,14 +2287,14 @@ static int pm_genpd_summary_one(struct seq_file *s,
 
         seq_puts(s, "\n");
 exit:
-        mutex_unlock(&gpd->lock);
+        mutex_unlock(&genpd->lock);
 
         return 0;
 }
 
 static int pm_genpd_summary_show(struct seq_file *s, void *data)
 {
-        struct generic_pm_domain *gpd;
+        struct generic_pm_domain *genpd;
         int ret = 0;
 
         seq_puts(s, " domain status slaves\n");
@@ -2305,8 +2305,8 @@ static int pm_genpd_summary_show(struct seq_file *s, void *data)
         if (ret)
                 return -ERESTARTSYS;
 
-        list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
-                ret = pm_genpd_summary_one(s, gpd);
+        list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
+                ret = pm_genpd_summary_one(s, genpd);
                 if (ret)
                         break;
         }
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index c2744b30d5d9..aab7158d2afe 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -730,6 +730,7 @@ void pm_system_wakeup(void)
         pm_abort_suspend = true;
         freeze_wake();
 }
+EXPORT_SYMBOL_GPL(pm_system_wakeup);
 
 void pm_wakeup_clear(void)
 {
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index b87688881143..8bfc4c2bba87 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -272,6 +272,7 @@ static const struct usb_device_id blacklist_table[] = {
         { USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL },
 
         /* Intel Bluetooth devices */
+        { USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
         { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
         { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
         { USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW },
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 1d278ccd751f..e096e9cddb40 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -140,24 +140,24 @@ static int tpm_dev_add_device(struct tpm_chip *chip)
 {
         int rc;
 
-        rc = device_add(&chip->dev);
+        rc = cdev_add(&chip->cdev, chip->dev.devt, 1);
         if (rc) {
                 dev_err(&chip->dev,
-                        "unable to device_register() %s, major %d, minor %d, err=%d\n",
+                        "unable to cdev_add() %s, major %d, minor %d, err=%d\n",
                         chip->devname, MAJOR(chip->dev.devt),
                         MINOR(chip->dev.devt), rc);
 
+                device_unregister(&chip->dev);
                 return rc;
         }
 
-        rc = cdev_add(&chip->cdev, chip->dev.devt, 1);
+        rc = device_add(&chip->dev);
         if (rc) {
                 dev_err(&chip->dev,
-                        "unable to cdev_add() %s, major %d, minor %d, err=%d\n",
+                        "unable to device_register() %s, major %d, minor %d, err=%d\n",
                         chip->devname, MAJOR(chip->dev.devt),
                         MINOR(chip->dev.devt), rc);
 
-                device_unregister(&chip->dev);
                 return rc;
         }
 
@@ -174,27 +174,17 @@ static void tpm_dev_del_device(struct tpm_chip *chip)
  * tpm_chip_register() - create a character device for the TPM chip
  * @chip: TPM chip to use.
  *
- * Creates a character device for the TPM chip and adds sysfs interfaces for
- * the device, PPI and TCPA. As the last step this function adds the
- * chip to the list of TPM chips available for use.
+ * Creates a character device for the TPM chip and adds sysfs attributes for
+ * the device. As the last step this function adds the chip to the list of TPM
+ * chips available for in-kernel use.
  *
- * NOTE: This function should be only called after the chip initialization
- * is complete.
- *
- * Called from tpm_<specific>.c probe function only for devices
- * the driver has determined it should claim. Prior to calling
- * this function the specific probe function has called pci_enable_device
- * upon errant exit from this function specific probe function should call
- * pci_disable_device
+ * This function should be only called after the chip initialization is
+ * complete.
  */
 int tpm_chip_register(struct tpm_chip *chip)
 {
         int rc;
 
-        rc = tpm_dev_add_device(chip);
-        if (rc)
-                return rc;
-
         /* Populate sysfs for TPM1 devices. */
         if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
                 rc = tpm_sysfs_add_device(chip);
@@ -208,6 +198,10 @@ int tpm_chip_register(struct tpm_chip *chip)
                 chip->bios_dir = tpm_bios_log_setup(chip->devname);
         }
 
+        rc = tpm_dev_add_device(chip);
+        if (rc)
+                return rc;
+
         /* Make the chip available. */
         spin_lock(&driver_lock);
         list_add_rcu(&chip->list, &tpm_chip_list);
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index b1e53e3aece5..42ffa5e7a1e0 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -124,7 +124,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
 {
         struct ibmvtpm_dev *ibmvtpm;
         struct ibmvtpm_crq crq;
-        u64 *word = (u64 *) &crq;
+        __be64 *word = (__be64 *)&crq;
         int rc;
 
         ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip);
@@ -145,11 +145,11 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
         memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);
         crq.valid = (u8)IBMVTPM_VALID_CMD;
         crq.msg = (u8)VTPM_TPM_COMMAND;
-        crq.len = (u16)count;
-        crq.data = ibmvtpm->rtce_dma_handle;
+        crq.len = cpu_to_be16(count);
+        crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle);
 
-        rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(word[0]),
-                              cpu_to_be64(word[1]));
+        rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]),
+                              be64_to_cpu(word[1]));
         if (rc != H_SUCCESS) {
                 dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
                 rc = 0;
diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h
index f595f14426bf..6af92890518f 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.h
+++ b/drivers/char/tpm/tpm_ibmvtpm.h
@@ -22,9 +22,9 @@
 struct ibmvtpm_crq {
         u8 valid;
         u8 msg;
-        u16 len;
-        u32 data;
-        u64 reserved;
+        __be16 len;
+        __be32 data;
+        __be64 reserved;
 } __attribute__((packed, aligned(8)));
 
 struct ibmvtpm_crq_queue {
diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c
index f07c8152e5cc..3f27d21fb729 100644
--- a/drivers/clk/at91/pmc.c
+++ b/drivers/clk/at91/pmc.c
@@ -89,12 +89,29 @@ static int pmc_irq_set_type(struct irq_data *d, unsigned type)
         return 0;
 }
 
+static void pmc_irq_suspend(struct irq_data *d)
+{
+        struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
+
+        pmc->imr = pmc_read(pmc, AT91_PMC_IMR);
+        pmc_write(pmc, AT91_PMC_IDR, pmc->imr);
+}
+
+static void pmc_irq_resume(struct irq_data *d)
+{
+        struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
+
+        pmc_write(pmc, AT91_PMC_IER, pmc->imr);
+}
+
 static struct irq_chip pmc_irq = {
         .name = "PMC",
         .irq_disable = pmc_irq_mask,
         .irq_mask = pmc_irq_mask,
         .irq_unmask = pmc_irq_unmask,
         .irq_set_type = pmc_irq_set_type,
+        .irq_suspend = pmc_irq_suspend,
+        .irq_resume = pmc_irq_resume,
 };
 
 static struct lock_class_key pmc_lock_class;
@@ -224,7 +241,8 @@ static struct at91_pmc *__init at91_pmc_init(struct device_node *np,
                 goto out_free_pmc;
 
         pmc_write(pmc, AT91_PMC_IDR, 0xffffffff);
-        if (request_irq(pmc->virq, pmc_irq_handler, IRQF_SHARED, "pmc", pmc))
+        if (request_irq(pmc->virq, pmc_irq_handler,
+                        IRQF_SHARED | IRQF_COND_SUSPEND, "pmc", pmc))
                 goto out_remove_irqdomain;
 
         return pmc;
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index 52d2041fa3f6..69abb08cf146 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -33,6 +33,7 @@ struct at91_pmc {
         spinlock_t lock;
         const struct at91_pmc_caps *caps;
         struct irq_domain *irqdomain;
+        u32 imr;
 };
 
 static inline void pmc_lock(struct at91_pmc *pmc)
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index db7f8bce7467..25006a8bb8e6 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -144,12 +144,6 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
                                    divider->flags);
 }
 
-/*
- * The reverse of DIV_ROUND_UP: The maximum number which
- * divided by m is r
- */
-#define MULT_ROUND_UP(r, m) ((r) * (m) + (m) - 1)
-
 static bool _is_valid_table_div(const struct clk_div_table *table,
                                 unsigned int div)
 {
@@ -225,19 +219,24 @@ static int _div_round_closest(const struct clk_div_table *table,
                               unsigned long parent_rate, unsigned long rate,
                               unsigned long flags)
 {
-        int up, down, div;
+        int up, down;
+        unsigned long up_rate, down_rate;
 
-        up = down = div = DIV_ROUND_CLOSEST(parent_rate, rate);
+        up = DIV_ROUND_UP(parent_rate, rate);
+        down = parent_rate / rate;
 
         if (flags & CLK_DIVIDER_POWER_OF_TWO) {
-                up = __roundup_pow_of_two(div);
-                down = __rounddown_pow_of_two(div);
+                up = __roundup_pow_of_two(up);
+                down = __rounddown_pow_of_two(down);
         } else if (table) {
-                up = _round_up_table(table, div);
-                down = _round_down_table(table, div);
+                up = _round_up_table(table, up);
+                down = _round_down_table(table, down);
         }
 
-        return (up - div) <= (div - down) ? up : down;
+        up_rate = DIV_ROUND_UP(parent_rate, up);
+        down_rate = DIV_ROUND_UP(parent_rate, down);
+
+        return (rate - up_rate) <= (down_rate - rate) ? up : down;
 }
 
 static int _div_round(const struct clk_div_table *table,
@@ -313,7 +312,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
                         return i;
                 }
                 parent_rate = __clk_round_rate(__clk_get_parent(hw->clk),
-                                               MULT_ROUND_UP(rate, i));
+                                               rate * i);
                 now = DIV_ROUND_UP(parent_rate, i);
                 if (_is_best_div(rate, now, best, flags)) {
                         bestdiv = i;
@@ -353,7 +352,7 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
                 bestdiv = readl(divider->reg) >> divider->shift;
                 bestdiv &= div_mask(divider->width);
                 bestdiv = _get_div(divider->table, bestdiv, divider->flags);
-                return bestdiv;
+                return DIV_ROUND_UP(*prate, bestdiv);
         }
 
         return divider_round_rate(hw, rate, prate, divider->table,
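
Note on the clk-divider change above: the round-closest helper now compares the output rates produced by the two candidate dividers instead of comparing the dividers themselves, which is what "closest" should mean for a clock. A minimal standalone sketch of that selection logic (not taken from the kernel; the helper name and the example numbers are illustrative, and the table/power-of-two variants are omitted):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Pick the integer divider whose output rate is closest to the request. */
static unsigned int div_round_closest(unsigned long parent_rate,
				      unsigned long rate)
{
	unsigned int up = DIV_ROUND_UP(parent_rate, rate);   /* rate <= request */
	unsigned int down = parent_rate / rate;              /* rate >= request */
	unsigned long up_rate = DIV_ROUND_UP(parent_rate, up);
	unsigned long down_rate = DIV_ROUND_UP(parent_rate, down);

	return (rate - up_rate) <= (down_rate - rate) ? up : down;
}

int main(void)
{
	/*
	 * parent = 1000 kHz, requested = 300 kHz:
	 * down = 3 -> 334 kHz (34 kHz too fast),
	 * up   = 4 -> 250 kHz (50 kHz too slow),
	 * so the divider producing the closest rate is 3.
	 */
	printf("closest divider: %u\n", div_round_closest(1000, 300));
	return 0;
}
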
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index eb0152961d3c..237f23f68bfc 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1350,7 +1350,6 @@ static unsigned long clk_core_get_rate(struct clk_core *clk)
 
         return rate;
 }
-EXPORT_SYMBOL_GPL(clk_core_get_rate);
 
 /**
  * clk_get_rate - return the rate of clk
@@ -2171,6 +2170,32 @@ int clk_get_phase(struct clk *clk)
 }
 
 /**
+ * clk_is_match - check if two clk's point to the same hardware clock
+ * @p: clk compared against q
+ * @q: clk compared against p
+ *
+ * Returns true if the two struct clk pointers both point to the same hardware
+ * clock node. Put differently, returns true if struct clk *p and struct clk *q
+ * share the same struct clk_core object.
+ *
+ * Returns false otherwise. Note that two NULL clks are treated as matching.
+ */
+bool clk_is_match(const struct clk *p, const struct clk *q)
+{
+        /* trivial case: identical struct clk's or both NULL */
+        if (p == q)
+                return true;
+
+        /* true if clk->core pointers match. Avoid derefing garbage */
+        if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
+                if (p->core == q->core)
+                        return true;
+
+        return false;
+}
+EXPORT_SYMBOL_GPL(clk_is_match);
+
+/**
  * __clk_init - initialize the data structures in a struct clk
  * @dev: device initializing this clk, placeholder for now
  * @clk: clk being initialized
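
Note on clk_is_match() above: two struct clk handles obtained separately (e.g. one from clk_get() and one from clk_get_parent()) are distinct objects even when they describe the same hardware clock, so pointer comparison is not enough. A hypothetical usage sketch, assuming a driver with a mux clock and a candidate parent (function and variable names are made up, not from the patch):

#include <linux/clk.h>
#include <linux/device.h>

/* Skip reparenting when the candidate parent is already the active one. */
static int example_set_parent(struct device *dev, struct clk *mux,
			      struct clk *candidate)
{
	struct clk *current_parent = clk_get_parent(mux);

	if (clk_is_match(current_parent, candidate)) {
		dev_dbg(dev, "parent already selected, nothing to do\n");
		return 0;
	}

	return clk_set_parent(mux, candidate);
}
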
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index b0b562b9ce0e..e60feffc10a1 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -48,6 +48,17 @@ static struct clk_pll pll3 = {
         },
 };
 
+static struct clk_regmap pll4_vote = {
+        .enable_reg = 0x34c0,
+        .enable_mask = BIT(4),
+        .hw.init = &(struct clk_init_data){
+                .name = "pll4_vote",
+                .parent_names = (const char *[]){ "pll4" },
+                .num_parents = 1,
+                .ops = &clk_pll_vote_ops,
+        },
+};
+
 static struct clk_pll pll8 = {
         .l_reg = 0x3144,
         .m_reg = 0x3148,
@@ -3023,6 +3034,7 @@ static struct clk_branch rpm_msg_ram_h_clk = {
 
 static struct clk_regmap *gcc_msm8960_clks[] = {
         [PLL3] = &pll3.clkr,
+        [PLL4_VOTE] = &pll4_vote,
         [PLL8] = &pll8.clkr,
         [PLL8_VOTE] = &pll8_vote,
         [PLL14] = &pll14.clkr,
@@ -3247,6 +3259,7 @@ static const struct qcom_reset_map gcc_msm8960_resets[] = {
 
 static struct clk_regmap *gcc_apq8064_clks[] = {
         [PLL3] = &pll3.clkr,
+        [PLL4_VOTE] = &pll4_vote,
         [PLL8] = &pll8.clkr,
         [PLL8_VOTE] = &pll8_vote,
         [PLL14] = &pll14.clkr,
diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
index 121ffde25dc3..c9ff27b4648b 100644
--- a/drivers/clk/qcom/lcc-ipq806x.c
+++ b/drivers/clk/qcom/lcc-ipq806x.c
@@ -462,7 +462,6 @@ static struct platform_driver lcc_ipq806x_driver = {
         .remove = lcc_ipq806x_remove,
         .driver = {
                 .name = "lcc-ipq806x",
-                .owner = THIS_MODULE,
                 .of_match_table = lcc_ipq806x_match_table,
         },
 };
diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c
index a75a408cfccd..e2c863295f00 100644
--- a/drivers/clk/qcom/lcc-msm8960.c
+++ b/drivers/clk/qcom/lcc-msm8960.c
@@ -417,8 +417,8 @@ static struct clk_rcg slimbus_src = {
                 .mnctr_en_bit = 8,
                 .mnctr_reset_bit = 7,
                 .mnctr_mode_shift = 5,
-                .n_val_shift = 16,
-                .m_val_shift = 16,
+                .n_val_shift = 24,
+                .m_val_shift = 8,
                 .width = 8,
         },
         .p = {
@@ -547,7 +547,7 @@ static int lcc_msm8960_probe(struct platform_device *pdev)
                 return PTR_ERR(regmap);
 
         /* Use the correct frequency plan depending on speed of PLL4 */
-        val = regmap_read(regmap, 0x4, &val);
+        regmap_read(regmap, 0x4, &val);
         if (val == 0x12) {
                 slimbus_src.freq_tbl = clk_tbl_aif_osr_492;
                 mi2s_osr_src.freq_tbl = clk_tbl_aif_osr_492;
@@ -574,7 +574,6 @@ static struct platform_driver lcc_msm8960_driver = {
         .remove = lcc_msm8960_remove,
         .driver = {
                 .name = "lcc-msm8960",
-                .owner = THIS_MODULE,
                 .of_match_table = lcc_msm8960_match_table,
         },
 };
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
index 6ef89639a9f6..d21640634adf 100644
--- a/drivers/clk/ti/fapll.c
+++ b/drivers/clk/ti/fapll.c
@@ -84,7 +84,7 @@ static int ti_fapll_enable(struct clk_hw *hw)
         struct fapll_data *fd = to_fapll(hw);
         u32 v = readl_relaxed(fd->base);
 
-        v |= (1 << FAPLL_MAIN_PLLEN);
+        v |= FAPLL_MAIN_PLLEN;
         writel_relaxed(v, fd->base);
 
         return 0;
@@ -95,7 +95,7 @@ static void ti_fapll_disable(struct clk_hw *hw)
         struct fapll_data *fd = to_fapll(hw);
         u32 v = readl_relaxed(fd->base);
 
-        v &= ~(1 << FAPLL_MAIN_PLLEN);
+        v &= ~FAPLL_MAIN_PLLEN;
         writel_relaxed(v, fd->base);
 }
 
@@ -104,7 +104,7 @@ static int ti_fapll_is_enabled(struct clk_hw *hw)
         struct fapll_data *fd = to_fapll(hw);
         u32 v = readl_relaxed(fd->base);
 
-        return v & (1 << FAPLL_MAIN_PLLEN);
+        return v & FAPLL_MAIN_PLLEN;
 }
 
 static unsigned long ti_fapll_recalc_rate(struct clk_hw *hw,
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index 5e98c6b1f284..82d2fbb20f7e 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -159,7 +159,7 @@ static struct cpufreq_driver exynos_driver = {
 
 static int exynos_cpufreq_probe(struct platform_device *pdev)
 {
-        struct device_node *cpus, *np;
+        struct device_node *cpu0;
         int ret = -EINVAL;
 
         exynos_info = kzalloc(sizeof(*exynos_info), GFP_KERNEL);
@@ -206,28 +206,19 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
         if (ret)
                 goto err_cpufreq_reg;
 
-        cpus = of_find_node_by_path("/cpus");
-        if (!cpus) {
-                pr_err("failed to find cpus node\n");
+        cpu0 = of_get_cpu_node(0, NULL);
+        if (!cpu0) {
+                pr_err("failed to find cpu0 node\n");
                 return 0;
         }
 
-        np = of_get_next_child(cpus, NULL);
-        if (!np) {
-                pr_err("failed to find cpus child node\n");
-                of_node_put(cpus);
-                return 0;
-        }
-
-        if (of_find_property(np, "#cooling-cells", NULL)) {
-                cdev = of_cpufreq_cooling_register(np,
+        if (of_find_property(cpu0, "#cooling-cells", NULL)) {
+                cdev = of_cpufreq_cooling_register(cpu0,
                                                    cpu_present_mask);
                 if (IS_ERR(cdev))
                         pr_err("running cpufreq without cooling device: %ld\n",
                                PTR_ERR(cdev));
         }
-        of_node_put(np);
-        of_node_put(cpus);
 
         return 0;
 
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c
index bee5df7794d3..7cb4b766cf94 100644
--- a/drivers/cpufreq/ppc-corenet-cpufreq.c
+++ b/drivers/cpufreq/ppc-corenet-cpufreq.c
@@ -22,6 +22,8 @@
 #include <linux/smp.h>
 #include <sysdev/fsl_soc.h>
 
+#include <asm/smp.h>	/* for get_hard_smp_processor_id() in UP configs */
+
 /**
  * struct cpu_data - per CPU data struct
  * @parent: the parent node of cpu clock
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 4d534582514e..080bd2dbde4b 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -44,6 +44,12 @@ void disable_cpuidle(void)
         off = 1;
 }
 
+bool cpuidle_not_available(struct cpuidle_driver *drv,
+                           struct cpuidle_device *dev)
+{
+        return off || !initialized || !drv || !dev || !dev->enabled;
+}
+
 /**
  * cpuidle_play_dead - cpu off-lining
  *
@@ -66,14 +72,8 @@ int cpuidle_play_dead(void)
         return -ENODEV;
 }
 
-/**
- * cpuidle_find_deepest_state - Find deepest state meeting specific conditions.
- * @drv: cpuidle driver for the given CPU.
- * @dev: cpuidle device for the given CPU.
- * @freeze: Whether or not the state should be suitable for suspend-to-idle.
- */
-static int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
-                                      struct cpuidle_device *dev, bool freeze)
+static int find_deepest_state(struct cpuidle_driver *drv,
+                              struct cpuidle_device *dev, bool freeze)
 {
         unsigned int latency_req = 0;
         int i, ret = freeze ? -1 : CPUIDLE_DRIVER_STATE_START - 1;
@@ -92,6 +92,17 @@ static int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
         return ret;
 }
 
+/**
+ * cpuidle_find_deepest_state - Find the deepest available idle state.
+ * @drv: cpuidle driver for the given CPU.
+ * @dev: cpuidle device for the given CPU.
+ */
+int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
+                               struct cpuidle_device *dev)
+{
+        return find_deepest_state(drv, dev, false);
+}
+
 static void enter_freeze_proper(struct cpuidle_driver *drv,
                                 struct cpuidle_device *dev, int index)
 {
@@ -113,15 +124,14 @@ static void enter_freeze_proper(struct cpuidle_driver *drv,
 
 /**
  * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle.
+ * @drv: cpuidle driver for the given CPU.
+ * @dev: cpuidle device for the given CPU.
  *
  * If there are states with the ->enter_freeze callback, find the deepest of
- * them and enter it with frozen tick. Otherwise, find the deepest state
- * available and enter it normally.
+ * them and enter it with frozen tick.
  */
-void cpuidle_enter_freeze(void)
+int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 {
-        struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
-        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
         int index;
 
         /*
@@ -129,24 +139,11 @@ void cpuidle_enter_freeze(void)
          * that interrupts won't be enabled when it exits and allows the tick to
          * be frozen safely.
          */
-        index = cpuidle_find_deepest_state(drv, dev, true);
-        if (index >= 0) {
-                enter_freeze_proper(drv, dev, index);
-                return;
-        }
-
-        /*
-         * It is not safe to freeze the tick, find the deepest state available
-         * at all and try to enter it normally.
-         */
-        index = cpuidle_find_deepest_state(drv, dev, false);
+        index = find_deepest_state(drv, dev, true);
         if (index >= 0)
-                cpuidle_enter(drv, dev, index);
-        else
-                arch_cpu_idle();
+                enter_freeze_proper(drv, dev, index);
 
-        /* Interrupts are enabled again here. */
-        local_irq_disable();
+        return index;
 }
 
 /**
@@ -205,12 +202,6 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
  */
 int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 {
-        if (off || !initialized)
-                return -ENODEV;
-
-        if (!drv || !dev || !dev->enabled)
-                return -EBUSY;
-
         return cpuidle_curr_governor->select(drv, dev);
 }
 
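
Note on the cpuidle rework above: the availability checks move out of cpuidle_select() into cpuidle_not_available(), cpuidle_enter_freeze() now returns the entered state index instead of falling back internally, and cpuidle_find_deepest_state() becomes a public helper. A hedged sketch of how a caller such as the scheduler's idle loop might combine these entry points; this is a simplification for illustration, not the actual kernel/sched/idle.c code, and error/irq bookkeeping is omitted:

#include <linux/cpu.h>
#include <linux/cpuidle.h>

static void idle_loop_sketch(struct cpuidle_driver *drv,
			     struct cpuidle_device *dev, bool suspend_to_idle)
{
	int index;

	if (cpuidle_not_available(drv, dev)) {
		arch_cpu_idle();	/* fall back to the default idle routine */
		return;
	}

	if (suspend_to_idle) {
		/* Prefer a state with ->enter_freeze and a frozen tick. */
		index = cpuidle_enter_freeze(drv, dev);
		if (index >= 0)
			return;

		/* Otherwise pick the deepest regular state and enter it. */
		index = cpuidle_find_deepest_state(drv, dev);
		if (index >= 0) {
			cpuidle_enter(drv, dev, index);
			return;
		}
	}

	/* Normal path: let the governor choose a state. */
	index = cpuidle_select(drv, dev);
	cpuidle_enter(drv, dev, index);
}
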
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c
index e5541117b3e9..50ef8bd8708b 100644
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/fence.c
@@ -159,6 +159,9 @@ fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
         if (WARN_ON(timeout < 0))
                 return -EINVAL;
 
+        if (timeout == 0)
+                return fence_is_signaled(fence);
+
         trace_fence_wait_start(fence);
         ret = fence->ops->wait(fence, intr, timeout);
         trace_fence_wait_end(fence);
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 3c97c8fa8d02..39920d77f288 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -327,6 +327,9 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
         unsigned seq, shared_count, i = 0;
         long ret = timeout;
 
+        if (!timeout)
+                return reservation_object_test_signaled_rcu(obj, wait_all);
+
 retry:
         fence = NULL;
         shared_count = 0;
@@ -402,8 +405,6 @@ reservation_object_test_signaled_single(struct fence *passed_fence)
         int ret = 1;
 
         if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
-                int ret;
-
                 fence = fence_get_rcu(lfence);
                 if (!fence)
                         return -1;
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 09e2825a547a..d9891d3461f6 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -664,7 +664,6 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
         struct at_xdmac_desc *first = NULL, *prev = NULL;
         unsigned int periods = buf_len / period_len;
         int i;
-        u32 cfg;
 
         dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
                 __func__, &buf_addr, buf_len, period_len,
@@ -700,17 +699,17 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
                 if (direction == DMA_DEV_TO_MEM) {
                         desc->lld.mbr_sa = atchan->per_src_addr;
                         desc->lld.mbr_da = buf_addr + i * period_len;
-                        cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
+                        desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
                 } else {
                         desc->lld.mbr_sa = buf_addr + i * period_len;
                         desc->lld.mbr_da = atchan->per_dst_addr;
-                        cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
+                        desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
                 }
                 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
                         | AT_XDMAC_MBR_UBC_NDEN
                         | AT_XDMAC_MBR_UBC_NSEN
                         | AT_XDMAC_MBR_UBC_NDE
-                        | period_len >> at_xdmac_get_dwidth(cfg);
+                        | period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg);
 
                 dev_dbg(chan2dev(chan),
                         "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 455b7a4f1e87..a8ad05291b27 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -626,7 +626,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
         dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);
 
         /* Check if we have any interrupt from the DMAC */
-        if (!status)
+        if (!status || !dw->in_use)
                 return IRQ_NONE;
 
         /*
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 77a6dcf25b98..194ec20c9408 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -230,6 +230,10 @@ static bool is_bwd_noraid(struct pci_dev *pdev)
         switch (pdev->device) {
         case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
         case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
+        case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
+        case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
+        case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
+        case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
                 return true;
         default:
                 return false;
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 8926f271904e..eb410044e1af 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -219,6 +219,9 @@ static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
 
         while (dint) {
                 i = __ffs(dint);
+                /* only handle interrupts belonging to pdma driver*/
+                if (i >= pdev->dma_channels)
+                        break;
                 dint &= (dint - 1);
                 phy = &pdev->phy[i];
                 ret = mmp_pdma_chan_handler(irq, phy);
@@ -999,6 +1002,9 @@ static int mmp_pdma_probe(struct platform_device *op)
         struct resource *iores;
         int i, ret, irq = 0;
         int dma_channels = 0, irq_num = 0;
+        const enum dma_slave_buswidth widths =
+                DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
+                DMA_SLAVE_BUSWIDTH_4_BYTES;
 
         pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
         if (!pdev)
@@ -1066,6 +1072,10 @@ static int mmp_pdma_probe(struct platform_device *op)
         pdev->device.device_config = mmp_pdma_config;
         pdev->device.device_terminate_all = mmp_pdma_terminate_all;
         pdev->device.copy_align = PDMA_ALIGNMENT;
+        pdev->device.src_addr_widths = widths;
+        pdev->device.dst_addr_widths = widths;
+        pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+        pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
 
         if (pdev->dev->coherent_dma_mask)
                 dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 70c2fa9963cd..b6f4e1fc9c78 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -110,7 +110,7 @@ struct mmp_tdma_chan {
         struct tasklet_struct tasklet;
 
         struct mmp_tdma_desc *desc_arr;
-        phys_addr_t desc_arr_phys;
+        dma_addr_t desc_arr_phys;
         int desc_num;
         enum dma_transfer_direction dir;
         dma_addr_t dev_addr;
@@ -166,9 +166,12 @@ static void mmp_tdma_enable_chan(struct mmp_tdma_chan *tdmac)
 static int mmp_tdma_disable_chan(struct dma_chan *chan)
 {
         struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
+        u32 tdcr;
 
-        writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
-               tdmac->reg_base + TDCR);
+        tdcr = readl(tdmac->reg_base + TDCR);
+        tdcr |= TDCR_ABR;
+        tdcr &= ~TDCR_CHANEN;
+        writel(tdcr, tdmac->reg_base + TDCR);
 
         tdmac->status = DMA_COMPLETE;
 
@@ -296,12 +299,27 @@ static int mmp_tdma_clear_chan_irq(struct mmp_tdma_chan *tdmac)
         return -EAGAIN;
 }
 
+static size_t mmp_tdma_get_pos(struct mmp_tdma_chan *tdmac)
+{
+        size_t reg;
+
+        if (tdmac->idx == 0) {
+                reg = __raw_readl(tdmac->reg_base + TDSAR);
+                reg -= tdmac->desc_arr[0].src_addr;
+        } else if (tdmac->idx == 1) {
+                reg = __raw_readl(tdmac->reg_base + TDDAR);
+                reg -= tdmac->desc_arr[0].dst_addr;
+        } else
+                return -EINVAL;
+
+        return reg;
+}
+
 static irqreturn_t mmp_tdma_chan_handler(int irq, void *dev_id)
 {
         struct mmp_tdma_chan *tdmac = dev_id;
 
         if (mmp_tdma_clear_chan_irq(tdmac) == 0) {
-                tdmac->pos = (tdmac->pos + tdmac->period_len) % tdmac->buf_len;
                 tasklet_schedule(&tdmac->tasklet);
                 return IRQ_HANDLED;
         } else
@@ -343,7 +361,7 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
         int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
 
         gpool = tdmac->pool;
-        if (tdmac->desc_arr)
+        if (gpool && tdmac->desc_arr)
                 gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
                               size);
         tdmac->desc_arr = NULL;
@@ -499,6 +517,7 @@ static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan,
 {
         struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
 
+        tdmac->pos = mmp_tdma_get_pos(tdmac);
         dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
                          tdmac->buf_len - tdmac->pos);
 
@@ -610,7 +629,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
         int i, ret;
         int irq = 0, irq_num = 0;
         int chan_num = TDMA_CHANNEL_NUM;
-        struct gen_pool *pool;
+        struct gen_pool *pool = NULL;
 
         of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev);
         if (of_id)
diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom_bam_dma.c index d7a33b3ac466..9c914d625906 100644 --- a/drivers/dma/qcom_bam_dma.c +++ b/drivers/dma/qcom_bam_dma.c | |||
| @@ -162,9 +162,9 @@ static const struct reg_offset_data bam_v1_4_reg_info[] = { | |||
| 162 | [BAM_P_IRQ_STTS] = { 0x1010, 0x1000, 0x00, 0x00 }, | 162 | [BAM_P_IRQ_STTS] = { 0x1010, 0x1000, 0x00, 0x00 }, |
| 163 | [BAM_P_IRQ_CLR] = { 0x1014, 0x1000, 0x00, 0x00 }, | 163 | [BAM_P_IRQ_CLR] = { 0x1014, 0x1000, 0x00, 0x00 }, |
| 164 | [BAM_P_IRQ_EN] = { 0x1018, 0x1000, 0x00, 0x00 }, | 164 | [BAM_P_IRQ_EN] = { 0x1018, 0x1000, 0x00, 0x00 }, |
| 165 | [BAM_P_EVNT_DEST_ADDR] = { 0x102C, 0x00, 0x1000, 0x00 }, | 165 | [BAM_P_EVNT_DEST_ADDR] = { 0x182C, 0x00, 0x1000, 0x00 }, |
| 166 | [BAM_P_EVNT_REG] = { 0x1018, 0x00, 0x1000, 0x00 }, | 166 | [BAM_P_EVNT_REG] = { 0x1818, 0x00, 0x1000, 0x00 }, |
| 167 | [BAM_P_SW_OFSTS] = { 0x1000, 0x00, 0x1000, 0x00 }, | 167 | [BAM_P_SW_OFSTS] = { 0x1800, 0x00, 0x1000, 0x00 }, |
| 168 | [BAM_P_DATA_FIFO_ADDR] = { 0x1824, 0x00, 0x1000, 0x00 }, | 168 | [BAM_P_DATA_FIFO_ADDR] = { 0x1824, 0x00, 0x1000, 0x00 }, |
| 169 | [BAM_P_DESC_FIFO_ADDR] = { 0x181C, 0x00, 0x1000, 0x00 }, | 169 | [BAM_P_DESC_FIFO_ADDR] = { 0x181C, 0x00, 0x1000, 0x00 }, |
| 170 | [BAM_P_EVNT_GEN_TRSHLD] = { 0x1828, 0x00, 0x1000, 0x00 }, | 170 | [BAM_P_EVNT_GEN_TRSHLD] = { 0x1828, 0x00, 0x1000, 0x00 }, |
| @@ -1143,6 +1143,10 @@ static int bam_dma_probe(struct platform_device *pdev) | |||
| 1143 | dma_cap_set(DMA_SLAVE, bdev->common.cap_mask); | 1143 | dma_cap_set(DMA_SLAVE, bdev->common.cap_mask); |
| 1144 | 1144 | ||
| 1145 | /* initialize dmaengine apis */ | 1145 | /* initialize dmaengine apis */ |
| 1146 | bdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
| 1147 | bdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; | ||
| 1148 | bdev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
| 1149 | bdev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
| 1146 | bdev->common.device_alloc_chan_resources = bam_alloc_chan; | 1150 | bdev->common.device_alloc_chan_resources = bam_alloc_chan; |
| 1147 | bdev->common.device_free_chan_resources = bam_free_chan; | 1151 | bdev->common.device_free_chan_resources = bam_free_chan; |
| 1148 | bdev->common.device_prep_slave_sg = bam_prep_slave_sg; | 1152 | bdev->common.device_prep_slave_sg = bam_prep_slave_sg; |
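Two independent fixes sit in the qcom_bam_dma diff: three per-pipe event/status registers move into the 0x1800 block of the v1.4 register map, and the dmaengine device now advertises its directions, residue granularity and bus widths so clients can query them. As a rough model of how a table like reg_offset_data resolves to an absolute register address, assuming the non-base columns are simply stride multipliers (an assumption on my part, not spelled out in the hunk):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical mirror of a table row: a base offset plus stride columns. */
struct reg_offset {
    uint32_t base;
    uint32_t stride_a;
    uint32_t stride_b;
    uint32_t stride_c;
};

/* Illustrative only: each index scales its stride and adds to the base. */
static uint32_t reg_addr(const struct reg_offset *r,
                         uint32_t a, uint32_t b, uint32_t c)
{
    return r->base + a * r->stride_a + b * r->stride_b + c * r->stride_c;
}

int main(void)
{
    /* BAM_P_SW_OFSTS after the fix: base 0x1800, one 0x1000 stride column */
    struct reg_offset sw_ofsts = { 0x1800, 0x00, 0x1000, 0x00 };

    printf("instance 2: 0x%x\n", reg_addr(&sw_ofsts, 0, 2, 0));    /* 0x3800 */
    return 0;
}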
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c index b2431aa30033..9f1d4c7dbab8 100644 --- a/drivers/dma/sh/shdmac.c +++ b/drivers/dma/sh/shdmac.c | |||
| @@ -582,15 +582,12 @@ static void sh_dmae_chan_remove(struct sh_dmae_device *shdev) | |||
| 582 | } | 582 | } |
| 583 | } | 583 | } |
| 584 | 584 | ||
| 585 | static void sh_dmae_shutdown(struct platform_device *pdev) | ||
| 586 | { | ||
| 587 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); | ||
| 588 | sh_dmae_ctl_stop(shdev); | ||
| 589 | } | ||
| 590 | |||
| 591 | #ifdef CONFIG_PM | 585 | #ifdef CONFIG_PM |
| 592 | static int sh_dmae_runtime_suspend(struct device *dev) | 586 | static int sh_dmae_runtime_suspend(struct device *dev) |
| 593 | { | 587 | { |
| 588 | struct sh_dmae_device *shdev = dev_get_drvdata(dev); | ||
| 589 | |||
| 590 | sh_dmae_ctl_stop(shdev); | ||
| 594 | return 0; | 591 | return 0; |
| 595 | } | 592 | } |
| 596 | 593 | ||
| @@ -605,6 +602,9 @@ static int sh_dmae_runtime_resume(struct device *dev) | |||
| 605 | #ifdef CONFIG_PM_SLEEP | 602 | #ifdef CONFIG_PM_SLEEP |
| 606 | static int sh_dmae_suspend(struct device *dev) | 603 | static int sh_dmae_suspend(struct device *dev) |
| 607 | { | 604 | { |
| 605 | struct sh_dmae_device *shdev = dev_get_drvdata(dev); | ||
| 606 | |||
| 607 | sh_dmae_ctl_stop(shdev); | ||
| 608 | return 0; | 608 | return 0; |
| 609 | } | 609 | } |
| 610 | 610 | ||
| @@ -929,13 +929,12 @@ static int sh_dmae_remove(struct platform_device *pdev) | |||
| 929 | } | 929 | } |
| 930 | 930 | ||
| 931 | static struct platform_driver sh_dmae_driver = { | 931 | static struct platform_driver sh_dmae_driver = { |
| 932 | .driver = { | 932 | .driver = { |
| 933 | .pm = &sh_dmae_pm, | 933 | .pm = &sh_dmae_pm, |
| 934 | .name = SH_DMAE_DRV_NAME, | 934 | .name = SH_DMAE_DRV_NAME, |
| 935 | .of_match_table = sh_dmae_of_match, | 935 | .of_match_table = sh_dmae_of_match, |
| 936 | }, | 936 | }, |
| 937 | .remove = sh_dmae_remove, | 937 | .remove = sh_dmae_remove, |
| 938 | .shutdown = sh_dmae_shutdown, | ||
| 939 | }; | 938 | }; |
| 940 | 939 | ||
| 941 | static int __init sh_dmae_init(void) | 940 | static int __init sh_dmae_init(void) |
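The shdmac change retires the platform .shutdown hook and quiesces the controller from the runtime-suspend and system-suspend callbacks instead, so the stop happens on every power-down path the PM core drives. Below is a bare-bones shape sketch of that wiring, assuming kernel context; my_chip and my_ctl_stop() are placeholder names, not the driver's symbols.

#include <linux/device.h>
#include <linux/pm.h>

struct my_chip {
    void __iomem *iomem;    /* register window */
};

static void my_ctl_stop(struct my_chip *chip)
{
    /* mask interrupts and halt all channels */
}

static int my_runtime_suspend(struct device *dev)
{
    struct my_chip *chip = dev_get_drvdata(dev);

    my_ctl_stop(chip);    /* quiesce before the domain gates clocks */
    return 0;
}

static int my_runtime_resume(struct device *dev)
{
    return 0;             /* re-init happens lazily on next use */
}

static int my_suspend(struct device *dev)
{
    struct my_chip *chip = dev_get_drvdata(dev);

    my_ctl_stop(chip);    /* same quiesce on system sleep */
    return 0;
}

/* Referenced from the platform_driver's .driver.pm in a full driver. */
static const struct dev_pm_ops my_pm_ops = {
    SET_SYSTEM_SLEEP_PM_OPS(my_suspend, NULL)
    SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
};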
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index c5f7b4e9eb6c..69fac068669f 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c | |||
| @@ -78,7 +78,7 @@ static const char * __init dmi_string(const struct dmi_header *dm, u8 s) | |||
| 78 | * We have to be cautious here. We have seen BIOSes with DMI pointers | 78 | * We have to be cautious here. We have seen BIOSes with DMI pointers |
| 79 | * pointing to completely the wrong place for example | 79 | * pointing to completely the wrong place for example |
| 80 | */ | 80 | */ |
| 81 | static void dmi_table(u8 *buf, int len, int num, | 81 | static void dmi_table(u8 *buf, u32 len, int num, |
| 82 | void (*decode)(const struct dmi_header *, void *), | 82 | void (*decode)(const struct dmi_header *, void *), |
| 83 | void *private_data) | 83 | void *private_data) |
| 84 | { | 84 | { |
| @@ -93,12 +93,6 @@ static void dmi_table(u8 *buf, int len, int num, | |||
| 93 | const struct dmi_header *dm = (const struct dmi_header *)data; | 93 | const struct dmi_header *dm = (const struct dmi_header *)data; |
| 94 | 94 | ||
| 95 | /* | 95 | /* |
| 96 | * 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0] | ||
| 97 | */ | ||
| 98 | if (dm->type == DMI_ENTRY_END_OF_TABLE) | ||
| 99 | break; | ||
| 100 | |||
| 101 | /* | ||
| 102 | * We want to know the total length (formatted area and | 96 | * We want to know the total length (formatted area and |
| 103 | * strings) before decoding to make sure we won't run off the | 97 | * strings) before decoding to make sure we won't run off the |
| 104 | * table in dmi_decode or dmi_string | 98 | * table in dmi_decode or dmi_string |
| @@ -108,13 +102,20 @@ static void dmi_table(u8 *buf, int len, int num, | |||
| 108 | data++; | 102 | data++; |
| 109 | if (data - buf < len - 1) | 103 | if (data - buf < len - 1) |
| 110 | decode(dm, private_data); | 104 | decode(dm, private_data); |
| 105 | |||
| 106 | /* | ||
| 107 | * 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0] | ||
| 108 | */ | ||
| 109 | if (dm->type == DMI_ENTRY_END_OF_TABLE) | ||
| 110 | break; | ||
| 111 | |||
| 111 | data += 2; | 112 | data += 2; |
| 112 | i++; | 113 | i++; |
| 113 | } | 114 | } |
| 114 | } | 115 | } |
| 115 | 116 | ||
| 116 | static phys_addr_t dmi_base; | 117 | static phys_addr_t dmi_base; |
| 117 | static u16 dmi_len; | 118 | static u32 dmi_len; |
| 118 | static u16 dmi_num; | 119 | static u16 dmi_num; |
| 119 | 120 | ||
| 120 | static int __init dmi_walk_early(void (*decode)(const struct dmi_header *, | 121 | static int __init dmi_walk_early(void (*decode)(const struct dmi_header *, |
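The dmi_scan change does two things: the table length becomes a u32 (SMBIOS 3.0 tables may exceed what a u16 can describe) and the Type 127 End-of-Table record is now fed to the decode callback before the walk stops, where previously it was skipped outright. The standalone model below reproduces the reordered loop on a fake two-record table; the record layout is stripped down to the header fields the walk actually needs, and the record-count bound is omitted for brevity.

#include <stdio.h>
#include <stdint.h>

#define DMI_ENTRY_END_OF_TABLE 127

struct dmi_header {
    uint8_t type;
    uint8_t length;
    uint16_t handle;
} __attribute__((packed));

static void decode(const struct dmi_header *h)
{
    printf("decoded type %u (len %u)\n", (unsigned)h->type, (unsigned)h->length);
}

/* Walk the formatted area plus trailing strings, decode, then honour Type 127. */
static void dmi_walk(const uint8_t *buf, uint32_t len)
{
    const uint8_t *data = buf;

    while (data - buf + sizeof(struct dmi_header) <= len) {
        const struct dmi_header *dm = (const void *)data;

        /* skip the formatted area, then the double-NUL string set */
        data += dm->length;
        while (data - buf < len - 1 && (data[0] || data[1]))
            data++;

        if (data - buf < len - 1)
            decode(dm);                        /* Type 127 is decoded too */

        if (dm->type == DMI_ENTRY_END_OF_TABLE)
            break;                             /* ...and only then we stop */

        data += 2;
    }
}

int main(void)
{
    /* one fake Type 1 record followed by End-of-Table */
    uint8_t table[] = {
        1, 4, 0, 0, 'A', 0, 0,    /* type 1, one string "A" */
        127, 4, 0, 0, 0, 0,       /* type 127, empty string set */
    };

    dmi_walk(table, sizeof(table));
    return 0;
}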
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c index 2fe195002021..f07d4a67fa76 100644 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c | |||
| @@ -179,12 +179,12 @@ again: | |||
| 179 | start = desc->phys_addr; | 179 | start = desc->phys_addr; |
| 180 | end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT); | 180 | end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT); |
| 181 | 181 | ||
| 182 | if ((start + size) > end || (start + size) > max) | 182 | if (end > max) |
| 183 | continue; | ||
| 184 | |||
| 185 | if (end - size > max) | ||
| 186 | end = max; | 183 | end = max; |
| 187 | 184 | ||
| 185 | if ((start + size) > end) | ||
| 186 | continue; | ||
| 187 | |||
| 188 | if (round_down(end - size, align) < start) | 188 | if (round_down(end - size, align) < start) |
| 189 | continue; | 189 | continue; |
| 190 | 190 | ||
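The efi-stub-helper hunk reorders the window test: the region's end is clamped to the allocation limit first, and only then is the size check applied to the clamped window. With the old ordering a region ending just above max could yield an allocation whose start sat below the limit but whose end ran past it. A standalone sketch of the corrected predicate, with round_down kept as a local power-of-two helper:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static uint64_t round_down_u64(uint64_t v, uint64_t align)
{
    return v & ~(align - 1);    /* align must be a power of two */
}

/*
 * Pick the highest 'size'-byte slot in [start, end) that stays at or
 * below 'max', mirroring the patched ordering: clamp first, then test.
 */
static bool pick_high_slot(uint64_t start, uint64_t end, uint64_t size,
                           uint64_t max, uint64_t align, uint64_t *addr)
{
    if (end > max)
        end = max;                  /* never let the slot extend past the limit */

    if (start + size > end)
        return false;               /* clamped window too small */

    *addr = round_down_u64(end - size, align);
    if (*addr < start)
        return false;               /* alignment pushed the slot below start */

    return true;
}

int main(void)
{
    uint64_t addr;

    /* region straddles a 4 GiB limit; the slot must end at or below it */
    if (pick_high_slot(0xF0000000ULL, 0x110000000ULL, 0x100000ULL,
                       0x100000000ULL, 0x1000ULL, &addr))
        printf("allocate at 0x%llx\n", (unsigned long long)addr);
    return 0;
}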
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 6b6b07ff720b..f6d04c7b5115 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c | |||
| @@ -43,9 +43,10 @@ | |||
| 43 | #include "drm_crtc_internal.h" | 43 | #include "drm_crtc_internal.h" |
| 44 | #include "drm_internal.h" | 44 | #include "drm_internal.h" |
| 45 | 45 | ||
| 46 | static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, | 46 | static struct drm_framebuffer * |
| 47 | struct drm_mode_fb_cmd2 *r, | 47 | internal_framebuffer_create(struct drm_device *dev, |
| 48 | struct drm_file *file_priv); | 48 | struct drm_mode_fb_cmd2 *r, |
| 49 | struct drm_file *file_priv); | ||
| 49 | 50 | ||
| 50 | /* Avoid boilerplate. I'm tired of typing. */ | 51 | /* Avoid boilerplate. I'm tired of typing. */ |
| 51 | #define DRM_ENUM_NAME_FN(fnname, list) \ | 52 | #define DRM_ENUM_NAME_FN(fnname, list) \ |
| @@ -2908,13 +2909,11 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc, | |||
| 2908 | */ | 2909 | */ |
| 2909 | if (req->flags & DRM_MODE_CURSOR_BO) { | 2910 | if (req->flags & DRM_MODE_CURSOR_BO) { |
| 2910 | if (req->handle) { | 2911 | if (req->handle) { |
| 2911 | fb = add_framebuffer_internal(dev, &fbreq, file_priv); | 2912 | fb = internal_framebuffer_create(dev, &fbreq, file_priv); |
| 2912 | if (IS_ERR(fb)) { | 2913 | if (IS_ERR(fb)) { |
| 2913 | DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n"); | 2914 | DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n"); |
| 2914 | return PTR_ERR(fb); | 2915 | return PTR_ERR(fb); |
| 2915 | } | 2916 | } |
| 2916 | |||
| 2917 | drm_framebuffer_reference(fb); | ||
| 2918 | } else { | 2917 | } else { |
| 2919 | fb = NULL; | 2918 | fb = NULL; |
| 2920 | } | 2919 | } |
| @@ -3267,9 +3266,10 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r) | |||
| 3267 | return 0; | 3266 | return 0; |
| 3268 | } | 3267 | } |
| 3269 | 3268 | ||
| 3270 | static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, | 3269 | static struct drm_framebuffer * |
| 3271 | struct drm_mode_fb_cmd2 *r, | 3270 | internal_framebuffer_create(struct drm_device *dev, |
| 3272 | struct drm_file *file_priv) | 3271 | struct drm_mode_fb_cmd2 *r, |
| 3272 | struct drm_file *file_priv) | ||
| 3273 | { | 3273 | { |
| 3274 | struct drm_mode_config *config = &dev->mode_config; | 3274 | struct drm_mode_config *config = &dev->mode_config; |
| 3275 | struct drm_framebuffer *fb; | 3275 | struct drm_framebuffer *fb; |
| @@ -3301,12 +3301,6 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, | |||
| 3301 | return fb; | 3301 | return fb; |
| 3302 | } | 3302 | } |
| 3303 | 3303 | ||
| 3304 | mutex_lock(&file_priv->fbs_lock); | ||
| 3305 | r->fb_id = fb->base.id; | ||
| 3306 | list_add(&fb->filp_head, &file_priv->fbs); | ||
| 3307 | DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); | ||
| 3308 | mutex_unlock(&file_priv->fbs_lock); | ||
| 3309 | |||
| 3310 | return fb; | 3304 | return fb; |
| 3311 | } | 3305 | } |
| 3312 | 3306 | ||
| @@ -3328,15 +3322,24 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, | |||
| 3328 | int drm_mode_addfb2(struct drm_device *dev, | 3322 | int drm_mode_addfb2(struct drm_device *dev, |
| 3329 | void *data, struct drm_file *file_priv) | 3323 | void *data, struct drm_file *file_priv) |
| 3330 | { | 3324 | { |
| 3325 | struct drm_mode_fb_cmd2 *r = data; | ||
| 3331 | struct drm_framebuffer *fb; | 3326 | struct drm_framebuffer *fb; |
| 3332 | 3327 | ||
| 3333 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 3328 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
| 3334 | return -EINVAL; | 3329 | return -EINVAL; |
| 3335 | 3330 | ||
| 3336 | fb = add_framebuffer_internal(dev, data, file_priv); | 3331 | fb = internal_framebuffer_create(dev, r, file_priv); |
| 3337 | if (IS_ERR(fb)) | 3332 | if (IS_ERR(fb)) |
| 3338 | return PTR_ERR(fb); | 3333 | return PTR_ERR(fb); |
| 3339 | 3334 | ||
| 3335 | /* Transfer ownership to the filp for reaping on close */ | ||
| 3336 | |||
| 3337 | DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); | ||
| 3338 | mutex_lock(&file_priv->fbs_lock); | ||
| 3339 | r->fb_id = fb->base.id; | ||
| 3340 | list_add(&fb->filp_head, &file_priv->fbs); | ||
| 3341 | mutex_unlock(&file_priv->fbs_lock); | ||
| 3342 | |||
| 3340 | return 0; | 3343 | return 0; |
| 3341 | } | 3344 | } |
| 3342 | 3345 | ||
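The drm_crtc rework is about reference ownership: internal_framebuffer_create() now returns a framebuffer whose creation reference belongs to the caller, only the ADDFB2 ioctl transfers that reference onto the file's list for reaping on close, and the universal-cursor path keeps its reference private, which is why the extra drm_framebuffer_reference() disappears. A tiny refcount model of that split, with generic names rather than the DRM API:

#include <stdlib.h>

struct fb {
    int refs;
};

static struct fb *fb_create(void)
{
    struct fb *f = calloc(1, sizeof(*f));

    if (f)
        f->refs = 1;    /* creation reference, owned by the caller */
    return f;
}

static void fb_put(struct fb *f)
{
    if (--f->refs == 0)
        free(f);
}

int main(void)
{
    /* ioctl-style path: the creation reference is handed to the per-file
     * list, which drops it when the file closes. */
    struct fb *published = fb_create();

    /* cursor-style path: the reference stays private and is never
     * published; no second reference is taken. */
    struct fb *private = fb_create();

    if (published)
        fb_put(published);    /* stands in for the drop on file close */
    if (private)
        fb_put(private);      /* cursor path drops its own reference */
    return 0;
}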
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 9a5b68717ec8..379ab4555756 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c | |||
| @@ -733,10 +733,14 @@ static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr, | |||
| 733 | struct drm_dp_sideband_msg_tx *txmsg) | 733 | struct drm_dp_sideband_msg_tx *txmsg) |
| 734 | { | 734 | { |
| 735 | bool ret; | 735 | bool ret; |
| 736 | mutex_lock(&mgr->qlock); | 736 | |
| 737 | /* | ||
| 738 | * All updates to txmsg->state are protected by mgr->qlock, and the two | ||
| 739 | * cases we check here are terminal states. For those the barriers | ||
| 740 | * provided by the wake_up/wait_event pair are enough. | ||
| 741 | */ | ||
| 737 | ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX || | 742 | ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX || |
| 738 | txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT); | 743 | txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT); |
| 739 | mutex_unlock(&mgr->qlock); | ||
| 740 | return ret; | 744 | return ret; |
| 741 | } | 745 | } |
| 742 | 746 | ||
| @@ -1363,12 +1367,13 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr, | |||
| 1363 | return 0; | 1367 | return 0; |
| 1364 | } | 1368 | } |
| 1365 | 1369 | ||
| 1366 | /* must be called holding qlock */ | ||
| 1367 | static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) | 1370 | static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) |
| 1368 | { | 1371 | { |
| 1369 | struct drm_dp_sideband_msg_tx *txmsg; | 1372 | struct drm_dp_sideband_msg_tx *txmsg; |
| 1370 | int ret; | 1373 | int ret; |
| 1371 | 1374 | ||
| 1375 | WARN_ON(!mutex_is_locked(&mgr->qlock)); | ||
| 1376 | |||
| 1372 | /* construct a chunk from the first msg in the tx_msg queue */ | 1377 | /* construct a chunk from the first msg in the tx_msg queue */ |
| 1373 | if (list_empty(&mgr->tx_msg_downq)) { | 1378 | if (list_empty(&mgr->tx_msg_downq)) { |
| 1374 | mgr->tx_down_in_progress = false; | 1379 | mgr->tx_down_in_progress = false; |
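The drm_dp_mst hunks drop the lock that used to be taken inside the wait condition, on the grounds spelled out in the new comment: only terminal states are checked, all writers hold qlock, and the wake_up/wait_event pair supplies the ordering. The "must be called holding qlock" convention becomes an explicit WARN_ON. A kernel-style shape sketch of both ideas, assuming kernel context; none of these names are the DRM MST symbols.

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/wait.h>

enum tx_state { TX_QUEUED, TX_SENT, TX_RX, TX_TIMEOUT };

struct tx_msg {
    enum tx_state state;    /* all writers hold qlock */
};

static DEFINE_MUTEX(qlock);
static DECLARE_WAIT_QUEUE_HEAD(tx_waitq);

/* Lockless check of terminal states only; ordering comes from the
 * wait_event()/wake_up() pair, as the comment in the hunk explains. */
static bool tx_finished(const struct tx_msg *msg)
{
    return msg->state == TX_RX || msg->state == TX_TIMEOUT;
}

/* Helpers that require the lock make the convention checkable at runtime. */
static void process_tx_locked(struct tx_msg *msg)
{
    WARN_ON(!mutex_is_locked(&qlock));
    /* ... advance msg->state under qlock ... */
}

static void complete_tx(struct tx_msg *msg, enum tx_state final)
{
    mutex_lock(&qlock);
    msg->state = final;     /* publish the terminal state under qlock */
    mutex_unlock(&qlock);
    wake_up(&tx_waitq);     /* waiter re-checks tx_finished() */
}

static void wait_for_tx(struct tx_msg *msg)
{
    wait_event(tx_waitq, tx_finished(msg));
}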
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 04a209e2b66d..1134526286c8 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c | |||
| @@ -91,29 +91,29 @@ | |||
| 91 | */ | 91 | */ |
| 92 | 92 | ||
| 93 | static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, | 93 | static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, |
| 94 | unsigned long size, | 94 | u64 size, |
| 95 | unsigned alignment, | 95 | unsigned alignment, |
| 96 | unsigned long color, | 96 | unsigned long color, |
| 97 | enum drm_mm_search_flags flags); | 97 | enum drm_mm_search_flags flags); |
| 98 | static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, | 98 | static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, |
| 99 | unsigned long size, | 99 | u64 size, |
| 100 | unsigned alignment, | 100 | unsigned alignment, |
| 101 | unsigned long color, | 101 | unsigned long color, |
| 102 | unsigned long start, | 102 | u64 start, |
| 103 | unsigned long end, | 103 | u64 end, |
| 104 | enum drm_mm_search_flags flags); | 104 | enum drm_mm_search_flags flags); |
| 105 | 105 | ||
| 106 | static void drm_mm_insert_helper(struct drm_mm_node *hole_node, | 106 | static void drm_mm_insert_helper(struct drm_mm_node *hole_node, |
| 107 | struct drm_mm_node *node, | 107 | struct drm_mm_node *node, |
| 108 | unsigned long size, unsigned alignment, | 108 | u64 size, unsigned alignment, |
| 109 | unsigned long color, | 109 | unsigned long color, |
| 110 | enum drm_mm_allocator_flags flags) | 110 | enum drm_mm_allocator_flags flags) |
| 111 | { | 111 | { |
| 112 | struct drm_mm *mm = hole_node->mm; | 112 | struct drm_mm *mm = hole_node->mm; |
| 113 | unsigned long hole_start = drm_mm_hole_node_start(hole_node); | 113 | u64 hole_start = drm_mm_hole_node_start(hole_node); |
| 114 | unsigned long hole_end = drm_mm_hole_node_end(hole_node); | 114 | u64 hole_end = drm_mm_hole_node_end(hole_node); |
| 115 | unsigned long adj_start = hole_start; | 115 | u64 adj_start = hole_start; |
| 116 | unsigned long adj_end = hole_end; | 116 | u64 adj_end = hole_end; |
| 117 | 117 | ||
| 118 | BUG_ON(node->allocated); | 118 | BUG_ON(node->allocated); |
| 119 | 119 | ||
| @@ -124,12 +124,15 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node, | |||
| 124 | adj_start = adj_end - size; | 124 | adj_start = adj_end - size; |
| 125 | 125 | ||
| 126 | if (alignment) { | 126 | if (alignment) { |
| 127 | unsigned tmp = adj_start % alignment; | 127 | u64 tmp = adj_start; |
| 128 | if (tmp) { | 128 | unsigned rem; |
| 129 | |||
| 130 | rem = do_div(tmp, alignment); | ||
| 131 | if (rem) { | ||
| 129 | if (flags & DRM_MM_CREATE_TOP) | 132 | if (flags & DRM_MM_CREATE_TOP) |
| 130 | adj_start -= tmp; | 133 | adj_start -= rem; |
| 131 | else | 134 | else |
| 132 | adj_start += alignment - tmp; | 135 | adj_start += alignment - rem; |
| 133 | } | 136 | } |
| 134 | } | 137 | } |
| 135 | 138 | ||
| @@ -176,9 +179,9 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node, | |||
| 176 | int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) | 179 | int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) |
| 177 | { | 180 | { |
| 178 | struct drm_mm_node *hole; | 181 | struct drm_mm_node *hole; |
| 179 | unsigned long end = node->start + node->size; | 182 | u64 end = node->start + node->size; |
| 180 | unsigned long hole_start; | 183 | u64 hole_start; |
| 181 | unsigned long hole_end; | 184 | u64 hole_end; |
| 182 | 185 | ||
| 183 | BUG_ON(node == NULL); | 186 | BUG_ON(node == NULL); |
| 184 | 187 | ||
| @@ -227,7 +230,7 @@ EXPORT_SYMBOL(drm_mm_reserve_node); | |||
| 227 | * 0 on success, -ENOSPC if there's no suitable hole. | 230 | * 0 on success, -ENOSPC if there's no suitable hole. |
| 228 | */ | 231 | */ |
| 229 | int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, | 232 | int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, |
| 230 | unsigned long size, unsigned alignment, | 233 | u64 size, unsigned alignment, |
| 231 | unsigned long color, | 234 | unsigned long color, |
| 232 | enum drm_mm_search_flags sflags, | 235 | enum drm_mm_search_flags sflags, |
| 233 | enum drm_mm_allocator_flags aflags) | 236 | enum drm_mm_allocator_flags aflags) |
| @@ -246,16 +249,16 @@ EXPORT_SYMBOL(drm_mm_insert_node_generic); | |||
| 246 | 249 | ||
| 247 | static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node, | 250 | static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node, |
| 248 | struct drm_mm_node *node, | 251 | struct drm_mm_node *node, |
| 249 | unsigned long size, unsigned alignment, | 252 | u64 size, unsigned alignment, |
| 250 | unsigned long color, | 253 | unsigned long color, |
| 251 | unsigned long start, unsigned long end, | 254 | u64 start, u64 end, |
| 252 | enum drm_mm_allocator_flags flags) | 255 | enum drm_mm_allocator_flags flags) |
| 253 | { | 256 | { |
| 254 | struct drm_mm *mm = hole_node->mm; | 257 | struct drm_mm *mm = hole_node->mm; |
| 255 | unsigned long hole_start = drm_mm_hole_node_start(hole_node); | 258 | u64 hole_start = drm_mm_hole_node_start(hole_node); |
| 256 | unsigned long hole_end = drm_mm_hole_node_end(hole_node); | 259 | u64 hole_end = drm_mm_hole_node_end(hole_node); |
| 257 | unsigned long adj_start = hole_start; | 260 | u64 adj_start = hole_start; |
| 258 | unsigned long adj_end = hole_end; | 261 | u64 adj_end = hole_end; |
| 259 | 262 | ||
| 260 | BUG_ON(!hole_node->hole_follows || node->allocated); | 263 | BUG_ON(!hole_node->hole_follows || node->allocated); |
| 261 | 264 | ||
| @@ -271,12 +274,15 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node, | |||
| 271 | mm->color_adjust(hole_node, color, &adj_start, &adj_end); | 274 | mm->color_adjust(hole_node, color, &adj_start, &adj_end); |
| 272 | 275 | ||
| 273 | if (alignment) { | 276 | if (alignment) { |
| 274 | unsigned tmp = adj_start % alignment; | 277 | u64 tmp = adj_start; |
| 275 | if (tmp) { | 278 | unsigned rem; |
| 279 | |||
| 280 | rem = do_div(tmp, alignment); | ||
| 281 | if (rem) { | ||
| 276 | if (flags & DRM_MM_CREATE_TOP) | 282 | if (flags & DRM_MM_CREATE_TOP) |
| 277 | adj_start -= tmp; | 283 | adj_start -= rem; |
| 278 | else | 284 | else |
| 279 | adj_start += alignment - tmp; | 285 | adj_start += alignment - rem; |
| 280 | } | 286 | } |
| 281 | } | 287 | } |
| 282 | 288 | ||
| @@ -324,9 +330,9 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node, | |||
| 324 | * 0 on success, -ENOSPC if there's no suitable hole. | 330 | * 0 on success, -ENOSPC if there's no suitable hole. |
| 325 | */ | 331 | */ |
| 326 | int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node, | 332 | int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node, |
| 327 | unsigned long size, unsigned alignment, | 333 | u64 size, unsigned alignment, |
| 328 | unsigned long color, | 334 | unsigned long color, |
| 329 | unsigned long start, unsigned long end, | 335 | u64 start, u64 end, |
| 330 | enum drm_mm_search_flags sflags, | 336 | enum drm_mm_search_flags sflags, |
| 331 | enum drm_mm_allocator_flags aflags) | 337 | enum drm_mm_allocator_flags aflags) |
| 332 | { | 338 | { |
| @@ -387,32 +393,34 @@ void drm_mm_remove_node(struct drm_mm_node *node) | |||
| 387 | } | 393 | } |
| 388 | EXPORT_SYMBOL(drm_mm_remove_node); | 394 | EXPORT_SYMBOL(drm_mm_remove_node); |
| 389 | 395 | ||
| 390 | static int check_free_hole(unsigned long start, unsigned long end, | 396 | static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment) |
| 391 | unsigned long size, unsigned alignment) | ||
| 392 | { | 397 | { |
| 393 | if (end - start < size) | 398 | if (end - start < size) |
| 394 | return 0; | 399 | return 0; |
| 395 | 400 | ||
| 396 | if (alignment) { | 401 | if (alignment) { |
| 397 | unsigned tmp = start % alignment; | 402 | u64 tmp = start; |
| 398 | if (tmp) | 403 | unsigned rem; |
| 399 | start += alignment - tmp; | 404 | |
| 405 | rem = do_div(tmp, alignment); | ||
| 406 | if (rem) | ||
| 407 | start += alignment - rem; | ||
| 400 | } | 408 | } |
| 401 | 409 | ||
| 402 | return end >= start + size; | 410 | return end >= start + size; |
| 403 | } | 411 | } |
| 404 | 412 | ||
| 405 | static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, | 413 | static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, |
| 406 | unsigned long size, | 414 | u64 size, |
| 407 | unsigned alignment, | 415 | unsigned alignment, |
| 408 | unsigned long color, | 416 | unsigned long color, |
| 409 | enum drm_mm_search_flags flags) | 417 | enum drm_mm_search_flags flags) |
| 410 | { | 418 | { |
| 411 | struct drm_mm_node *entry; | 419 | struct drm_mm_node *entry; |
| 412 | struct drm_mm_node *best; | 420 | struct drm_mm_node *best; |
| 413 | unsigned long adj_start; | 421 | u64 adj_start; |
| 414 | unsigned long adj_end; | 422 | u64 adj_end; |
| 415 | unsigned long best_size; | 423 | u64 best_size; |
| 416 | 424 | ||
| 417 | BUG_ON(mm->scanned_blocks); | 425 | BUG_ON(mm->scanned_blocks); |
| 418 | 426 | ||
| @@ -421,7 +429,7 @@ static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, | |||
| 421 | 429 | ||
| 422 | __drm_mm_for_each_hole(entry, mm, adj_start, adj_end, | 430 | __drm_mm_for_each_hole(entry, mm, adj_start, adj_end, |
| 423 | flags & DRM_MM_SEARCH_BELOW) { | 431 | flags & DRM_MM_SEARCH_BELOW) { |
| 424 | unsigned long hole_size = adj_end - adj_start; | 432 | u64 hole_size = adj_end - adj_start; |
| 425 | 433 | ||
| 426 | if (mm->color_adjust) { | 434 | if (mm->color_adjust) { |
| 427 | mm->color_adjust(entry, color, &adj_start, &adj_end); | 435 | mm->color_adjust(entry, color, &adj_start, &adj_end); |
| @@ -445,18 +453,18 @@ static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, | |||
| 445 | } | 453 | } |
| 446 | 454 | ||
| 447 | static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, | 455 | static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, |
| 448 | unsigned long size, | 456 | u64 size, |
| 449 | unsigned alignment, | 457 | unsigned alignment, |
| 450 | unsigned long color, | 458 | unsigned long color, |
| 451 | unsigned long start, | 459 | u64 start, |
| 452 | unsigned long end, | 460 | u64 end, |
| 453 | enum drm_mm_search_flags flags) | 461 | enum drm_mm_search_flags flags) |
| 454 | { | 462 | { |
| 455 | struct drm_mm_node *entry; | 463 | struct drm_mm_node *entry; |
| 456 | struct drm_mm_node *best; | 464 | struct drm_mm_node *best; |
| 457 | unsigned long adj_start; | 465 | u64 adj_start; |
| 458 | unsigned long adj_end; | 466 | u64 adj_end; |
| 459 | unsigned long best_size; | 467 | u64 best_size; |
| 460 | 468 | ||
| 461 | BUG_ON(mm->scanned_blocks); | 469 | BUG_ON(mm->scanned_blocks); |
| 462 | 470 | ||
| @@ -465,7 +473,7 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_ | |||
| 465 | 473 | ||
| 466 | __drm_mm_for_each_hole(entry, mm, adj_start, adj_end, | 474 | __drm_mm_for_each_hole(entry, mm, adj_start, adj_end, |
| 467 | flags & DRM_MM_SEARCH_BELOW) { | 475 | flags & DRM_MM_SEARCH_BELOW) { |
| 468 | unsigned long hole_size = adj_end - adj_start; | 476 | u64 hole_size = adj_end - adj_start; |
| 469 | 477 | ||
| 470 | if (adj_start < start) | 478 | if (adj_start < start) |
| 471 | adj_start = start; | 479 | adj_start = start; |
| @@ -561,7 +569,7 @@ EXPORT_SYMBOL(drm_mm_replace_node); | |||
| 561 | * adding/removing nodes to/from the scan list are allowed. | 569 | * adding/removing nodes to/from the scan list are allowed. |
| 562 | */ | 570 | */ |
| 563 | void drm_mm_init_scan(struct drm_mm *mm, | 571 | void drm_mm_init_scan(struct drm_mm *mm, |
| 564 | unsigned long size, | 572 | u64 size, |
| 565 | unsigned alignment, | 573 | unsigned alignment, |
| 566 | unsigned long color) | 574 | unsigned long color) |
| 567 | { | 575 | { |
| @@ -594,11 +602,11 @@ EXPORT_SYMBOL(drm_mm_init_scan); | |||
| 594 | * adding/removing nodes to/from the scan list are allowed. | 602 | * adding/removing nodes to/from the scan list are allowed. |
| 595 | */ | 603 | */ |
| 596 | void drm_mm_init_scan_with_range(struct drm_mm *mm, | 604 | void drm_mm_init_scan_with_range(struct drm_mm *mm, |
| 597 | unsigned long size, | 605 | u64 size, |
| 598 | unsigned alignment, | 606 | unsigned alignment, |
| 599 | unsigned long color, | 607 | unsigned long color, |
| 600 | unsigned long start, | 608 | u64 start, |
| 601 | unsigned long end) | 609 | u64 end) |
| 602 | { | 610 | { |
| 603 | mm->scan_color = color; | 611 | mm->scan_color = color; |
| 604 | mm->scan_alignment = alignment; | 612 | mm->scan_alignment = alignment; |
| @@ -627,8 +635,8 @@ bool drm_mm_scan_add_block(struct drm_mm_node *node) | |||
| 627 | { | 635 | { |
| 628 | struct drm_mm *mm = node->mm; | 636 | struct drm_mm *mm = node->mm; |
| 629 | struct drm_mm_node *prev_node; | 637 | struct drm_mm_node *prev_node; |
| 630 | unsigned long hole_start, hole_end; | 638 | u64 hole_start, hole_end; |
| 631 | unsigned long adj_start, adj_end; | 639 | u64 adj_start, adj_end; |
| 632 | 640 | ||
| 633 | mm->scanned_blocks++; | 641 | mm->scanned_blocks++; |
| 634 | 642 | ||
| @@ -731,7 +739,7 @@ EXPORT_SYMBOL(drm_mm_clean); | |||
| 731 | * | 739 | * |
| 732 | * Note that @mm must be cleared to 0 before calling this function. | 740 | * Note that @mm must be cleared to 0 before calling this function. |
| 733 | */ | 741 | */ |
| 734 | void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) | 742 | void drm_mm_init(struct drm_mm * mm, u64 start, u64 size) |
| 735 | { | 743 | { |
| 736 | INIT_LIST_HEAD(&mm->hole_stack); | 744 | INIT_LIST_HEAD(&mm->hole_stack); |
| 737 | mm->scanned_blocks = 0; | 745 | mm->scanned_blocks = 0; |
| @@ -766,18 +774,17 @@ void drm_mm_takedown(struct drm_mm * mm) | |||
| 766 | } | 774 | } |
| 767 | EXPORT_SYMBOL(drm_mm_takedown); | 775 | EXPORT_SYMBOL(drm_mm_takedown); |
| 768 | 776 | ||
| 769 | static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry, | 777 | static u64 drm_mm_debug_hole(struct drm_mm_node *entry, |
| 770 | const char *prefix) | 778 | const char *prefix) |
| 771 | { | 779 | { |
| 772 | unsigned long hole_start, hole_end, hole_size; | 780 | u64 hole_start, hole_end, hole_size; |
| 773 | 781 | ||
| 774 | if (entry->hole_follows) { | 782 | if (entry->hole_follows) { |
| 775 | hole_start = drm_mm_hole_node_start(entry); | 783 | hole_start = drm_mm_hole_node_start(entry); |
| 776 | hole_end = drm_mm_hole_node_end(entry); | 784 | hole_end = drm_mm_hole_node_end(entry); |
| 777 | hole_size = hole_end - hole_start; | 785 | hole_size = hole_end - hole_start; |
| 778 | printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n", | 786 | pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start, |
| 779 | prefix, hole_start, hole_end, | 787 | hole_end, hole_size); |
| 780 | hole_size); | ||
| 781 | return hole_size; | 788 | return hole_size; |
| 782 | } | 789 | } |
| 783 | 790 | ||
| @@ -792,35 +799,34 @@ static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry, | |||
| 792 | void drm_mm_debug_table(struct drm_mm *mm, const char *prefix) | 799 | void drm_mm_debug_table(struct drm_mm *mm, const char *prefix) |
| 793 | { | 800 | { |
| 794 | struct drm_mm_node *entry; | 801 | struct drm_mm_node *entry; |
| 795 | unsigned long total_used = 0, total_free = 0, total = 0; | 802 | u64 total_used = 0, total_free = 0, total = 0; |
| 796 | 803 | ||
| 797 | total_free += drm_mm_debug_hole(&mm->head_node, prefix); | 804 | total_free += drm_mm_debug_hole(&mm->head_node, prefix); |
| 798 | 805 | ||
| 799 | drm_mm_for_each_node(entry, mm) { | 806 | drm_mm_for_each_node(entry, mm) { |
| 800 | printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n", | 807 | pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start, |
| 801 | prefix, entry->start, entry->start + entry->size, | 808 | entry->start + entry->size, entry->size); |
| 802 | entry->size); | ||
| 803 | total_used += entry->size; | 809 | total_used += entry->size; |
| 804 | total_free += drm_mm_debug_hole(entry, prefix); | 810 | total_free += drm_mm_debug_hole(entry, prefix); |
| 805 | } | 811 | } |
| 806 | total = total_free + total_used; | 812 | total = total_free + total_used; |
| 807 | 813 | ||
| 808 | printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total, | 814 | pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total, |
| 809 | total_used, total_free); | 815 | total_used, total_free); |
| 810 | } | 816 | } |
| 811 | EXPORT_SYMBOL(drm_mm_debug_table); | 817 | EXPORT_SYMBOL(drm_mm_debug_table); |
| 812 | 818 | ||
| 813 | #if defined(CONFIG_DEBUG_FS) | 819 | #if defined(CONFIG_DEBUG_FS) |
| 814 | static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry) | 820 | static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry) |
| 815 | { | 821 | { |
| 816 | unsigned long hole_start, hole_end, hole_size; | 822 | u64 hole_start, hole_end, hole_size; |
| 817 | 823 | ||
| 818 | if (entry->hole_follows) { | 824 | if (entry->hole_follows) { |
| 819 | hole_start = drm_mm_hole_node_start(entry); | 825 | hole_start = drm_mm_hole_node_start(entry); |
| 820 | hole_end = drm_mm_hole_node_end(entry); | 826 | hole_end = drm_mm_hole_node_end(entry); |
| 821 | hole_size = hole_end - hole_start; | 827 | hole_size = hole_end - hole_start; |
| 822 | seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n", | 828 | seq_printf(m, "%#llx-%#llx: %llu: free\n", hole_start, |
| 823 | hole_start, hole_end, hole_size); | 829 | hole_end, hole_size); |
| 824 | return hole_size; | 830 | return hole_size; |
| 825 | } | 831 | } |
| 826 | 832 | ||
| @@ -835,20 +841,20 @@ static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *en | |||
| 835 | int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) | 841 | int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) |
| 836 | { | 842 | { |
| 837 | struct drm_mm_node *entry; | 843 | struct drm_mm_node *entry; |
| 838 | unsigned long total_used = 0, total_free = 0, total = 0; | 844 | u64 total_used = 0, total_free = 0, total = 0; |
| 839 | 845 | ||
| 840 | total_free += drm_mm_dump_hole(m, &mm->head_node); | 846 | total_free += drm_mm_dump_hole(m, &mm->head_node); |
| 841 | 847 | ||
| 842 | drm_mm_for_each_node(entry, mm) { | 848 | drm_mm_for_each_node(entry, mm) { |
| 843 | seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n", | 849 | seq_printf(m, "%#016llx-%#016llx: %llu: used\n", entry->start, |
| 844 | entry->start, entry->start + entry->size, | 850 | entry->start + entry->size, entry->size); |
| 845 | entry->size); | ||
| 846 | total_used += entry->size; | 851 | total_used += entry->size; |
| 847 | total_free += drm_mm_dump_hole(m, entry); | 852 | total_free += drm_mm_dump_hole(m, entry); |
| 848 | } | 853 | } |
| 849 | total = total_free + total_used; | 854 | total = total_free + total_used; |
| 850 | 855 | ||
| 851 | seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free); | 856 | seq_printf(m, "total: %llu, used %llu free %llu\n", total, |
| 857 | total_used, total_free); | ||
| 852 | return 0; | 858 | return 0; |
| 853 | } | 859 | } |
| 854 | EXPORT_SYMBOL(drm_mm_dump_table); | 860 | EXPORT_SYMBOL(drm_mm_dump_table); |
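The drm_mm conversion widens every offset, size and range from unsigned long to u64 so 32-bit builds can manage GPU address spaces larger than 4 GiB; as a consequence the bare "adj_start % alignment" becomes do_div(), because a plain 64-bit modulo would pull in libgcc helpers the kernel does not link on 32-bit, and do_div() returns the remainder while reducing its first argument in place. A standalone restatement of the same alignment step using explicit 64-bit arithmetic:

#include <stdio.h>
#include <stdint.h>

/*
 * Align 'start' the way the converted drm_mm helpers do: compute the
 * remainder against 'alignment' on a 64-bit value, then bump the start
 * up (bottom-up allocation) or pull it down (top-down).
 */
static uint64_t align_start(uint64_t start, uint32_t alignment, int top_down)
{
    uint32_t rem;

    if (!alignment)
        return start;

    rem = (uint32_t)(start % alignment);    /* do_div(tmp, alignment) in-kernel */
    if (rem)
        start = top_down ? start - rem : start + alignment - rem;

    return start;
}

int main(void)
{
    uint64_t hole = 0x100000123ULL;    /* above 4 GiB, needs 64-bit math */

    printf("bottom-up: %#llx\n", (unsigned long long)align_start(hole, 0x1000, 0));
    printf("top-down:  %#llx\n", (unsigned long long)align_start(hole, 0x1000, 1));
    return 0;
}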
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 96e811fe24ca..e8b18e542da4 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
| @@ -152,12 +152,12 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) | |||
| 152 | seq_puts(m, " (pp"); | 152 | seq_puts(m, " (pp"); |
| 153 | else | 153 | else |
| 154 | seq_puts(m, " (g"); | 154 | seq_puts(m, " (g"); |
| 155 | seq_printf(m, "gtt offset: %08lx, size: %08lx, type: %u)", | 155 | seq_printf(m, "gtt offset: %08llx, size: %08llx, type: %u)", |
| 156 | vma->node.start, vma->node.size, | 156 | vma->node.start, vma->node.size, |
| 157 | vma->ggtt_view.type); | 157 | vma->ggtt_view.type); |
| 158 | } | 158 | } |
| 159 | if (obj->stolen) | 159 | if (obj->stolen) |
| 160 | seq_printf(m, " (stolen: %08lx)", obj->stolen->start); | 160 | seq_printf(m, " (stolen: %08llx)", obj->stolen->start); |
| 161 | if (obj->pin_mappable || obj->fault_mappable) { | 161 | if (obj->pin_mappable || obj->fault_mappable) { |
| 162 | char s[3], *t = s; | 162 | char s[3], *t = s; |
| 163 | if (obj->pin_mappable) | 163 | if (obj->pin_mappable) |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 8039cec71fc2..cc6ea53d2b81 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
| @@ -622,7 +622,7 @@ static int i915_drm_suspend(struct drm_device *dev) | |||
| 622 | return 0; | 622 | return 0; |
| 623 | } | 623 | } |
| 624 | 624 | ||
| 625 | static int i915_drm_suspend_late(struct drm_device *drm_dev) | 625 | static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation) |
| 626 | { | 626 | { |
| 627 | struct drm_i915_private *dev_priv = drm_dev->dev_private; | 627 | struct drm_i915_private *dev_priv = drm_dev->dev_private; |
| 628 | int ret; | 628 | int ret; |
| @@ -636,7 +636,17 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev) | |||
| 636 | } | 636 | } |
| 637 | 637 | ||
| 638 | pci_disable_device(drm_dev->pdev); | 638 | pci_disable_device(drm_dev->pdev); |
| 639 | pci_set_power_state(drm_dev->pdev, PCI_D3hot); | 639 | /* |
| 640 | * During hibernation on some GEN4 platforms the BIOS may try to access | ||
| 641 | * the device even though it's already in D3 and hang the machine. So | ||
| 642 | * leave the device in D0 on those platforms and hope the BIOS will | ||
| 643 | * power down the device properly. Platforms where this was seen: | ||
| 644 | * Lenovo Thinkpad X301, X61s | ||
| 645 | */ | ||
| 646 | if (!(hibernation && | ||
| 647 | drm_dev->pdev->subsystem_vendor == PCI_VENDOR_ID_LENOVO && | ||
| 648 | INTEL_INFO(dev_priv)->gen == 4)) | ||
| 649 | pci_set_power_state(drm_dev->pdev, PCI_D3hot); | ||
| 640 | 650 | ||
| 641 | return 0; | 651 | return 0; |
| 642 | } | 652 | } |
| @@ -662,7 +672,7 @@ int i915_suspend_legacy(struct drm_device *dev, pm_message_t state) | |||
| 662 | if (error) | 672 | if (error) |
| 663 | return error; | 673 | return error; |
| 664 | 674 | ||
| 665 | return i915_drm_suspend_late(dev); | 675 | return i915_drm_suspend_late(dev, false); |
| 666 | } | 676 | } |
| 667 | 677 | ||
| 668 | static int i915_drm_resume(struct drm_device *dev) | 678 | static int i915_drm_resume(struct drm_device *dev) |
| @@ -950,7 +960,17 @@ static int i915_pm_suspend_late(struct device *dev) | |||
| 950 | if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) | 960 | if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) |
| 951 | return 0; | 961 | return 0; |
| 952 | 962 | ||
| 953 | return i915_drm_suspend_late(drm_dev); | 963 | return i915_drm_suspend_late(drm_dev, false); |
| 964 | } | ||
| 965 | |||
| 966 | static int i915_pm_poweroff_late(struct device *dev) | ||
| 967 | { | ||
| 968 | struct drm_device *drm_dev = dev_to_i915(dev)->dev; | ||
| 969 | |||
| 970 | if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) | ||
| 971 | return 0; | ||
| 972 | |||
| 973 | return i915_drm_suspend_late(drm_dev, true); | ||
| 954 | } | 974 | } |
| 955 | 975 | ||
| 956 | static int i915_pm_resume_early(struct device *dev) | 976 | static int i915_pm_resume_early(struct device *dev) |
| @@ -1520,7 +1540,7 @@ static const struct dev_pm_ops i915_pm_ops = { | |||
| 1520 | .thaw_early = i915_pm_resume_early, | 1540 | .thaw_early = i915_pm_resume_early, |
| 1521 | .thaw = i915_pm_resume, | 1541 | .thaw = i915_pm_resume, |
| 1522 | .poweroff = i915_pm_suspend, | 1542 | .poweroff = i915_pm_suspend, |
| 1523 | .poweroff_late = i915_pm_suspend_late, | 1543 | .poweroff_late = i915_pm_poweroff_late, |
| 1524 | .restore_early = i915_pm_resume_early, | 1544 | .restore_early = i915_pm_resume_early, |
| 1525 | .restore = i915_pm_resume, | 1545 | .restore = i915_pm_resume, |
| 1526 | 1546 | ||
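The i915_drv.c hunks thread a hibernation flag into the late-suspend path so poweroff_late can keep certain GEN4 Lenovo machines in D0, per the comment about BIOSes touching the device after it has already entered D3. The decision itself boils down to a small predicate; a standalone restatement follows (PCI_VENDOR_ID_LENOVO is 0x17aa, the rest of the surrounding suspend flow is omitted).

#include <stdio.h>
#include <stdbool.h>

#define PCI_VENDOR_ID_LENOVO 0x17aa

/*
 * Decide whether it is safe to put the GPU into D3hot before hibernation.
 * Mirrors the reasoning in the hunk: some GEN4 Lenovo BIOSes poke the
 * device after it entered D3 and hang, so leave those in D0.
 */
static bool can_enter_d3(bool hibernation, unsigned short subsys_vendor, int gen)
{
    return !(hibernation && subsys_vendor == PCI_VENDOR_ID_LENOVO && gen == 4);
}

int main(void)
{
    /* 0: a hibernating GEN4 Lenovo stays in D0 */
    printf("%d\n", can_enter_d3(true, PCI_VENDOR_ID_LENOVO, 4));
    /* 1: ordinary suspend on the same machine may enter D3hot */
    printf("%d\n", can_enter_d3(false, PCI_VENDOR_ID_LENOVO, 4));
    return 0;
}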
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index e5daad5f75fb..5b205863b659 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
| @@ -2936,9 +2936,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) | |||
| 2936 | req = obj->last_read_req; | 2936 | req = obj->last_read_req; |
| 2937 | 2937 | ||
| 2938 | /* Do this after OLR check to make sure we make forward progress polling | 2938 | /* Do this after OLR check to make sure we make forward progress polling |
| 2939 | * on this IOCTL with a timeout <=0 (like busy ioctl) | 2939 | * on this IOCTL with a timeout == 0 (like busy ioctl) |
| 2940 | */ | 2940 | */ |
| 2941 | if (args->timeout_ns <= 0) { | 2941 | if (args->timeout_ns == 0) { |
| 2942 | ret = -ETIME; | 2942 | ret = -ETIME; |
| 2943 | goto out; | 2943 | goto out; |
| 2944 | } | 2944 | } |
| @@ -2948,7 +2948,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) | |||
| 2948 | i915_gem_request_reference(req); | 2948 | i915_gem_request_reference(req); |
| 2949 | mutex_unlock(&dev->struct_mutex); | 2949 | mutex_unlock(&dev->struct_mutex); |
| 2950 | 2950 | ||
| 2951 | ret = __i915_wait_request(req, reset_counter, true, &args->timeout_ns, | 2951 | ret = __i915_wait_request(req, reset_counter, true, |
| 2952 | args->timeout_ns > 0 ? &args->timeout_ns : NULL, | ||
| 2952 | file->driver_priv); | 2953 | file->driver_priv); |
| 2953 | mutex_lock(&dev->struct_mutex); | 2954 | mutex_lock(&dev->struct_mutex); |
| 2954 | i915_gem_request_unreference(req); | 2955 | i915_gem_request_unreference(req); |
| @@ -4792,6 +4793,9 @@ i915_gem_init_hw(struct drm_device *dev) | |||
| 4792 | if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) | 4793 | if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) |
| 4793 | return -EIO; | 4794 | return -EIO; |
| 4794 | 4795 | ||
| 4796 | /* Double layer security blanket, see i915_gem_init() */ | ||
| 4797 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | ||
| 4798 | |||
| 4795 | if (dev_priv->ellc_size) | 4799 | if (dev_priv->ellc_size) |
| 4796 | I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); | 4800 | I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); |
| 4797 | 4801 | ||
| @@ -4824,7 +4828,7 @@ i915_gem_init_hw(struct drm_device *dev) | |||
| 4824 | for_each_ring(ring, dev_priv, i) { | 4828 | for_each_ring(ring, dev_priv, i) { |
| 4825 | ret = ring->init_hw(ring); | 4829 | ret = ring->init_hw(ring); |
| 4826 | if (ret) | 4830 | if (ret) |
| 4827 | return ret; | 4831 | goto out; |
| 4828 | } | 4832 | } |
| 4829 | 4833 | ||
| 4830 | for (i = 0; i < NUM_L3_SLICES(dev); i++) | 4834 | for (i = 0; i < NUM_L3_SLICES(dev); i++) |
| @@ -4841,9 +4845,11 @@ i915_gem_init_hw(struct drm_device *dev) | |||
| 4841 | DRM_ERROR("Context enable failed %d\n", ret); | 4845 | DRM_ERROR("Context enable failed %d\n", ret); |
| 4842 | i915_gem_cleanup_ringbuffer(dev); | 4846 | i915_gem_cleanup_ringbuffer(dev); |
| 4843 | 4847 | ||
| 4844 | return ret; | 4848 | goto out; |
| 4845 | } | 4849 | } |
| 4846 | 4850 | ||
| 4851 | out: | ||
| 4852 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | ||
| 4847 | return ret; | 4853 | return ret; |
| 4848 | } | 4854 | } |
| 4849 | 4855 | ||
| @@ -4877,6 +4883,14 @@ int i915_gem_init(struct drm_device *dev) | |||
| 4877 | dev_priv->gt.stop_ring = intel_logical_ring_stop; | 4883 | dev_priv->gt.stop_ring = intel_logical_ring_stop; |
| 4878 | } | 4884 | } |
| 4879 | 4885 | ||
| 4886 | /* This is just a security blanket to placate dragons. | ||
| 4887 | * On some systems, we very sporadically observe that the first TLBs | ||
| 4888 | * used by the CS may be stale, despite us poking the TLB reset. If | ||
| 4889 | * we hold the forcewake during initialisation these problems | ||
| 4890 | * just magically go away. | ||
| 4891 | */ | ||
| 4892 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | ||
| 4893 | |||
| 4880 | ret = i915_gem_init_userptr(dev); | 4894 | ret = i915_gem_init_userptr(dev); |
| 4881 | if (ret) | 4895 | if (ret) |
| 4882 | goto out_unlock; | 4896 | goto out_unlock; |
| @@ -4903,6 +4917,7 @@ int i915_gem_init(struct drm_device *dev) | |||
| 4903 | } | 4917 | } |
| 4904 | 4918 | ||
| 4905 | out_unlock: | 4919 | out_unlock: |
| 4920 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | ||
| 4906 | mutex_unlock(&dev->struct_mutex); | 4921 | mutex_unlock(&dev->struct_mutex); |
| 4907 | 4922 | ||
| 4908 | return ret; | 4923 | return ret; |
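Two separate fixes live in the i915_gem.c hunks: the wait ioctl now treats a negative timeout as "wait forever" by passing a NULL timeout pointer down, with only an exact zero returning -ETIME immediately, and hardware init is bracketed by a forcewake get/put so early command-streamer accesses cannot hit stale TLBs, with the error paths funnelled through a single out: label that drops the reference. Below is a small standalone sketch of the timeout convention only; do_wait() is a stand-in, not __i915_wait_request.

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

/* Stand-in for the real wait: a NULL timeout means "block until done". */
static int do_wait(int64_t *timeout_ns)
{
    if (!timeout_ns)
        return 0;    /* unbounded wait; pretend the request completed */

    /* a bounded wait would decrement *timeout_ns as it sleeps */
    return 0;
}

static int wait_ioctl(int64_t timeout_ns)
{
    if (timeout_ns == 0)
        return -ETIME;    /* poll-style call and nothing has finished yet */

    /* negative: wait forever; positive: bounded wait */
    return do_wait(timeout_ns > 0 ? &timeout_ns : NULL);
}

int main(void)
{
    printf("timeout 0:  %d\n", wait_ioctl(0));     /* -ETIME */
    printf("timeout -1: %d\n", wait_ioctl(-1));    /* 0, waited unbounded */
    return 0;
}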
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 746f77fb57a3..dccdc8aad2e2 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
| @@ -1145,7 +1145,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) | |||
| 1145 | 1145 | ||
| 1146 | ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true); | 1146 | ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true); |
| 1147 | 1147 | ||
| 1148 | DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n", | 1148 | DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n", |
| 1149 | ppgtt->node.size >> 20, | 1149 | ppgtt->node.size >> 20, |
| 1150 | ppgtt->node.start / PAGE_SIZE); | 1150 | ppgtt->node.start / PAGE_SIZE); |
| 1151 | 1151 | ||
| @@ -1713,8 +1713,8 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj) | |||
| 1713 | 1713 | ||
| 1714 | static void i915_gtt_color_adjust(struct drm_mm_node *node, | 1714 | static void i915_gtt_color_adjust(struct drm_mm_node *node, |
| 1715 | unsigned long color, | 1715 | unsigned long color, |
| 1716 | unsigned long *start, | 1716 | u64 *start, |
| 1717 | unsigned long *end) | 1717 | u64 *end) |
| 1718 | { | 1718 | { |
| 1719 | if (node->color != color) | 1719 | if (node->color != color) |
| 1720 | *start += 4096; | 1720 | *start += 4096; |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e730789b53b7..9943c20a741d 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -9716,7 +9716,7 @@ void intel_check_page_flip(struct drm_device *dev, int pipe) | |||
| 9716 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | 9716 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; |
| 9717 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 9717 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 9718 | 9718 | ||
| 9719 | WARN_ON(!in_irq()); | 9719 | WARN_ON(!in_interrupt()); |
| 9720 | 9720 | ||
| 9721 | if (crtc == NULL) | 9721 | if (crtc == NULL) |
| 9722 | return; | 9722 | return; |
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c index 04e248dd2259..54daa66c6970 100644 --- a/drivers/gpu/drm/i915/intel_fifo_underrun.c +++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c | |||
| @@ -282,16 +282,6 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv, | |||
| 282 | return ret; | 282 | return ret; |
| 283 | } | 283 | } |
| 284 | 284 | ||
| 285 | static bool | ||
| 286 | __cpu_fifo_underrun_reporting_enabled(struct drm_i915_private *dev_priv, | ||
| 287 | enum pipe pipe) | ||
| 288 | { | ||
| 289 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | ||
| 290 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
| 291 | |||
| 292 | return !intel_crtc->cpu_fifo_underrun_disabled; | ||
| 293 | } | ||
| 294 | |||
| 295 | /** | 285 | /** |
| 296 | * intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state | 286 | * intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state |
| 297 | * @dev_priv: i915 device instance | 287 | * @dev_priv: i915 device instance |
| @@ -352,9 +342,15 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv, | |||
| 352 | void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, | 342 | void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, |
| 353 | enum pipe pipe) | 343 | enum pipe pipe) |
| 354 | { | 344 | { |
| 345 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | ||
| 346 | |||
| 347 | /* We may be called too early in init, thanks BIOS! */ | ||
| 348 | if (crtc == NULL) | ||
| 349 | return; | ||
| 350 | |||
| 355 | /* GMCH can't disable fifo underruns, filter them. */ | 351 | /* GMCH can't disable fifo underruns, filter them. */ |
| 356 | if (HAS_GMCH_DISPLAY(dev_priv->dev) && | 352 | if (HAS_GMCH_DISPLAY(dev_priv->dev) && |
| 357 | !__cpu_fifo_underrun_reporting_enabled(dev_priv, pipe)) | 353 | to_intel_crtc(crtc)->cpu_fifo_underrun_disabled) |
| 358 | return; | 354 | return; |
| 359 | 355 | ||
| 360 | if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false)) | 356 | if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false)) |
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index c47a3baa53d5..4e8fb891d4ea 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c | |||
| @@ -1048,8 +1048,14 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) | |||
| 1048 | 1048 | ||
| 1049 | /* We need to init first for ECOBUS access and then | 1049 | /* We need to init first for ECOBUS access and then |
| 1050 | * determine later if we want to reinit, in case of MT access is | 1050 | * determine later if we want to reinit, in case of MT access is |
| 1051 | * not working | 1051 | * not working. In this stage we don't know which flavour this |
| 1052 | * ivb is, so it is better to reset also the gen6 fw registers | ||
| 1053 | * before the ecobus check. | ||
| 1052 | */ | 1054 | */ |
| 1055 | |||
| 1056 | __raw_i915_write32(dev_priv, FORCEWAKE, 0); | ||
| 1057 | __raw_posting_read(dev_priv, ECOBUS); | ||
| 1058 | |||
| 1053 | fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, | 1059 | fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, |
| 1054 | FORCEWAKE_MT, FORCEWAKE_MT_ACK); | 1060 | FORCEWAKE_MT, FORCEWAKE_MT_ACK); |
| 1055 | 1061 | ||
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c index 121d30ca2d44..87fe8ed92ebe 100644 --- a/drivers/gpu/drm/imx/dw_hdmi-imx.c +++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c | |||
| @@ -70,7 +70,9 @@ static const struct dw_hdmi_curr_ctrl imx_cur_ctr[] = { | |||
| 70 | 118800000, { 0x091c, 0x091c, 0x06dc }, | 70 | 118800000, { 0x091c, 0x091c, 0x06dc }, |
| 71 | }, { | 71 | }, { |
| 72 | 216000000, { 0x06dc, 0x0b5c, 0x091c }, | 72 | 216000000, { 0x06dc, 0x0b5c, 0x091c }, |
| 73 | } | 73 | }, { |
| 74 | ~0UL, { 0x0000, 0x0000, 0x0000 }, | ||
| 75 | }, | ||
| 74 | }; | 76 | }; |
| 75 | 77 | ||
| 76 | static const struct dw_hdmi_sym_term imx_sym_term[] = { | 78 | static const struct dw_hdmi_sym_term imx_sym_term[] = { |
| @@ -136,11 +138,34 @@ static struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = { | |||
| 136 | .destroy = drm_encoder_cleanup, | 138 | .destroy = drm_encoder_cleanup, |
| 137 | }; | 139 | }; |
| 138 | 140 | ||
| 141 | static enum drm_mode_status imx6q_hdmi_mode_valid(struct drm_connector *con, | ||
| 142 | struct drm_display_mode *mode) | ||
| 143 | { | ||
| 144 | if (mode->clock < 13500) | ||
| 145 | return MODE_CLOCK_LOW; | ||
| 146 | if (mode->clock > 266000) | ||
| 147 | return MODE_CLOCK_HIGH; | ||
| 148 | |||
| 149 | return MODE_OK; | ||
| 150 | } | ||
| 151 | |||
| 152 | static enum drm_mode_status imx6dl_hdmi_mode_valid(struct drm_connector *con, | ||
| 153 | struct drm_display_mode *mode) | ||
| 154 | { | ||
| 155 | if (mode->clock < 13500) | ||
| 156 | return MODE_CLOCK_LOW; | ||
| 157 | if (mode->clock > 270000) | ||
| 158 | return MODE_CLOCK_HIGH; | ||
| 159 | |||
| 160 | return MODE_OK; | ||
| 161 | } | ||
| 162 | |||
| 139 | static struct dw_hdmi_plat_data imx6q_hdmi_drv_data = { | 163 | static struct dw_hdmi_plat_data imx6q_hdmi_drv_data = { |
| 140 | .mpll_cfg = imx_mpll_cfg, | 164 | .mpll_cfg = imx_mpll_cfg, |
| 141 | .cur_ctr = imx_cur_ctr, | 165 | .cur_ctr = imx_cur_ctr, |
| 142 | .sym_term = imx_sym_term, | 166 | .sym_term = imx_sym_term, |
| 143 | .dev_type = IMX6Q_HDMI, | 167 | .dev_type = IMX6Q_HDMI, |
| 168 | .mode_valid = imx6q_hdmi_mode_valid, | ||
| 144 | }; | 169 | }; |
| 145 | 170 | ||
| 146 | static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = { | 171 | static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = { |
| @@ -148,6 +173,7 @@ static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = { | |||
| 148 | .cur_ctr = imx_cur_ctr, | 173 | .cur_ctr = imx_cur_ctr, |
| 149 | .sym_term = imx_sym_term, | 174 | .sym_term = imx_sym_term, |
| 150 | .dev_type = IMX6DL_HDMI, | 175 | .dev_type = IMX6DL_HDMI, |
| 176 | .mode_valid = imx6dl_hdmi_mode_valid, | ||
| 151 | }; | 177 | }; |
| 152 | 178 | ||
| 153 | static const struct of_device_id dw_hdmi_imx_dt_ids[] = { | 179 | static const struct of_device_id dw_hdmi_imx_dt_ids[] = { |
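The dw_hdmi-imx diff adds a catch-all ~0UL row so a "first entry whose rate covers the pixel clock" scan always terminates inside the table, and wires per-SoC mode_valid callbacks that reject modes outside the supported pixel-clock window. A standalone sketch of the sentinel-terminated lookup; the two real rates echo the hunk, but the layout and settings are simplified for illustration.

#include <stdio.h>

struct cfg {
    unsigned long max_clock;    /* entry valid up to this rate, in Hz */
    unsigned int setting;
};

/* The last row is a ~0UL sentinel, so the scan cannot run off the table. */
static const struct cfg table[] = {
    { 118800000UL, 0x091c },
    { 216000000UL, 0x06dc },
    { ~0UL,        0x0000 },    /* catch-all for out-of-range clocks */
};

static unsigned int lookup(unsigned long clock)
{
    const struct cfg *c = table;

    while (clock > c->max_clock)    /* always stops at the sentinel */
        c++;
    return c->setting;
}

int main(void)
{
    printf("%#x\n", lookup(148500000UL));    /* 0x6dc */
    printf("%#x\n", lookup(297000000UL));    /* 0: fell through to the sentinel */
    return 0;
}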
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index 1b86aac0b341..2d6dc94e1e64 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c | |||
| @@ -163,22 +163,7 @@ static void imx_ldb_encoder_prepare(struct drm_encoder *encoder) | |||
| 163 | { | 163 | { |
| 164 | struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); | 164 | struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); |
| 165 | struct imx_ldb *ldb = imx_ldb_ch->ldb; | 165 | struct imx_ldb *ldb = imx_ldb_ch->ldb; |
| 166 | struct drm_display_mode *mode = &encoder->crtc->hwmode; | ||
| 167 | u32 pixel_fmt; | 166 | u32 pixel_fmt; |
| 168 | unsigned long serial_clk; | ||
| 169 | unsigned long di_clk = mode->clock * 1000; | ||
| 170 | int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder); | ||
| 171 | |||
| 172 | if (ldb->ldb_ctrl & LDB_SPLIT_MODE_EN) { | ||
| 173 | /* dual channel LVDS mode */ | ||
| 174 | serial_clk = 3500UL * mode->clock; | ||
| 175 | imx_ldb_set_clock(ldb, mux, 0, serial_clk, di_clk); | ||
| 176 | imx_ldb_set_clock(ldb, mux, 1, serial_clk, di_clk); | ||
| 177 | } else { | ||
| 178 | serial_clk = 7000UL * mode->clock; | ||
| 179 | imx_ldb_set_clock(ldb, mux, imx_ldb_ch->chno, serial_clk, | ||
| 180 | di_clk); | ||
| 181 | } | ||
| 182 | 167 | ||
| 183 | switch (imx_ldb_ch->chno) { | 168 | switch (imx_ldb_ch->chno) { |
| 184 | case 0: | 169 | case 0: |
| @@ -247,6 +232,9 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder, | |||
| 247 | struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); | 232 | struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); |
| 248 | struct imx_ldb *ldb = imx_ldb_ch->ldb; | 233 | struct imx_ldb *ldb = imx_ldb_ch->ldb; |
| 249 | int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; | 234 | int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; |
| 235 | unsigned long serial_clk; | ||
| 236 | unsigned long di_clk = mode->clock * 1000; | ||
| 237 | int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder); | ||
| 250 | 238 | ||
| 251 | if (mode->clock > 170000) { | 239 | if (mode->clock > 170000) { |
| 252 | dev_warn(ldb->dev, | 240 | dev_warn(ldb->dev, |
| @@ -257,6 +245,16 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder, | |||
| 257 | "%s: mode exceeds 85 MHz pixel clock\n", __func__); | 245 | "%s: mode exceeds 85 MHz pixel clock\n", __func__); |
| 258 | } | 246 | } |
| 259 | 247 | ||
| 248 | if (dual) { | ||
| 249 | serial_clk = 3500UL * mode->clock; | ||
| 250 | imx_ldb_set_clock(ldb, mux, 0, serial_clk, di_clk); | ||
| 251 | imx_ldb_set_clock(ldb, mux, 1, serial_clk, di_clk); | ||
| 252 | } else { | ||
| 253 | serial_clk = 7000UL * mode->clock; | ||
| 254 | imx_ldb_set_clock(ldb, mux, imx_ldb_ch->chno, serial_clk, | ||
| 255 | di_clk); | ||
| 256 | } | ||
| 257 | |||
| 260 | /* FIXME - assumes straight connections DI0 --> CH0, DI1 --> CH1 */ | 258 | /* FIXME - assumes straight connections DI0 --> CH0, DI1 --> CH1 */ |
| 261 | if (imx_ldb_ch == &ldb->channel[0]) { | 259 | if (imx_ldb_ch == &ldb->channel[0]) { |
| 262 | if (mode->flags & DRM_MODE_FLAG_NVSYNC) | 260 | if (mode->flags & DRM_MODE_FLAG_NVSYNC) |
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index 5e83e007080f..900dda6a8e71 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c | |||
| @@ -236,8 +236,11 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) | |||
| 236 | } | 236 | } |
| 237 | 237 | ||
| 238 | panel_node = of_parse_phandle(np, "fsl,panel", 0); | 238 | panel_node = of_parse_phandle(np, "fsl,panel", 0); |
| 239 | if (panel_node) | 239 | if (panel_node) { |
| 240 | imxpd->panel = of_drm_find_panel(panel_node); | 240 | imxpd->panel = of_drm_find_panel(panel_node); |
| 241 | if (!imxpd->panel) | ||
| 242 | return -EPROBE_DEFER; | ||
| 243 | } | ||
| 241 | 244 | ||
| 242 | imxpd->dev = dev; | 245 | imxpd->dev = dev; |
| 243 | 246 | ||
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c index 8edd531cb621..7369ee7f0c55 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c | |||
| @@ -32,7 +32,10 @@ static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus) | |||
| 32 | void mdp4_irq_preinstall(struct msm_kms *kms) | 32 | void mdp4_irq_preinstall(struct msm_kms *kms) |
| 33 | { | 33 | { |
| 34 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); | 34 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); |
| 35 | mdp4_enable(mdp4_kms); | ||
| 35 | mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff); | 36 | mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff); |
| 37 | mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000); | ||
| 38 | mdp4_disable(mdp4_kms); | ||
| 36 | } | 39 | } |
| 37 | 40 | ||
| 38 | int mdp4_irq_postinstall(struct msm_kms *kms) | 41 | int mdp4_irq_postinstall(struct msm_kms *kms) |
| @@ -53,7 +56,9 @@ int mdp4_irq_postinstall(struct msm_kms *kms) | |||
| 53 | void mdp4_irq_uninstall(struct msm_kms *kms) | 56 | void mdp4_irq_uninstall(struct msm_kms *kms) |
| 54 | { | 57 | { |
| 55 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); | 58 | struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); |
| 59 | mdp4_enable(mdp4_kms); | ||
| 56 | mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000); | 60 | mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000); |
| 61 | mdp4_disable(mdp4_kms); | ||
| 57 | } | 62 | } |
| 58 | 63 | ||
| 59 | irqreturn_t mdp4_irq(struct msm_kms *kms) | 64 | irqreturn_t mdp4_irq(struct msm_kms *kms) |
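The mdp4_irq hunks bracket the interrupt-register writes with mdp4_enable()/mdp4_disable(), the driver's clock references, since preinstall and uninstall can run while the block is otherwise idle and touching registers with the clocks gated would fault or be silently dropped. The generic shape, sketched with placeholder names; clk_on(), clk_off(), reg_write() and the register offsets are illustrative, not the MDP4 helpers.

#include <stdio.h>
#include <stdbool.h>
#include <assert.h>

static bool clocks_on;

static void clk_on(void)  { clocks_on = true;  }
static void clk_off(void) { clocks_on = false; }

/* Any register access must happen inside a clk_on()/clk_off() bracket. */
static void reg_write(unsigned int reg, unsigned int val)
{
    assert(clocks_on);    /* stand-in for "bus access would fault" */
    printf("write %#x -> reg %#x\n", val, reg);
}

static void irq_preinstall(void)
{
    clk_on();                          /* make the block reachable first */
    reg_write(0x0054, 0xffffffff);     /* clear pending interrupts */
    reg_write(0x0050, 0x00000000);     /* mask everything */
    clk_off();
}

int main(void)
{
    irq_preinstall();
    return 0;
}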
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h index 09b4a25eb553..c276624290af 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h | |||
| @@ -8,17 +8,9 @@ http://github.com/freedreno/envytools/ | |||
| 8 | git clone https://github.com/freedreno/envytools.git | 8 | git clone https://github.com/freedreno/envytools.git |
| 9 | 9 | ||
| 10 | The rules-ng-ng source files this header was generated from are: | 10 | The rules-ng-ng source files this header was generated from are: |
| 11 | - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) | 11 | - /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp5.xml ( 27229 bytes, from 2015-02-10 17:00:41) |
| 12 | - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) | 12 | - /local/mnt2/workspace2/sviau/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2014-06-02 18:31:15) |
| 13 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00) | 13 | - /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2015-01-23 16:20:19) |
| 14 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00) | ||
| 15 | - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11) | ||
| 16 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) | ||
| 17 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) | ||
| 18 | - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) | ||
| 19 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) | ||
| 20 | - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57) | ||
| 21 | - /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00) | ||
| 22 | 14 | ||
| 23 | Copyright (C) 2013-2015 by the following authors: | 15 | Copyright (C) 2013-2015 by the following authors: |
| 24 | - Rob Clark <robdclark@gmail.com> (robclark) | 16 | - Rob Clark <robdclark@gmail.com> (robclark) |
| @@ -910,6 +902,7 @@ static inline uint32_t __offset_LM(uint32_t idx) | |||
| 910 | case 2: return (mdp5_cfg->lm.base[2]); | 902 | case 2: return (mdp5_cfg->lm.base[2]); |
| 911 | case 3: return (mdp5_cfg->lm.base[3]); | 903 | case 3: return (mdp5_cfg->lm.base[3]); |
| 912 | case 4: return (mdp5_cfg->lm.base[4]); | 904 | case 4: return (mdp5_cfg->lm.base[4]); |
| 905 | case 5: return (mdp5_cfg->lm.base[5]); | ||
| 913 | default: return INVALID_IDX(idx); | 906 | default: return INVALID_IDX(idx); |
| 914 | } | 907 | } |
| 915 | } | 908 | } |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 46fac545dc2b..2f2863cf8b45 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | |||
| @@ -62,8 +62,8 @@ struct mdp5_crtc { | |||
| 62 | 62 | ||
| 63 | /* current cursor being scanned out: */ | 63 | /* current cursor being scanned out: */ |
| 64 | struct drm_gem_object *scanout_bo; | 64 | struct drm_gem_object *scanout_bo; |
| 65 | uint32_t width; | 65 | uint32_t width, height; |
| 66 | uint32_t height; | 66 | uint32_t x, y; |
| 67 | } cursor; | 67 | } cursor; |
| 68 | }; | 68 | }; |
| 69 | #define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base) | 69 | #define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base) |
| @@ -103,8 +103,8 @@ static void crtc_flush_all(struct drm_crtc *crtc) | |||
| 103 | struct drm_plane *plane; | 103 | struct drm_plane *plane; |
| 104 | uint32_t flush_mask = 0; | 104 | uint32_t flush_mask = 0; |
| 105 | 105 | ||
| 106 | /* we could have already released CTL in the disable path: */ | 106 | /* this should not happen: */ |
| 107 | if (!mdp5_crtc->ctl) | 107 | if (WARN_ON(!mdp5_crtc->ctl)) |
| 108 | return; | 108 | return; |
| 109 | 109 | ||
| 110 | drm_atomic_crtc_for_each_plane(plane, crtc) { | 110 | drm_atomic_crtc_for_each_plane(plane, crtc) { |
| @@ -143,6 +143,11 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) | |||
| 143 | drm_atomic_crtc_for_each_plane(plane, crtc) { | 143 | drm_atomic_crtc_for_each_plane(plane, crtc) { |
| 144 | mdp5_plane_complete_flip(plane); | 144 | mdp5_plane_complete_flip(plane); |
| 145 | } | 145 | } |
| 146 | |||
| 147 | if (mdp5_crtc->ctl && !crtc->state->enable) { | ||
| 148 | mdp5_ctl_release(mdp5_crtc->ctl); | ||
| 149 | mdp5_crtc->ctl = NULL; | ||
| 150 | } | ||
| 146 | } | 151 | } |
| 147 | 152 | ||
| 148 | static void unref_cursor_worker(struct drm_flip_work *work, void *val) | 153 | static void unref_cursor_worker(struct drm_flip_work *work, void *val) |
| @@ -386,14 +391,17 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc) | |||
| 386 | mdp5_crtc->event = crtc->state->event; | 391 | mdp5_crtc->event = crtc->state->event; |
| 387 | spin_unlock_irqrestore(&dev->event_lock, flags); | 392 | spin_unlock_irqrestore(&dev->event_lock, flags); |
| 388 | 393 | ||
| 394 | /* | ||
| 395 | * If no CTL has been allocated in mdp5_crtc_atomic_check(), | ||
| 396 | * it means we are trying to flush a CRTC whose state is disabled: | ||
| 397 | * nothing else needs to be done. | ||
| 398 | */ | ||
| 399 | if (unlikely(!mdp5_crtc->ctl)) | ||
| 400 | return; | ||
| 401 | |||
| 389 | blend_setup(crtc); | 402 | blend_setup(crtc); |
| 390 | crtc_flush_all(crtc); | 403 | crtc_flush_all(crtc); |
| 391 | request_pending(crtc, PENDING_FLIP); | 404 | request_pending(crtc, PENDING_FLIP); |
| 392 | |||
| 393 | if (mdp5_crtc->ctl && !crtc->state->enable) { | ||
| 394 | mdp5_ctl_release(mdp5_crtc->ctl); | ||
| 395 | mdp5_crtc->ctl = NULL; | ||
| 396 | } | ||
| 397 | } | 405 | } |
| 398 | 406 | ||
| 399 | static int mdp5_crtc_set_property(struct drm_crtc *crtc, | 407 | static int mdp5_crtc_set_property(struct drm_crtc *crtc, |
| @@ -403,6 +411,32 @@ static int mdp5_crtc_set_property(struct drm_crtc *crtc, | |||
| 403 | return -EINVAL; | 411 | return -EINVAL; |
| 404 | } | 412 | } |
| 405 | 413 | ||
| 414 | static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h) | ||
| 415 | { | ||
| 416 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); | ||
| 417 | uint32_t xres = crtc->mode.hdisplay; | ||
| 418 | uint32_t yres = crtc->mode.vdisplay; | ||
| 419 | |||
| 420 | /* | ||
| 421 | * Cursor Region Of Interest (ROI) is a plane read from cursor | ||
| 422 | * buffer to render. The ROI region is determined by the visibility of | ||
| 423 | * the cursor point. In the default Cursor image the cursor point will | ||
| 424 | * be at the top left of the cursor image, unless it is specified | ||
| 425 | * otherwise using hotspot feature. | ||
| 426 | * | ||
| 427 | * If the cursor point reaches the right (xres - x < cursor.width) or | ||
| 428 | * bottom (yres - y < cursor.height) boundary of the screen, then ROI | ||
| 429 | * width and ROI height need to be evaluated to crop the cursor image | ||
| 430 | * accordingly. | ||
| 431 | * (xres-x) will be new cursor width when x > (xres - cursor.width) | ||
| 432 | * (yres-y) will be new cursor height when y > (yres - cursor.height) | ||
| 433 | */ | ||
| 434 | *roi_w = min(mdp5_crtc->cursor.width, xres - | ||
| 435 | mdp5_crtc->cursor.x); | ||
| 436 | *roi_h = min(mdp5_crtc->cursor.height, yres - | ||
| 437 | mdp5_crtc->cursor.y); | ||
| 438 | } | ||
| 439 | |||
| 406 | static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, | 440 | static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, |
| 407 | struct drm_file *file, uint32_t handle, | 441 | struct drm_file *file, uint32_t handle, |
| 408 | uint32_t width, uint32_t height) | 442 | uint32_t width, uint32_t height) |
| @@ -416,6 +450,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, | |||
| 416 | unsigned int depth; | 450 | unsigned int depth; |
| 417 | enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL; | 451 | enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL; |
| 418 | uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); | 452 | uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); |
| 453 | uint32_t roi_w, roi_h; | ||
| 419 | unsigned long flags; | 454 | unsigned long flags; |
| 420 | 455 | ||
| 421 | if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) { | 456 | if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) { |
| @@ -446,6 +481,12 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, | |||
| 446 | spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); | 481 | spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); |
| 447 | old_bo = mdp5_crtc->cursor.scanout_bo; | 482 | old_bo = mdp5_crtc->cursor.scanout_bo; |
| 448 | 483 | ||
| 484 | mdp5_crtc->cursor.scanout_bo = cursor_bo; | ||
| 485 | mdp5_crtc->cursor.width = width; | ||
| 486 | mdp5_crtc->cursor.height = height; | ||
| 487 | |||
| 488 | get_roi(crtc, &roi_w, &roi_h); | ||
| 489 | |||
| 449 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride); | 490 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride); |
| 450 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm), | 491 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm), |
| 451 | MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888)); | 492 | MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888)); |
| @@ -453,19 +494,14 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, | |||
| 453 | MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) | | 494 | MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) | |
| 454 | MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width)); | 495 | MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width)); |
| 455 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm), | 496 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm), |
| 456 | MDP5_LM_CURSOR_SIZE_ROI_H(height) | | 497 | MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) | |
| 457 | MDP5_LM_CURSOR_SIZE_ROI_W(width)); | 498 | MDP5_LM_CURSOR_SIZE_ROI_W(roi_w)); |
| 458 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr); | 499 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr); |
| 459 | 500 | ||
| 460 | |||
| 461 | blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN; | 501 | blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN; |
| 462 | blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_TRANSP_EN; | ||
| 463 | blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha); | 502 | blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha); |
| 464 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg); | 503 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg); |
| 465 | 504 | ||
| 466 | mdp5_crtc->cursor.scanout_bo = cursor_bo; | ||
| 467 | mdp5_crtc->cursor.width = width; | ||
| 468 | mdp5_crtc->cursor.height = height; | ||
| 469 | spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); | 505 | spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); |
| 470 | 506 | ||
| 471 | ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, true); | 507 | ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, true); |
| @@ -489,31 +525,18 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) | |||
| 489 | struct mdp5_kms *mdp5_kms = get_kms(crtc); | 525 | struct mdp5_kms *mdp5_kms = get_kms(crtc); |
| 490 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); | 526 | struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); |
| 491 | uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); | 527 | uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); |
| 492 | uint32_t xres = crtc->mode.hdisplay; | ||
| 493 | uint32_t yres = crtc->mode.vdisplay; | ||
| 494 | uint32_t roi_w; | 528 | uint32_t roi_w; |
| 495 | uint32_t roi_h; | 529 | uint32_t roi_h; |
| 496 | unsigned long flags; | 530 | unsigned long flags; |
| 497 | 531 | ||
| 498 | x = (x > 0) ? x : 0; | 532 | /* In case the CRTC is disabled, just drop the cursor update */ |
| 499 | y = (y > 0) ? y : 0; | 533 | if (unlikely(!crtc->state->enable)) |
| 534 | return 0; | ||
| 500 | 535 | ||
| 501 | /* | 536 | mdp5_crtc->cursor.x = x = max(x, 0); |
| 502 | * Cursor Region Of Interest (ROI) is a plane read from cursor | 537 | mdp5_crtc->cursor.y = y = max(y, 0); |
| 503 | * buffer to render. The ROI region is determined by the visiblity of | 538 | |
| 504 | * the cursor point. In the default Cursor image the cursor point will | 539 | get_roi(crtc, &roi_w, &roi_h); |
| 505 | * be at the top left of the cursor image, unless it is specified | ||
| 506 | * otherwise using hotspot feature. | ||
| 507 | * | ||
| 508 | * If the cursor point reaches the right (xres - x < cursor.width) or | ||
| 509 | * bottom (yres - y < cursor.height) boundary of the screen, then ROI | ||
| 510 | * width and ROI height need to be evaluated to crop the cursor image | ||
| 511 | * accordingly. | ||
| 512 | * (xres-x) will be new cursor width when x > (xres - cursor.width) | ||
| 513 | * (yres-y) will be new cursor height when y > (yres - cursor.height) | ||
| 514 | */ | ||
| 515 | roi_w = min(mdp5_crtc->cursor.width, xres - x); | ||
| 516 | roi_h = min(mdp5_crtc->cursor.height, yres - y); | ||
| 517 | 540 | ||
| 518 | spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); | 541 | spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); |
| 519 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm), | 542 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm), |
| @@ -544,8 +567,8 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = { | |||
| 544 | static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = { | 567 | static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = { |
| 545 | .mode_fixup = mdp5_crtc_mode_fixup, | 568 | .mode_fixup = mdp5_crtc_mode_fixup, |
| 546 | .mode_set_nofb = mdp5_crtc_mode_set_nofb, | 569 | .mode_set_nofb = mdp5_crtc_mode_set_nofb, |
| 547 | .prepare = mdp5_crtc_disable, | 570 | .disable = mdp5_crtc_disable, |
| 548 | .commit = mdp5_crtc_enable, | 571 | .enable = mdp5_crtc_enable, |
| 549 | .atomic_check = mdp5_crtc_atomic_check, | 572 | .atomic_check = mdp5_crtc_atomic_check, |
| 550 | .atomic_begin = mdp5_crtc_atomic_begin, | 573 | .atomic_begin = mdp5_crtc_atomic_begin, |
| 551 | .atomic_flush = mdp5_crtc_atomic_flush, | 574 | .atomic_flush = mdp5_crtc_atomic_flush, |
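The cursor changes above factor the clamp into get_roi(): when the cursor position nears the right or bottom edge, only (xres - x) by (yres - y) pixels of the cursor buffer remain visible. A minimal C model of that arithmetic, independent of the driver types:

#include <stdint.h>
#include <stdio.h>

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

static void get_roi(uint32_t xres, uint32_t yres,
                    uint32_t cur_w, uint32_t cur_h,
                    uint32_t x, uint32_t y,
                    uint32_t *roi_w, uint32_t *roi_h)
{
        *roi_w = min_u32(cur_w, xres - x);      /* crop against the right edge  */
        *roi_h = min_u32(cur_h, yres - y);      /* crop against the bottom edge */
}

int main(void)
{
        uint32_t w, h;

        get_roi(1920, 1080, 64, 64, 100, 100, &w, &h);
        printf("mid-screen:      %ux%u\n", w, h);       /* 64x64 */

        get_roi(1920, 1080, 64, 64, 1900, 1060, &w, &h);
        printf("near the corner: %ux%u\n", w, h);       /* 20x20 */

        return 0;
}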
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c index d6a14bb99988..af0e02fa4f48 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c | |||
| @@ -267,14 +267,14 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder) | |||
| 267 | mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1); | 267 | mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1); |
| 268 | spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); | 268 | spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); |
| 269 | 269 | ||
| 270 | mdp5_encoder->enabled = false; | 270 | mdp5_encoder->enabled = true; |
| 271 | } | 271 | } |
| 272 | 272 | ||
| 273 | static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = { | 273 | static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = { |
| 274 | .mode_fixup = mdp5_encoder_mode_fixup, | 274 | .mode_fixup = mdp5_encoder_mode_fixup, |
| 275 | .mode_set = mdp5_encoder_mode_set, | 275 | .mode_set = mdp5_encoder_mode_set, |
| 276 | .prepare = mdp5_encoder_disable, | 276 | .disable = mdp5_encoder_disable, |
| 277 | .commit = mdp5_encoder_enable, | 277 | .enable = mdp5_encoder_enable, |
| 278 | }; | 278 | }; |
| 279 | 279 | ||
| 280 | /* initialize encoder */ | 280 | /* initialize encoder */ |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c index 70ac81edd40f..a9407105b9b7 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c | |||
| @@ -34,7 +34,10 @@ static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus) | |||
| 34 | void mdp5_irq_preinstall(struct msm_kms *kms) | 34 | void mdp5_irq_preinstall(struct msm_kms *kms) |
| 35 | { | 35 | { |
| 36 | struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); | 36 | struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); |
| 37 | mdp5_enable(mdp5_kms); | ||
| 37 | mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff); | 38 | mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff); |
| 39 | mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000); | ||
| 40 | mdp5_disable(mdp5_kms); | ||
| 38 | } | 41 | } |
| 39 | 42 | ||
| 40 | int mdp5_irq_postinstall(struct msm_kms *kms) | 43 | int mdp5_irq_postinstall(struct msm_kms *kms) |
| @@ -57,7 +60,9 @@ int mdp5_irq_postinstall(struct msm_kms *kms) | |||
| 57 | void mdp5_irq_uninstall(struct msm_kms *kms) | 60 | void mdp5_irq_uninstall(struct msm_kms *kms) |
| 58 | { | 61 | { |
| 59 | struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); | 62 | struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); |
| 63 | mdp5_enable(mdp5_kms); | ||
| 60 | mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000); | 64 | mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000); |
| 65 | mdp5_disable(mdp5_kms); | ||
| 61 | } | 66 | } |
| 62 | 67 | ||
| 63 | static void mdp5_irq_mdp(struct mdp_kms *mdp_kms) | 68 | static void mdp5_irq_mdp(struct mdp_kms *mdp_kms) |
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index 871aa2108dc6..18fd643b6e69 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c | |||
| @@ -219,8 +219,10 @@ int msm_atomic_commit(struct drm_device *dev, | |||
| 219 | * mark our set of crtc's as busy: | 219 | * mark our set of crtc's as busy: |
| 220 | */ | 220 | */ |
| 221 | ret = start_atomic(dev->dev_private, c->crtc_mask); | 221 | ret = start_atomic(dev->dev_private, c->crtc_mask); |
| 222 | if (ret) | 222 | if (ret) { |
| 223 | kfree(c); | ||
| 223 | return ret; | 224 | return ret; |
| 225 | } | ||
| 224 | 226 | ||
| 225 | /* | 227 | /* |
| 226 | * This is the point of no return - everything below never fails except | 228 | * This is the point of no return - everything below never fails except |
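The msm_atomic hunk plugs a leak on the error path: the commit state 'c' was allocated earlier in msm_atomic_commit(), so a failing start_atomic() must free it before returning. A generic, stand-alone model of the rule (all names below are made up):

#include <stdlib.h>
#include <stdio.h>

struct commit { int crtc_mask; };

static int start_atomic(int mask) { return mask ? 0 : -1; }     /* pretends to fail for mask 0 */

static int do_commit(int mask)
{
        struct commit *c = calloc(1, sizeof(*c));
        if (!c)
                return -12;                     /* -ENOMEM */
        c->crtc_mask = mask;

        int ret = start_atomic(c->crtc_mask);
        if (ret) {
                free(c);                        /* the fix: release c on the error path too */
                return ret;
        }

        /* in the driver c is released later by the completion path; freed here
         * only to keep this model leak-free */
        free(c);
        return 0;
}

int main(void)
{
        printf("failing path: %d\n", do_commit(0));
        printf("success path: %d\n", do_commit(1));
        return 0;
}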
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 79924e4b1b49..6751553abe4a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
| @@ -418,7 +418,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper, | |||
| 418 | nouveau_fbcon_zfill(dev, fbcon); | 418 | nouveau_fbcon_zfill(dev, fbcon); |
| 419 | 419 | ||
| 420 | /* To allow resizeing without swapping buffers */ | 420 | /* To allow resizeing without swapping buffers */ |
| 421 | NV_INFO(drm, "allocated %dx%d fb: 0x%lx, bo %p\n", | 421 | NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n", |
| 422 | nouveau_fb->base.width, nouveau_fb->base.height, | 422 | nouveau_fb->base.width, nouveau_fb->base.height, |
| 423 | nvbo->bo.offset, nvbo); | 423 | nvbo->bo.offset, nvbo); |
| 424 | 424 | ||
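The nouveau print fix above (and the ttm_bo one at the end of this series) is the usual 64-bit format-string correction: buffer offsets are 64-bit, so "%lx" truncates or warns on 32-bit builds, while "%llx" with an unsigned long long (or u64) argument is the portable spelling. In plain C:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t offset = 0x1234567890ULL;      /* deliberately wider than 32 bits */

        /* printf("0x%lx\n", offset) would be wrong where long is 32-bit */
        printf("bo offset: 0x%llx\n", (unsigned long long)offset);
        printf("bo offset: 0x%" PRIx64 "\n", offset);   /* equivalent, via inttypes.h */
        return 0;
}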
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index ed644a4f6f57..86807ee91bd1 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
| @@ -1405,6 +1405,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, | |||
| 1405 | (x << 16) | y); | 1405 | (x << 16) | y); |
| 1406 | viewport_w = crtc->mode.hdisplay; | 1406 | viewport_w = crtc->mode.hdisplay; |
| 1407 | viewport_h = (crtc->mode.vdisplay + 1) & ~1; | 1407 | viewport_h = (crtc->mode.vdisplay + 1) & ~1; |
| 1408 | if ((rdev->family >= CHIP_BONAIRE) && | ||
| 1409 | (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)) | ||
| 1410 | viewport_h *= 2; | ||
| 1408 | WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, | 1411 | WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, |
| 1409 | (viewport_w << 16) | viewport_h); | 1412 | (viewport_w << 16) | viewport_h); |
| 1410 | 1413 | ||
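The atombios_crtc hunk doubles the programmed viewport height for interlaced modes on CHIP_BONAIRE and newer, on top of the existing round-up to an even line count. The arithmetic in isolation (the helper name is made up):

#include <stdint.h>
#include <stdio.h>

static uint32_t viewport_height(uint32_t vdisplay, int interlaced, int is_bonaire_or_newer)
{
        uint32_t h = (vdisplay + 1) & ~1u;      /* round up to an even number of lines */

        if (interlaced && is_bonaire_or_newer)
                h *= 2;                         /* the new rule from the hunk above */
        return h;
}

int main(void)
{
        printf("1080p: %u\n", viewport_height(1080, 0, 1));     /* 1080 */
        printf("1080i: %u\n", viewport_height(1080, 1, 1));     /* 2160 */
        return 0;
}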
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 7fe7b749e182..c39c1d0d9d4e 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c | |||
| @@ -1626,7 +1626,6 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) | |||
| 1626 | struct radeon_connector *radeon_connector = NULL; | 1626 | struct radeon_connector *radeon_connector = NULL; |
| 1627 | struct radeon_connector_atom_dig *radeon_dig_connector = NULL; | 1627 | struct radeon_connector_atom_dig *radeon_dig_connector = NULL; |
| 1628 | bool travis_quirk = false; | 1628 | bool travis_quirk = false; |
| 1629 | int encoder_mode; | ||
| 1630 | 1629 | ||
| 1631 | if (connector) { | 1630 | if (connector) { |
| 1632 | radeon_connector = to_radeon_connector(connector); | 1631 | radeon_connector = to_radeon_connector(connector); |
| @@ -1722,13 +1721,6 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) | |||
| 1722 | } | 1721 | } |
| 1723 | break; | 1722 | break; |
| 1724 | } | 1723 | } |
| 1725 | |||
| 1726 | encoder_mode = atombios_get_encoder_mode(encoder); | ||
| 1727 | if (connector && (radeon_audio != 0) && | ||
| 1728 | ((encoder_mode == ATOM_ENCODER_MODE_HDMI) || | ||
| 1729 | (ENCODER_MODE_IS_DP(encoder_mode) && | ||
| 1730 | drm_detect_monitor_audio(radeon_connector_edid(connector))))) | ||
| 1731 | radeon_audio_dpms(encoder, mode); | ||
| 1732 | } | 1724 | } |
| 1733 | 1725 | ||
| 1734 | static void | 1726 | static void |
| @@ -1737,10 +1729,19 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
| 1737 | struct drm_device *dev = encoder->dev; | 1729 | struct drm_device *dev = encoder->dev; |
| 1738 | struct radeon_device *rdev = dev->dev_private; | 1730 | struct radeon_device *rdev = dev->dev_private; |
| 1739 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 1731 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
| 1732 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
| 1733 | int encoder_mode = atombios_get_encoder_mode(encoder); | ||
| 1740 | 1734 | ||
| 1741 | DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n", | 1735 | DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n", |
| 1742 | radeon_encoder->encoder_id, mode, radeon_encoder->devices, | 1736 | radeon_encoder->encoder_id, mode, radeon_encoder->devices, |
| 1743 | radeon_encoder->active_device); | 1737 | radeon_encoder->active_device); |
| 1738 | |||
| 1739 | if (connector && (radeon_audio != 0) && | ||
| 1740 | ((encoder_mode == ATOM_ENCODER_MODE_HDMI) || | ||
| 1741 | (ENCODER_MODE_IS_DP(encoder_mode) && | ||
| 1742 | drm_detect_monitor_audio(radeon_connector_edid(connector))))) | ||
| 1743 | radeon_audio_dpms(encoder, mode); | ||
| 1744 | |||
| 1744 | switch (radeon_encoder->encoder_id) { | 1745 | switch (radeon_encoder->encoder_id) { |
| 1745 | case ENCODER_OBJECT_ID_INTERNAL_TMDS1: | 1746 | case ENCODER_OBJECT_ID_INTERNAL_TMDS1: |
| 1746 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: | 1747 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: |
| @@ -2170,12 +2171,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
| 2170 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: | 2171 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: |
| 2171 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | 2172 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: |
| 2172 | /* handled in dpms */ | 2173 | /* handled in dpms */ |
| 2173 | encoder_mode = atombios_get_encoder_mode(encoder); | ||
| 2174 | if (connector && (radeon_audio != 0) && | ||
| 2175 | ((encoder_mode == ATOM_ENCODER_MODE_HDMI) || | ||
| 2176 | (ENCODER_MODE_IS_DP(encoder_mode) && | ||
| 2177 | drm_detect_monitor_audio(radeon_connector_edid(connector))))) | ||
| 2178 | radeon_audio_mode_set(encoder, adjusted_mode); | ||
| 2179 | break; | 2174 | break; |
| 2180 | case ENCODER_OBJECT_ID_INTERNAL_DDI: | 2175 | case ENCODER_OBJECT_ID_INTERNAL_DDI: |
| 2181 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: | 2176 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: |
| @@ -2197,6 +2192,13 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
| 2197 | } | 2192 | } |
| 2198 | 2193 | ||
| 2199 | atombios_apply_encoder_quirks(encoder, adjusted_mode); | 2194 | atombios_apply_encoder_quirks(encoder, adjusted_mode); |
| 2195 | |||
| 2196 | encoder_mode = atombios_get_encoder_mode(encoder); | ||
| 2197 | if (connector && (radeon_audio != 0) && | ||
| 2198 | ((encoder_mode == ATOM_ENCODER_MODE_HDMI) || | ||
| 2199 | (ENCODER_MODE_IS_DP(encoder_mode) && | ||
| 2200 | drm_detect_monitor_audio(radeon_connector_edid(connector))))) | ||
| 2201 | radeon_audio_mode_set(encoder, adjusted_mode); | ||
| 2200 | } | 2202 | } |
| 2201 | 2203 | ||
| 2202 | static bool | 2204 | static bool |
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 0c993da9c8fb..3e670d344a20 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c | |||
| @@ -7555,6 +7555,9 @@ int cik_irq_set(struct radeon_device *rdev) | |||
| 7555 | WREG32(DC_HPD5_INT_CONTROL, hpd5); | 7555 | WREG32(DC_HPD5_INT_CONTROL, hpd5); |
| 7556 | WREG32(DC_HPD6_INT_CONTROL, hpd6); | 7556 | WREG32(DC_HPD6_INT_CONTROL, hpd6); |
| 7557 | 7557 | ||
| 7558 | /* posting read */ | ||
| 7559 | RREG32(SRBM_STATUS); | ||
| 7560 | |||
| 7558 | return 0; | 7561 | return 0; |
| 7559 | } | 7562 | } |
| 7560 | 7563 | ||
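The cik_irq_set() hunk above, like the evergreen, r100, r600, rs600 and si hunks later in this series, finishes with a read of a status register. That is the classic MMIO posting-read idiom: a read on the same bus forces any posted (buffered) writes to reach the device before the function returns. A userspace sketch that only models the ordering intent with a volatile pointer:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_regs[2];                   /* [0] = INT_CNTL, [1] = STATUS */
static volatile uint32_t *const mmio = fake_regs;

static void wreg32(unsigned off, uint32_t val)  { mmio[off] = val; }
static uint32_t rreg32(unsigned off)            { return mmio[off]; }

static void irq_set(uint32_t mask)
{
        wreg32(0, mask);        /* may be posted (buffered) on the way to the device */
        (void)rreg32(1);        /* posting read: flush the write before returning    */
}

int main(void)
{
        irq_set(0x1);
        printf("INT_CNTL = 0x%08x\n", (unsigned)rreg32(0));
        return 0;
}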
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c index 192c80389151..3adc2afe32aa 100644 --- a/drivers/gpu/drm/radeon/dce6_afmt.c +++ b/drivers/gpu/drm/radeon/dce6_afmt.c | |||
| @@ -26,6 +26,9 @@ | |||
| 26 | #include "radeon_audio.h" | 26 | #include "radeon_audio.h" |
| 27 | #include "sid.h" | 27 | #include "sid.h" |
| 28 | 28 | ||
| 29 | #define DCE8_DCCG_AUDIO_DTO1_PHASE 0x05b8 | ||
| 30 | #define DCE8_DCCG_AUDIO_DTO1_MODULE 0x05bc | ||
| 31 | |||
| 29 | u32 dce6_endpoint_rreg(struct radeon_device *rdev, | 32 | u32 dce6_endpoint_rreg(struct radeon_device *rdev, |
| 30 | u32 block_offset, u32 reg) | 33 | u32 block_offset, u32 reg) |
| 31 | { | 34 | { |
| @@ -252,72 +255,67 @@ void dce6_audio_enable(struct radeon_device *rdev, | |||
| 252 | void dce6_hdmi_audio_set_dto(struct radeon_device *rdev, | 255 | void dce6_hdmi_audio_set_dto(struct radeon_device *rdev, |
| 253 | struct radeon_crtc *crtc, unsigned int clock) | 256 | struct radeon_crtc *crtc, unsigned int clock) |
| 254 | { | 257 | { |
| 255 | /* Two dtos; generally use dto0 for HDMI */ | 258 | /* Two dtos; generally use dto0 for HDMI */ |
| 256 | u32 value = 0; | 259 | u32 value = 0; |
| 257 | 260 | ||
| 258 | if (crtc) | 261 | if (crtc) |
| 259 | value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id); | 262 | value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id); |
| 260 | 263 | ||
| 261 | WREG32(DCCG_AUDIO_DTO_SOURCE, value); | 264 | WREG32(DCCG_AUDIO_DTO_SOURCE, value); |
| 262 | 265 | ||
| 263 | /* Express [24MHz / target pixel clock] as an exact rational | 266 | /* Express [24MHz / target pixel clock] as an exact rational |
| 264 | * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE | 267 | * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE |
| 265 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator | 268 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator |
| 266 | */ | 269 | */ |
| 267 | WREG32(DCCG_AUDIO_DTO0_PHASE, 24000); | 270 | WREG32(DCCG_AUDIO_DTO0_PHASE, 24000); |
| 268 | WREG32(DCCG_AUDIO_DTO0_MODULE, clock); | 271 | WREG32(DCCG_AUDIO_DTO0_MODULE, clock); |
| 269 | } | 272 | } |
| 270 | 273 | ||
| 271 | void dce6_dp_audio_set_dto(struct radeon_device *rdev, | 274 | void dce6_dp_audio_set_dto(struct radeon_device *rdev, |
| 272 | struct radeon_crtc *crtc, unsigned int clock) | 275 | struct radeon_crtc *crtc, unsigned int clock) |
| 273 | { | 276 | { |
| 274 | /* Two dtos; generally use dto1 for DP */ | 277 | /* Two dtos; generally use dto1 for DP */ |
| 275 | u32 value = 0; | 278 | u32 value = 0; |
| 276 | value |= DCCG_AUDIO_DTO_SEL; | 279 | value |= DCCG_AUDIO_DTO_SEL; |
| 277 | 280 | ||
| 278 | if (crtc) | 281 | if (crtc) |
| 279 | value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id); | 282 | value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id); |
| 280 | 283 | ||
| 281 | WREG32(DCCG_AUDIO_DTO_SOURCE, value); | 284 | WREG32(DCCG_AUDIO_DTO_SOURCE, value); |
| 282 | 285 | ||
| 283 | /* Express [24MHz / target pixel clock] as an exact rational | 286 | /* Express [24MHz / target pixel clock] as an exact rational |
| 284 | * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE | 287 | * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE |
| 285 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator | 288 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator |
| 286 | */ | 289 | */ |
| 287 | WREG32(DCCG_AUDIO_DTO1_PHASE, 24000); | 290 | if (ASIC_IS_DCE8(rdev)) { |
| 288 | WREG32(DCCG_AUDIO_DTO1_MODULE, clock); | 291 | WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000); |
| 292 | WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock); | ||
| 293 | } else { | ||
| 294 | WREG32(DCCG_AUDIO_DTO1_PHASE, 24000); | ||
| 295 | WREG32(DCCG_AUDIO_DTO1_MODULE, clock); | ||
| 296 | } | ||
| 289 | } | 297 | } |
| 290 | 298 | ||
| 291 | void dce6_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable) | 299 | void dce6_dp_enable(struct drm_encoder *encoder, bool enable) |
| 292 | { | 300 | { |
| 293 | struct drm_device *dev = encoder->dev; | 301 | struct drm_device *dev = encoder->dev; |
| 294 | struct radeon_device *rdev = dev->dev_private; | 302 | struct radeon_device *rdev = dev->dev_private; |
| 295 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 303 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
| 296 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 304 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
| 297 | uint32_t offset; | ||
| 298 | 305 | ||
| 299 | if (!dig || !dig->afmt) | 306 | if (!dig || !dig->afmt) |
| 300 | return; | 307 | return; |
| 301 | 308 | ||
| 302 | offset = dig->afmt->offset; | ||
| 303 | |||
| 304 | if (enable) { | 309 | if (enable) { |
| 305 | if (dig->afmt->enabled) | 310 | WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset, |
| 306 | return; | 311 | EVERGREEN_DP_SEC_TIMESTAMP_MODE(1)); |
| 307 | 312 | WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, | |
| 308 | WREG32(EVERGREEN_DP_SEC_TIMESTAMP + offset, EVERGREEN_DP_SEC_TIMESTAMP_MODE(1)); | 313 | EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */ |
| 309 | WREG32(EVERGREEN_DP_SEC_CNTL + offset, | 314 | EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */ |
| 310 | EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */ | 315 | EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */ |
| 311 | EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */ | 316 | EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */ |
| 312 | EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */ | ||
| 313 | EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */ | ||
| 314 | radeon_audio_enable(rdev, dig->afmt->pin, true); | ||
| 315 | } else { | 317 | } else { |
| 316 | if (!dig->afmt->enabled) | 318 | WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0); |
| 317 | return; | ||
| 318 | |||
| 319 | WREG32(EVERGREEN_DP_SEC_CNTL + offset, 0); | ||
| 320 | radeon_audio_enable(rdev, dig->afmt->pin, false); | ||
| 321 | } | 319 | } |
| 322 | 320 | ||
| 323 | dig->afmt->enabled = enable; | 321 | dig->afmt->enabled = enable; |
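The dce6_dp_audio_set_dto() change above (together with the sid.h hunk near the end of this series) reflects that the DCCG_AUDIO_DTO1 pair sits at different offsets on DCE8: the generic defines move to 0x05c0/0x05c4 while DCE8 keeps 0x05b8/0x05bc, so the write is steered by ASIC generation. A stand-alone model of "same programming sequence, per-generation register map"; wreg32() here is just a printf stand-in for the MMIO write:

#include <stdint.h>
#include <stdio.h>

struct dto_regs { uint32_t phase; uint32_t module; };

static const struct dto_regs dce6_dto1 = { 0x05c0, 0x05c4 };    /* sid.h after the patch */
static const struct dto_regs dce8_dto1 = { 0x05b8, 0x05bc };    /* DCE8_* defines above  */

static void wreg32(uint32_t reg, uint32_t val)
{
        printf("WREG32(0x%04x) = %u\n", (unsigned)reg, (unsigned)val);
}

static void dp_audio_set_dto(int is_dce8, unsigned int clock)
{
        const struct dto_regs *r = is_dce8 ? &dce8_dto1 : &dce6_dto1;

        wreg32(r->phase, 24000);        /* 24 MHz reference, as in the patch */
        wreg32(r->module, clock);       /* target clock                      */
}

int main(void)
{
        dp_audio_set_dto(0, 148500);    /* DCE6/SI path */
        dp_audio_set_dto(1, 148500);    /* DCE8 path    */
        return 0;
}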
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 4c0e24b3bb90..973df064c14f 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
| @@ -4593,6 +4593,9 @@ int evergreen_irq_set(struct radeon_device *rdev) | |||
| 4593 | WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5); | 4593 | WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5); |
| 4594 | WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6); | 4594 | WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6); |
| 4595 | 4595 | ||
| 4596 | /* posting read */ | ||
| 4597 | RREG32(SRBM_STATUS); | ||
| 4598 | |||
| 4596 | return 0; | 4599 | return 0; |
| 4597 | } | 4600 | } |
| 4598 | 4601 | ||
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index 1d9aebc79595..c18d4ecbd95d 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c | |||
| @@ -272,7 +272,7 @@ void dce4_hdmi_audio_set_dto(struct radeon_device *rdev, | |||
| 272 | } | 272 | } |
| 273 | 273 | ||
| 274 | void dce4_dp_audio_set_dto(struct radeon_device *rdev, | 274 | void dce4_dp_audio_set_dto(struct radeon_device *rdev, |
| 275 | struct radeon_crtc *crtc, unsigned int clock) | 275 | struct radeon_crtc *crtc, unsigned int clock) |
| 276 | { | 276 | { |
| 277 | u32 value; | 277 | u32 value; |
| 278 | 278 | ||
| @@ -294,7 +294,7 @@ void dce4_dp_audio_set_dto(struct radeon_device *rdev, | |||
| 294 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator | 294 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator |
| 295 | */ | 295 | */ |
| 296 | WREG32(DCCG_AUDIO_DTO1_PHASE, 24000); | 296 | WREG32(DCCG_AUDIO_DTO1_PHASE, 24000); |
| 297 | WREG32(DCCG_AUDIO_DTO1_MODULE, rdev->clock.max_pixel_clock * 10); | 297 | WREG32(DCCG_AUDIO_DTO1_MODULE, clock); |
| 298 | } | 298 | } |
| 299 | 299 | ||
| 300 | void dce4_set_vbi_packet(struct drm_encoder *encoder, u32 offset) | 300 | void dce4_set_vbi_packet(struct drm_encoder *encoder, u32 offset) |
| @@ -350,20 +350,9 @@ void dce4_set_audio_packet(struct drm_encoder *encoder, u32 offset) | |||
| 350 | struct drm_device *dev = encoder->dev; | 350 | struct drm_device *dev = encoder->dev; |
| 351 | struct radeon_device *rdev = dev->dev_private; | 351 | struct radeon_device *rdev = dev->dev_private; |
| 352 | 352 | ||
| 353 | WREG32(HDMI_INFOFRAME_CONTROL0 + offset, | ||
| 354 | HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */ | ||
| 355 | HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */ | ||
| 356 | |||
| 357 | WREG32(AFMT_INFOFRAME_CONTROL0 + offset, | 353 | WREG32(AFMT_INFOFRAME_CONTROL0 + offset, |
| 358 | AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */ | 354 | AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */ |
| 359 | 355 | ||
| 360 | WREG32(HDMI_INFOFRAME_CONTROL1 + offset, | ||
| 361 | HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */ | ||
| 362 | |||
| 363 | WREG32(HDMI_AUDIO_PACKET_CONTROL + offset, | ||
| 364 | HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */ | ||
| 365 | HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */ | ||
| 366 | |||
| 367 | WREG32(AFMT_60958_0 + offset, | 356 | WREG32(AFMT_60958_0 + offset, |
| 368 | AFMT_60958_CS_CHANNEL_NUMBER_L(1)); | 357 | AFMT_60958_CS_CHANNEL_NUMBER_L(1)); |
| 369 | 358 | ||
| @@ -408,15 +397,19 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable) | |||
| 408 | if (!dig || !dig->afmt) | 397 | if (!dig || !dig->afmt) |
| 409 | return; | 398 | return; |
| 410 | 399 | ||
| 411 | /* Silent, r600_hdmi_enable will raise WARN for us */ | 400 | if (enable) { |
| 412 | if (enable && dig->afmt->enabled) | 401 | WREG32(HDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, |
| 413 | return; | 402 | HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */ |
| 414 | if (!enable && !dig->afmt->enabled) | 403 | |
| 415 | return; | 404 | WREG32(HDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, |
| 405 | HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */ | ||
| 406 | HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */ | ||
| 416 | 407 | ||
| 417 | if (!enable && dig->afmt->pin) { | 408 | WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, |
| 418 | radeon_audio_enable(rdev, dig->afmt->pin, 0); | 409 | HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */ |
| 419 | dig->afmt->pin = NULL; | 410 | HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */ |
| 411 | } else { | ||
| 412 | WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, 0); | ||
| 420 | } | 413 | } |
| 421 | 414 | ||
| 422 | dig->afmt->enabled = enable; | 415 | dig->afmt->enabled = enable; |
| @@ -425,33 +418,28 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable) | |||
| 425 | enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id); | 418 | enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id); |
| 426 | } | 419 | } |
| 427 | 420 | ||
| 428 | void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable) | 421 | void evergreen_dp_enable(struct drm_encoder *encoder, bool enable) |
| 429 | { | 422 | { |
| 430 | struct drm_device *dev = encoder->dev; | 423 | struct drm_device *dev = encoder->dev; |
| 431 | struct radeon_device *rdev = dev->dev_private; | 424 | struct radeon_device *rdev = dev->dev_private; |
| 432 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 425 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
| 433 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 426 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
| 434 | uint32_t offset; | ||
| 435 | 427 | ||
| 436 | if (!dig || !dig->afmt) | 428 | if (!dig || !dig->afmt) |
| 437 | return; | 429 | return; |
| 438 | 430 | ||
| 439 | offset = dig->afmt->offset; | ||
| 440 | |||
| 441 | if (enable) { | 431 | if (enable) { |
| 442 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | 432 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
| 443 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 433 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
| 444 | struct radeon_connector_atom_dig *dig_connector; | 434 | struct radeon_connector_atom_dig *dig_connector; |
| 445 | uint32_t val; | 435 | uint32_t val; |
| 446 | 436 | ||
| 447 | if (dig->afmt->enabled) | 437 | WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset, |
| 448 | return; | 438 | EVERGREEN_DP_SEC_TIMESTAMP_MODE(1)); |
| 449 | |||
| 450 | WREG32(EVERGREEN_DP_SEC_TIMESTAMP + offset, EVERGREEN_DP_SEC_TIMESTAMP_MODE(1)); | ||
| 451 | 439 | ||
| 452 | if (radeon_connector->con_priv) { | 440 | if (radeon_connector->con_priv) { |
| 453 | dig_connector = radeon_connector->con_priv; | 441 | dig_connector = radeon_connector->con_priv; |
| 454 | val = RREG32(EVERGREEN_DP_SEC_AUD_N + offset); | 442 | val = RREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset); |
| 455 | val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf); | 443 | val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf); |
| 456 | 444 | ||
| 457 | if (dig_connector->dp_clock == 162000) | 445 | if (dig_connector->dp_clock == 162000) |
| @@ -459,21 +447,16 @@ void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable) | |||
| 459 | else | 447 | else |
| 460 | val |= EVERGREEN_DP_SEC_N_BASE_MULTIPLE(5); | 448 | val |= EVERGREEN_DP_SEC_N_BASE_MULTIPLE(5); |
| 461 | 449 | ||
| 462 | WREG32(EVERGREEN_DP_SEC_AUD_N + offset, val); | 450 | WREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset, val); |
| 463 | } | 451 | } |
| 464 | 452 | ||
| 465 | WREG32(EVERGREEN_DP_SEC_CNTL + offset, | 453 | WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, |
| 466 | EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */ | 454 | EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */ |
| 467 | EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */ | 455 | EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */ |
| 468 | EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */ | 456 | EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */ |
| 469 | EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */ | 457 | EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */ |
| 470 | radeon_audio_enable(rdev, dig->afmt->pin, 0xf); | ||
| 471 | } else { | 458 | } else { |
| 472 | if (!dig->afmt->enabled) | 459 | WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0); |
| 473 | return; | ||
| 474 | |||
| 475 | WREG32(EVERGREEN_DP_SEC_CNTL + offset, 0); | ||
| 476 | radeon_audio_enable(rdev, dig->afmt->pin, 0); | ||
| 477 | } | 460 | } |
| 478 | 461 | ||
| 479 | dig->afmt->enabled = enable; | 462 | dig->afmt->enabled = enable; |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 279801ca5110..04f2514f7564 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
| @@ -728,6 +728,10 @@ int r100_irq_set(struct radeon_device *rdev) | |||
| 728 | tmp |= RADEON_FP2_DETECT_MASK; | 728 | tmp |= RADEON_FP2_DETECT_MASK; |
| 729 | } | 729 | } |
| 730 | WREG32(RADEON_GEN_INT_CNTL, tmp); | 730 | WREG32(RADEON_GEN_INT_CNTL, tmp); |
| 731 | |||
| 732 | /* read back to post the write */ | ||
| 733 | RREG32(RADEON_GEN_INT_CNTL); | ||
| 734 | |||
| 731 | return 0; | 735 | return 0; |
| 732 | } | 736 | } |
| 733 | 737 | ||
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 07a71a2488c9..2fcad344492f 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
| @@ -3784,6 +3784,9 @@ int r600_irq_set(struct radeon_device *rdev) | |||
| 3784 | WREG32(RV770_CG_THERMAL_INT, thermal_int); | 3784 | WREG32(RV770_CG_THERMAL_INT, thermal_int); |
| 3785 | } | 3785 | } |
| 3786 | 3786 | ||
| 3787 | /* posting read */ | ||
| 3788 | RREG32(R_000E50_SRBM_STATUS); | ||
| 3789 | |||
| 3787 | return 0; | 3790 | return 0; |
| 3788 | } | 3791 | } |
| 3789 | 3792 | ||
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index 62c91ed669ce..dd6606b8e23c 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c | |||
| @@ -476,17 +476,6 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable) | |||
| 476 | if (!dig || !dig->afmt) | 476 | if (!dig || !dig->afmt) |
| 477 | return; | 477 | return; |
| 478 | 478 | ||
| 479 | /* Silent, r600_hdmi_enable will raise WARN for us */ | ||
| 480 | if (enable && dig->afmt->enabled) | ||
| 481 | return; | ||
| 482 | if (!enable && !dig->afmt->enabled) | ||
| 483 | return; | ||
| 484 | |||
| 485 | if (!enable && dig->afmt->pin) { | ||
| 486 | radeon_audio_enable(rdev, dig->afmt->pin, 0); | ||
| 487 | dig->afmt->pin = NULL; | ||
| 488 | } | ||
| 489 | |||
| 490 | /* Older chipsets require setting HDMI and routing manually */ | 479 | /* Older chipsets require setting HDMI and routing manually */ |
| 491 | if (!ASIC_IS_DCE3(rdev)) { | 480 | if (!ASIC_IS_DCE3(rdev)) { |
| 492 | if (enable) | 481 | if (enable) |
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c index a3ceef6d9632..b21ef69a34ac 100644 --- a/drivers/gpu/drm/radeon/radeon_audio.c +++ b/drivers/gpu/drm/radeon/radeon_audio.c | |||
| @@ -101,8 +101,8 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, | |||
| 101 | struct drm_display_mode *mode); | 101 | struct drm_display_mode *mode); |
| 102 | void r600_hdmi_enable(struct drm_encoder *encoder, bool enable); | 102 | void r600_hdmi_enable(struct drm_encoder *encoder, bool enable); |
| 103 | void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable); | 103 | void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable); |
| 104 | void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable); | 104 | void evergreen_dp_enable(struct drm_encoder *encoder, bool enable); |
| 105 | void dce6_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable); | 105 | void dce6_dp_enable(struct drm_encoder *encoder, bool enable); |
| 106 | 106 | ||
| 107 | static const u32 pin_offsets[7] = | 107 | static const u32 pin_offsets[7] = |
| 108 | { | 108 | { |
| @@ -210,7 +210,7 @@ static struct radeon_audio_funcs dce4_dp_funcs = { | |||
| 210 | .set_avi_packet = evergreen_set_avi_packet, | 210 | .set_avi_packet = evergreen_set_avi_packet, |
| 211 | .set_audio_packet = dce4_set_audio_packet, | 211 | .set_audio_packet = dce4_set_audio_packet, |
| 212 | .mode_set = radeon_audio_dp_mode_set, | 212 | .mode_set = radeon_audio_dp_mode_set, |
| 213 | .dpms = evergreen_enable_dp_audio_packets, | 213 | .dpms = evergreen_dp_enable, |
| 214 | }; | 214 | }; |
| 215 | 215 | ||
| 216 | static struct radeon_audio_funcs dce6_hdmi_funcs = { | 216 | static struct radeon_audio_funcs dce6_hdmi_funcs = { |
| @@ -240,7 +240,7 @@ static struct radeon_audio_funcs dce6_dp_funcs = { | |||
| 240 | .set_avi_packet = evergreen_set_avi_packet, | 240 | .set_avi_packet = evergreen_set_avi_packet, |
| 241 | .set_audio_packet = dce4_set_audio_packet, | 241 | .set_audio_packet = dce4_set_audio_packet, |
| 242 | .mode_set = radeon_audio_dp_mode_set, | 242 | .mode_set = radeon_audio_dp_mode_set, |
| 243 | .dpms = dce6_enable_dp_audio_packets, | 243 | .dpms = dce6_dp_enable, |
| 244 | }; | 244 | }; |
| 245 | 245 | ||
| 246 | static void radeon_audio_interface_init(struct radeon_device *rdev) | 246 | static void radeon_audio_interface_init(struct radeon_device *rdev) |
| @@ -452,7 +452,7 @@ void radeon_audio_enable(struct radeon_device *rdev, | |||
| 452 | } | 452 | } |
| 453 | 453 | ||
| 454 | void radeon_audio_detect(struct drm_connector *connector, | 454 | void radeon_audio_detect(struct drm_connector *connector, |
| 455 | enum drm_connector_status status) | 455 | enum drm_connector_status status) |
| 456 | { | 456 | { |
| 457 | struct radeon_device *rdev; | 457 | struct radeon_device *rdev; |
| 458 | struct radeon_encoder *radeon_encoder; | 458 | struct radeon_encoder *radeon_encoder; |
| @@ -483,14 +483,11 @@ void radeon_audio_detect(struct drm_connector *connector, | |||
| 483 | else | 483 | else |
| 484 | radeon_encoder->audio = rdev->audio.hdmi_funcs; | 484 | radeon_encoder->audio = rdev->audio.hdmi_funcs; |
| 485 | 485 | ||
| 486 | radeon_audio_write_speaker_allocation(connector->encoder); | 486 | dig->afmt->pin = radeon_audio_get_pin(connector->encoder); |
| 487 | radeon_audio_write_sad_regs(connector->encoder); | ||
| 488 | if (connector->encoder->crtc) | ||
| 489 | radeon_audio_write_latency_fields(connector->encoder, | ||
| 490 | &connector->encoder->crtc->mode); | ||
| 491 | radeon_audio_enable(rdev, dig->afmt->pin, 0xf); | 487 | radeon_audio_enable(rdev, dig->afmt->pin, 0xf); |
| 492 | } else { | 488 | } else { |
| 493 | radeon_audio_enable(rdev, dig->afmt->pin, 0); | 489 | radeon_audio_enable(rdev, dig->afmt->pin, 0); |
| 490 | dig->afmt->pin = NULL; | ||
| 494 | } | 491 | } |
| 495 | } | 492 | } |
| 496 | 493 | ||
| @@ -694,23 +691,22 @@ static void radeon_audio_set_mute(struct drm_encoder *encoder, bool mute) | |||
| 694 | * update the info frames with the data from the current display mode | 691 | * update the info frames with the data from the current display mode |
| 695 | */ | 692 | */ |
| 696 | static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder, | 693 | static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder, |
| 697 | struct drm_display_mode *mode) | 694 | struct drm_display_mode *mode) |
| 698 | { | 695 | { |
| 699 | struct radeon_device *rdev = encoder->dev->dev_private; | ||
| 700 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 696 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
| 701 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 697 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
| 702 | 698 | ||
| 703 | if (!dig || !dig->afmt) | 699 | if (!dig || !dig->afmt) |
| 704 | return; | 700 | return; |
| 705 | 701 | ||
| 706 | /* disable audio prior to setting up hw */ | 702 | radeon_audio_set_mute(encoder, true); |
| 707 | dig->afmt->pin = radeon_audio_get_pin(encoder); | ||
| 708 | radeon_audio_enable(rdev, dig->afmt->pin, 0); | ||
| 709 | 703 | ||
| 704 | radeon_audio_write_speaker_allocation(encoder); | ||
| 705 | radeon_audio_write_sad_regs(encoder); | ||
| 706 | radeon_audio_write_latency_fields(encoder, mode); | ||
| 710 | radeon_audio_set_dto(encoder, mode->clock); | 707 | radeon_audio_set_dto(encoder, mode->clock); |
| 711 | radeon_audio_set_vbi_packet(encoder); | 708 | radeon_audio_set_vbi_packet(encoder); |
| 712 | radeon_hdmi_set_color_depth(encoder); | 709 | radeon_hdmi_set_color_depth(encoder); |
| 713 | radeon_audio_set_mute(encoder, false); | ||
| 714 | radeon_audio_update_acr(encoder, mode->clock); | 710 | radeon_audio_update_acr(encoder, mode->clock); |
| 715 | radeon_audio_set_audio_packet(encoder); | 711 | radeon_audio_set_audio_packet(encoder); |
| 716 | radeon_audio_select_pin(encoder); | 712 | radeon_audio_select_pin(encoder); |
| @@ -718,8 +714,7 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder, | |||
| 718 | if (radeon_audio_set_avi_packet(encoder, mode) < 0) | 714 | if (radeon_audio_set_avi_packet(encoder, mode) < 0) |
| 719 | return; | 715 | return; |
| 720 | 716 | ||
| 721 | /* enable audio after to setting up hw */ | 717 | radeon_audio_set_mute(encoder, false); |
| 722 | radeon_audio_enable(rdev, dig->afmt->pin, 0xf); | ||
| 723 | } | 718 | } |
| 724 | 719 | ||
| 725 | static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, | 720 | static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, |
| @@ -729,23 +724,26 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, | |||
| 729 | struct radeon_device *rdev = dev->dev_private; | 724 | struct radeon_device *rdev = dev->dev_private; |
| 730 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 725 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
| 731 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 726 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
| 727 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
| 728 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
| 729 | struct radeon_connector_atom_dig *dig_connector = | ||
| 730 | radeon_connector->con_priv; | ||
| 732 | 731 | ||
| 733 | if (!dig || !dig->afmt) | 732 | if (!dig || !dig->afmt) |
| 734 | return; | 733 | return; |
| 735 | 734 | ||
| 736 | /* disable audio prior to setting up hw */ | 735 | radeon_audio_write_speaker_allocation(encoder); |
| 737 | dig->afmt->pin = radeon_audio_get_pin(encoder); | 736 | radeon_audio_write_sad_regs(encoder); |
| 738 | radeon_audio_enable(rdev, dig->afmt->pin, 0); | 737 | radeon_audio_write_latency_fields(encoder, mode); |
| 739 | 738 | if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev)) | |
| 740 | radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10); | 739 | radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10); |
| 740 | else | ||
| 741 | radeon_audio_set_dto(encoder, dig_connector->dp_clock); | ||
| 741 | radeon_audio_set_audio_packet(encoder); | 742 | radeon_audio_set_audio_packet(encoder); |
| 742 | radeon_audio_select_pin(encoder); | 743 | radeon_audio_select_pin(encoder); |
| 743 | 744 | ||
| 744 | if (radeon_audio_set_avi_packet(encoder, mode) < 0) | 745 | if (radeon_audio_set_avi_packet(encoder, mode) < 0) |
| 745 | return; | 746 | return; |
| 746 | |||
| 747 | /* enable audio after to setting up hw */ | ||
| 748 | radeon_audio_enable(rdev, dig->afmt->pin, 0xf); | ||
| 749 | } | 747 | } |
| 750 | 748 | ||
| 751 | void radeon_audio_mode_set(struct drm_encoder *encoder, | 749 | void radeon_audio_mode_set(struct drm_encoder *encoder, |
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index a579ed379f20..4d0f96cc3da4 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
| @@ -256,11 +256,13 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) | |||
| 256 | u32 ring = RADEON_CS_RING_GFX; | 256 | u32 ring = RADEON_CS_RING_GFX; |
| 257 | s32 priority = 0; | 257 | s32 priority = 0; |
| 258 | 258 | ||
| 259 | INIT_LIST_HEAD(&p->validated); | ||
| 260 | |||
| 259 | if (!cs->num_chunks) { | 261 | if (!cs->num_chunks) { |
| 260 | return 0; | 262 | return 0; |
| 261 | } | 263 | } |
| 264 | |||
| 262 | /* get chunks */ | 265 | /* get chunks */ |
| 263 | INIT_LIST_HEAD(&p->validated); | ||
| 264 | p->idx = 0; | 266 | p->idx = 0; |
| 265 | p->ib.sa_bo = NULL; | 267 | p->ib.sa_bo = NULL; |
| 266 | p->const_ib.sa_bo = NULL; | 268 | p->const_ib.sa_bo = NULL; |
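The radeon_cs hunk moves INIT_LIST_HEAD() ahead of the zero-chunk early return, so later cleanup can always walk p->validated regardless of which path init took. A tiny model of that ordering, using a plain NULL-terminated list instead of the kernel's list_head:

#include <stdio.h>

struct node { struct node *next; };
struct parser { struct node *validated; int num_chunks; };

static void parser_fini(struct parser *p)
{
        /* safe only if 'validated' was initialised on every path through init */
        for (struct node *n = p->validated; n; n = n->next)
                printf("releasing buffer\n");
}

static int parser_init(struct parser *p, int num_chunks)
{
        p->validated = NULL;            /* initialise first ...                        */
        p->num_chunks = num_chunks;

        if (!num_chunks)
                return 0;               /* ... so the early return leaves sane state   */

        /* chunk parsing would populate p->validated here */
        return 0;
}

int main(void)
{
        struct parser p;
        parser_init(&p, 0);
        parser_fini(&p);                /* no crash on the zero-chunk path */
        return 0;
}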
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index d13d1b5a859f..df09ca7c4889 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c | |||
| @@ -1030,37 +1030,59 @@ static inline bool radeon_test_signaled(struct radeon_fence *fence) | |||
| 1030 | return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags); | 1030 | return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags); |
| 1031 | } | 1031 | } |
| 1032 | 1032 | ||
| 1033 | struct radeon_wait_cb { | ||
| 1034 | struct fence_cb base; | ||
| 1035 | struct task_struct *task; | ||
| 1036 | }; | ||
| 1037 | |||
| 1038 | static void | ||
| 1039 | radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb) | ||
| 1040 | { | ||
| 1041 | struct radeon_wait_cb *wait = | ||
| 1042 | container_of(cb, struct radeon_wait_cb, base); | ||
| 1043 | |||
| 1044 | wake_up_process(wait->task); | ||
| 1045 | } | ||
| 1046 | |||
| 1033 | static signed long radeon_fence_default_wait(struct fence *f, bool intr, | 1047 | static signed long radeon_fence_default_wait(struct fence *f, bool intr, |
| 1034 | signed long t) | 1048 | signed long t) |
| 1035 | { | 1049 | { |
| 1036 | struct radeon_fence *fence = to_radeon_fence(f); | 1050 | struct radeon_fence *fence = to_radeon_fence(f); |
| 1037 | struct radeon_device *rdev = fence->rdev; | 1051 | struct radeon_device *rdev = fence->rdev; |
| 1038 | bool signaled; | 1052 | struct radeon_wait_cb cb; |
| 1039 | 1053 | ||
| 1040 | fence_enable_sw_signaling(&fence->base); | 1054 | cb.task = current; |
| 1041 | 1055 | ||
| 1042 | /* | 1056 | if (fence_add_callback(f, &cb.base, radeon_fence_wait_cb)) |
| 1043 | * This function has to return -EDEADLK, but cannot hold | 1057 | return t; |
| 1044 | * exclusive_lock during the wait because some callers | 1058 | |
| 1045 | * may already hold it. This means checking needs_reset without | 1059 | while (t > 0) { |
| 1046 | * lock, and not fiddling with any gpu internals. | 1060 | if (intr) |
| 1047 | * | 1061 | set_current_state(TASK_INTERRUPTIBLE); |
| 1048 | * The callback installed with fence_enable_sw_signaling will | 1062 | else |
| 1049 | * run before our wait_event_*timeout call, so we will see | 1063 | set_current_state(TASK_UNINTERRUPTIBLE); |
| 1050 | * both the signaled fence and the changes to needs_reset. | 1064 | |
| 1051 | */ | 1065 | /* |
| 1066 | * radeon_test_signaled must be called after | ||
| 1067 | * set_current_state to prevent a race with wake_up_process | ||
| 1068 | */ | ||
| 1069 | if (radeon_test_signaled(fence)) | ||
| 1070 | break; | ||
| 1071 | |||
| 1072 | if (rdev->needs_reset) { | ||
| 1073 | t = -EDEADLK; | ||
| 1074 | break; | ||
| 1075 | } | ||
| 1076 | |||
| 1077 | t = schedule_timeout(t); | ||
| 1078 | |||
| 1079 | if (t > 0 && intr && signal_pending(current)) | ||
| 1080 | t = -ERESTARTSYS; | ||
| 1081 | } | ||
| 1082 | |||
| 1083 | __set_current_state(TASK_RUNNING); | ||
| 1084 | fence_remove_callback(f, &cb.base); | ||
| 1052 | 1085 | ||
| 1053 | if (intr) | ||
| 1054 | t = wait_event_interruptible_timeout(rdev->fence_queue, | ||
| 1055 | ((signaled = radeon_test_signaled(fence)) || | ||
| 1056 | rdev->needs_reset), t); | ||
| 1057 | else | ||
| 1058 | t = wait_event_timeout(rdev->fence_queue, | ||
| 1059 | ((signaled = radeon_test_signaled(fence)) || | ||
| 1060 | rdev->needs_reset), t); | ||
| 1061 | |||
| 1062 | if (t > 0 && !signaled) | ||
| 1063 | return -EDEADLK; | ||
| 1064 | return t; | 1086 | return t; |
| 1065 | } | 1087 | } |
| 1066 | 1088 | ||
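The rewritten radeon_fence_default_wait() above replaces wait_event_*timeout() with its own loop: install a wake-up callback, then repeatedly set the task state, re-check "signaled", "needs_reset" and pending signals, and sleep for whatever budget remains. The sketch below keeps only that control flow; the sleeping/wakeup machinery is dropped, and the error value is a stand-in for -EDEADLK:

#include <stdbool.h>
#include <stdio.h>

#define EDEADLK_RET     (-35)   /* stand-in for -EDEADLK */

struct fence_state { bool signaled; bool needs_reset; };

/* each pass of the loop models one schedule_timeout() round trip */
static long fence_wait(struct fence_state *f, long budget)
{
        while (budget > 0) {
                if (f->signaled)
                        break;                  /* done: return the unused budget  */
                if (f->needs_reset)
                        return EDEADLK_RET;     /* ask the caller to reset the GPU */

                budget--;                       /* the driver sleeps here instead  */
        }
        return budget;
}

int main(void)
{
        struct fence_state f = { false, false };
        printf("timeout path:  %ld\n", fence_wait(&f, 5));      /* 0   */

        f.signaled = true;
        printf("signaled path: %ld\n", fence_wait(&f, 5));      /* 5   */

        f.signaled = false;
        f.needs_reset = true;
        printf("reset path:    %ld\n", fence_wait(&f, 5));      /* -35 */
        return 0;
}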
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index d81182ad53ec..97a904835759 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
| @@ -694,6 +694,10 @@ int rs600_irq_set(struct radeon_device *rdev) | |||
| 694 | WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2); | 694 | WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2); |
| 695 | if (ASIC_IS_DCE2(rdev)) | 695 | if (ASIC_IS_DCE2(rdev)) |
| 696 | WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0); | 696 | WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0); |
| 697 | |||
| 698 | /* posting read */ | ||
| 699 | RREG32(R_000040_GEN_INT_CNTL); | ||
| 700 | |||
| 697 | return 0; | 701 | return 0; |
| 698 | } | 702 | } |
| 699 | 703 | ||
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index bcf516a8a2f1..a7fb2735d4a9 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
| @@ -6203,6 +6203,9 @@ int si_irq_set(struct radeon_device *rdev) | |||
| 6203 | 6203 | ||
| 6204 | WREG32(CG_THERMAL_INT, thermal_int); | 6204 | WREG32(CG_THERMAL_INT, thermal_int); |
| 6205 | 6205 | ||
| 6206 | /* posting read */ | ||
| 6207 | RREG32(SRBM_STATUS); | ||
| 6208 | |||
| 6206 | return 0; | 6209 | return 0; |
| 6207 | } | 6210 | } |
| 6208 | 6211 | ||
| @@ -7127,8 +7130,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) | |||
| 7127 | WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK); | 7130 | WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK); |
| 7128 | 7131 | ||
| 7129 | if (!vclk || !dclk) { | 7132 | if (!vclk || !dclk) { |
| 7130 | /* keep the Bypass mode, put PLL to sleep */ | 7133 | /* keep the Bypass mode */ |
| 7131 | WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK); | ||
| 7132 | return 0; | 7134 | return 0; |
| 7133 | } | 7135 | } |
| 7134 | 7136 | ||
| @@ -7144,8 +7146,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) | |||
| 7144 | /* set VCO_MODE to 1 */ | 7146 | /* set VCO_MODE to 1 */ |
| 7145 | WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK); | 7147 | WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK); |
| 7146 | 7148 | ||
| 7147 | /* toggle UPLL_SLEEP to 1 then back to 0 */ | 7149 | /* disable sleep mode */ |
| 7148 | WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK); | ||
| 7149 | WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK); | 7150 | WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK); |
| 7150 | 7151 | ||
| 7151 | /* deassert UPLL_RESET */ | 7152 | /* deassert UPLL_RESET */ |
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index c27118cab16a..99a9835c9f61 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h | |||
| @@ -912,8 +912,8 @@ | |||
| 912 | 912 | ||
| 913 | #define DCCG_AUDIO_DTO0_PHASE 0x05b0 | 913 | #define DCCG_AUDIO_DTO0_PHASE 0x05b0 |
| 914 | #define DCCG_AUDIO_DTO0_MODULE 0x05b4 | 914 | #define DCCG_AUDIO_DTO0_MODULE 0x05b4 |
| 915 | #define DCCG_AUDIO_DTO1_PHASE 0x05b8 | 915 | #define DCCG_AUDIO_DTO1_PHASE 0x05c0 |
| 916 | #define DCCG_AUDIO_DTO1_MODULE 0x05bc | 916 | #define DCCG_AUDIO_DTO1_MODULE 0x05c4 |
| 917 | 917 | ||
| 918 | #define AFMT_AUDIO_SRC_CONTROL 0x713c | 918 | #define AFMT_AUDIO_SRC_CONTROL 0x713c |
| 919 | #define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0) | 919 | #define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0) |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index d395b0bef73b..8d9b7de25613 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
| @@ -74,7 +74,7 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type) | |||
| 74 | pr_err(" has_type: %d\n", man->has_type); | 74 | pr_err(" has_type: %d\n", man->has_type); |
| 75 | pr_err(" use_type: %d\n", man->use_type); | 75 | pr_err(" use_type: %d\n", man->use_type); |
| 76 | pr_err(" flags: 0x%08X\n", man->flags); | 76 | pr_err(" flags: 0x%08X\n", man->flags); |
| 77 | pr_err(" gpu_offset: 0x%08lX\n", man->gpu_offset); | 77 | pr_err(" gpu_offset: 0x%08llX\n", man->gpu_offset); |
| 78 | pr_err(" size: %llu\n", man->size); | 78 | pr_err(" size: %llu\n", man->size); |
| 79 | pr_err(" available_caching: 0x%08X\n", man->available_caching); | 79 | pr_err(" available_caching: 0x%08X\n", man->available_caching); |
| 80 | pr_err(" default_caching: 0x%08X\n", man->default_caching); | 80 | pr_err(" default_caching: 0x%08X\n", man->default_caching); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 6c6b655defcf..e13b9cbc304e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
| @@ -725,32 +725,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
| 725 | goto out_err1; | 725 | goto out_err1; |
| 726 | } | 726 | } |
| 727 | 727 | ||
| 728 | ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, | ||
| 729 | (dev_priv->vram_size >> PAGE_SHIFT)); | ||
| 730 | if (unlikely(ret != 0)) { | ||
| 731 | DRM_ERROR("Failed initializing memory manager for VRAM.\n"); | ||
| 732 | goto out_err2; | ||
| 733 | } | ||
| 734 | |||
| 735 | dev_priv->has_gmr = true; | ||
| 736 | if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || | ||
| 737 | refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, | ||
| 738 | VMW_PL_GMR) != 0) { | ||
| 739 | DRM_INFO("No GMR memory available. " | ||
| 740 | "Graphics memory resources are very limited.\n"); | ||
| 741 | dev_priv->has_gmr = false; | ||
| 742 | } | ||
| 743 | |||
| 744 | if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { | ||
| 745 | dev_priv->has_mob = true; | ||
| 746 | if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB, | ||
| 747 | VMW_PL_MOB) != 0) { | ||
| 748 | DRM_INFO("No MOB memory available. " | ||
| 749 | "3D will be disabled.\n"); | ||
| 750 | dev_priv->has_mob = false; | ||
| 751 | } | ||
| 752 | } | ||
| 753 | |||
| 754 | dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, | 728 | dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, |
| 755 | dev_priv->mmio_size); | 729 | dev_priv->mmio_size); |
| 756 | 730 | ||
| @@ -813,6 +787,33 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
| 813 | goto out_no_fman; | 787 | goto out_no_fman; |
| 814 | } | 788 | } |
| 815 | 789 | ||
| 790 | |||
| 791 | ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, | ||
| 792 | (dev_priv->vram_size >> PAGE_SHIFT)); | ||
| 793 | if (unlikely(ret != 0)) { | ||
| 794 | DRM_ERROR("Failed initializing memory manager for VRAM.\n"); | ||
| 795 | goto out_no_vram; | ||
| 796 | } | ||
| 797 | |||
| 798 | dev_priv->has_gmr = true; | ||
| 799 | if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || | ||
| 800 | refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, | ||
| 801 | VMW_PL_GMR) != 0) { | ||
| 802 | DRM_INFO("No GMR memory available. " | ||
| 803 | "Graphics memory resources are very limited.\n"); | ||
| 804 | dev_priv->has_gmr = false; | ||
| 805 | } | ||
| 806 | |||
| 807 | if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { | ||
| 808 | dev_priv->has_mob = true; | ||
| 809 | if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB, | ||
| 810 | VMW_PL_MOB) != 0) { | ||
| 811 | DRM_INFO("No MOB memory available. " | ||
| 812 | "3D will be disabled.\n"); | ||
| 813 | dev_priv->has_mob = false; | ||
| 814 | } | ||
| 815 | } | ||
| 816 | |||
| 816 | vmw_kms_save_vga(dev_priv); | 817 | vmw_kms_save_vga(dev_priv); |
| 817 | 818 | ||
| 818 | /* Start kms and overlay systems, needs fifo. */ | 819 | /* Start kms and overlay systems, needs fifo. */ |
| @@ -838,6 +839,12 @@ out_no_fifo: | |||
| 838 | vmw_kms_close(dev_priv); | 839 | vmw_kms_close(dev_priv); |
| 839 | out_no_kms: | 840 | out_no_kms: |
| 840 | vmw_kms_restore_vga(dev_priv); | 841 | vmw_kms_restore_vga(dev_priv); |
| 842 | if (dev_priv->has_mob) | ||
| 843 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); | ||
| 844 | if (dev_priv->has_gmr) | ||
| 845 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); | ||
| 846 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); | ||
| 847 | out_no_vram: | ||
| 841 | vmw_fence_manager_takedown(dev_priv->fman); | 848 | vmw_fence_manager_takedown(dev_priv->fman); |
| 842 | out_no_fman: | 849 | out_no_fman: |
| 843 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) | 850 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) |
| @@ -853,12 +860,6 @@ out_err4: | |||
| 853 | iounmap(dev_priv->mmio_virt); | 860 | iounmap(dev_priv->mmio_virt); |
| 854 | out_err3: | 861 | out_err3: |
| 855 | arch_phys_wc_del(dev_priv->mmio_mtrr); | 862 | arch_phys_wc_del(dev_priv->mmio_mtrr); |
| 856 | if (dev_priv->has_mob) | ||
| 857 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); | ||
| 858 | if (dev_priv->has_gmr) | ||
| 859 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); | ||
| 860 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); | ||
| 861 | out_err2: | ||
| 862 | (void)ttm_bo_device_release(&dev_priv->bdev); | 863 | (void)ttm_bo_device_release(&dev_priv->bdev); |
| 863 | out_err1: | 864 | out_err1: |
| 864 | vmw_ttm_global_release(dev_priv); | 865 | vmw_ttm_global_release(dev_priv); |
| @@ -887,6 +888,13 @@ static int vmw_driver_unload(struct drm_device *dev) | |||
| 887 | } | 888 | } |
| 888 | vmw_kms_close(dev_priv); | 889 | vmw_kms_close(dev_priv); |
| 889 | vmw_overlay_close(dev_priv); | 890 | vmw_overlay_close(dev_priv); |
| 891 | |||
| 892 | if (dev_priv->has_mob) | ||
| 893 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); | ||
| 894 | if (dev_priv->has_gmr) | ||
| 895 | (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); | ||
| 896 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); | ||
| 897 | |||
| 890 | vmw_fence_manager_takedown(dev_priv->fman); | 898 | vmw_fence_manager_takedown(dev_priv->fman); |
| 891 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) | 899 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) |
| 892 | drm_irq_uninstall(dev_priv->dev); | 900 | drm_irq_uninstall(dev_priv->dev); |
| @@ -898,11 +906,6 @@ static int vmw_driver_unload(struct drm_device *dev) | |||
| 898 | ttm_object_device_release(&dev_priv->tdev); | 906 | ttm_object_device_release(&dev_priv->tdev); |
| 899 | iounmap(dev_priv->mmio_virt); | 907 | iounmap(dev_priv->mmio_virt); |
| 900 | arch_phys_wc_del(dev_priv->mmio_mtrr); | 908 | arch_phys_wc_del(dev_priv->mmio_mtrr); |
| 901 | if (dev_priv->has_mob) | ||
| 902 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); | ||
| 903 | if (dev_priv->has_gmr) | ||
| 904 | (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); | ||
| 905 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); | ||
| 906 | (void)ttm_bo_device_release(&dev_priv->bdev); | 909 | (void)ttm_bo_device_release(&dev_priv->bdev); |
| 907 | vmw_ttm_global_release(dev_priv); | 910 | vmw_ttm_global_release(dev_priv); |
| 908 | 911 | ||
| @@ -1235,6 +1238,7 @@ static void vmw_remove(struct pci_dev *pdev) | |||
| 1235 | { | 1238 | { |
| 1236 | struct drm_device *dev = pci_get_drvdata(pdev); | 1239 | struct drm_device *dev = pci_get_drvdata(pdev); |
| 1237 | 1240 | ||
| 1241 | pci_disable_device(pdev); | ||
| 1238 | drm_put_dev(dev); | 1242 | drm_put_dev(dev); |
| 1239 | } | 1243 | } |
| 1240 | 1244 | ||
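Note: the vmw_driver_load() reordering moves the TTM memory-manager setup (VRAM, GMR, MOB) to after the fence manager is initialized, and mirrors it with the new out_no_vram error label plus matching teardown in vmw_driver_unload(), so cleanup always runs in exact reverse order of initialization. A compact, hypothetical sketch of that goto-unwind pattern; struct my_dev and the init_*/fini_* names are placeholders, not vmwgfx functions:

    static int driver_load_sketch(struct my_dev *dev)
    {
        int ret;

        ret = init_fences(dev);
        if (ret)
            return ret;

        ret = init_mm(dev);             /* e.g. VRAM/GMR/MOB pools */
        if (ret)
            goto out_no_mm;

        ret = init_kms(dev);
        if (ret)
            goto out_no_kms;

        return 0;

    out_no_kms:
        fini_mm(dev);                   /* undo in reverse order */
    out_no_mm:
        fini_fences(dev);
        return ret;
    }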
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 33176d05db35..654c8daeb5ab 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
| @@ -890,7 +890,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, | |||
| 890 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); | 890 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); |
| 891 | if (unlikely(ret != 0)) { | 891 | if (unlikely(ret != 0)) { |
| 892 | DRM_ERROR("Could not find or use MOB buffer.\n"); | 892 | DRM_ERROR("Could not find or use MOB buffer.\n"); |
| 893 | return -EINVAL; | 893 | ret = -EINVAL; |
| 894 | goto out_no_reloc; | ||
| 894 | } | 895 | } |
| 895 | bo = &vmw_bo->base; | 896 | bo = &vmw_bo->base; |
| 896 | 897 | ||
| @@ -914,7 +915,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, | |||
| 914 | 915 | ||
| 915 | out_no_reloc: | 916 | out_no_reloc: |
| 916 | vmw_dmabuf_unreference(&vmw_bo); | 917 | vmw_dmabuf_unreference(&vmw_bo); |
| 917 | vmw_bo_p = NULL; | 918 | *vmw_bo_p = NULL; |
| 918 | return ret; | 919 | return ret; |
| 919 | } | 920 | } |
| 920 | 921 | ||
| @@ -951,7 +952,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, | |||
| 951 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); | 952 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); |
| 952 | if (unlikely(ret != 0)) { | 953 | if (unlikely(ret != 0)) { |
| 953 | DRM_ERROR("Could not find or use GMR region.\n"); | 954 | DRM_ERROR("Could not find or use GMR region.\n"); |
| 954 | return -EINVAL; | 955 | ret = -EINVAL; |
| 956 | goto out_no_reloc; | ||
| 955 | } | 957 | } |
| 956 | bo = &vmw_bo->base; | 958 | bo = &vmw_bo->base; |
| 957 | 959 | ||
| @@ -974,7 +976,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, | |||
| 974 | 976 | ||
| 975 | out_no_reloc: | 977 | out_no_reloc: |
| 976 | vmw_dmabuf_unreference(&vmw_bo); | 978 | vmw_dmabuf_unreference(&vmw_bo); |
| 977 | vmw_bo_p = NULL; | 979 | *vmw_bo_p = NULL; |
| 978 | return ret; | 980 | return ret; |
| 979 | } | 981 | } |
| 980 | 982 | ||
| @@ -2780,13 +2782,11 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | |||
| 2780 | NULL, arg->command_size, arg->throttle_us, | 2782 | NULL, arg->command_size, arg->throttle_us, |
| 2781 | (void __user *)(unsigned long)arg->fence_rep, | 2783 | (void __user *)(unsigned long)arg->fence_rep, |
| 2782 | NULL); | 2784 | NULL); |
| 2783 | 2785 | ttm_read_unlock(&dev_priv->reservation_sem); | |
| 2784 | if (unlikely(ret != 0)) | 2786 | if (unlikely(ret != 0)) |
| 2785 | goto out_unlock; | 2787 | return ret; |
| 2786 | 2788 | ||
| 2787 | vmw_kms_cursor_post_execbuf(dev_priv); | 2789 | vmw_kms_cursor_post_execbuf(dev_priv); |
| 2788 | 2790 | ||
| 2789 | out_unlock: | 2791 | return 0; |
| 2790 | ttm_read_unlock(&dev_priv->reservation_sem); | ||
| 2791 | return ret; | ||
| 2792 | } | 2792 | } |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 8725b79e7847..07cda8cbbddb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
| @@ -2033,23 +2033,17 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | |||
| 2033 | int i; | 2033 | int i; |
| 2034 | struct drm_mode_config *mode_config = &dev->mode_config; | 2034 | struct drm_mode_config *mode_config = &dev->mode_config; |
| 2035 | 2035 | ||
| 2036 | ret = ttm_read_lock(&dev_priv->reservation_sem, true); | ||
| 2037 | if (unlikely(ret != 0)) | ||
| 2038 | return ret; | ||
| 2039 | |||
| 2040 | if (!arg->num_outputs) { | 2036 | if (!arg->num_outputs) { |
| 2041 | struct drm_vmw_rect def_rect = {0, 0, 800, 600}; | 2037 | struct drm_vmw_rect def_rect = {0, 0, 800, 600}; |
| 2042 | vmw_du_update_layout(dev_priv, 1, &def_rect); | 2038 | vmw_du_update_layout(dev_priv, 1, &def_rect); |
| 2043 | goto out_unlock; | 2039 | return 0; |
| 2044 | } | 2040 | } |
| 2045 | 2041 | ||
| 2046 | rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); | 2042 | rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); |
| 2047 | rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect), | 2043 | rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect), |
| 2048 | GFP_KERNEL); | 2044 | GFP_KERNEL); |
| 2049 | if (unlikely(!rects)) { | 2045 | if (unlikely(!rects)) |
| 2050 | ret = -ENOMEM; | 2046 | return -ENOMEM; |
| 2051 | goto out_unlock; | ||
| 2052 | } | ||
| 2053 | 2047 | ||
| 2054 | user_rects = (void __user *)(unsigned long)arg->rects; | 2048 | user_rects = (void __user *)(unsigned long)arg->rects; |
| 2055 | ret = copy_from_user(rects, user_rects, rects_size); | 2049 | ret = copy_from_user(rects, user_rects, rects_size); |
| @@ -2074,7 +2068,5 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | |||
| 2074 | 2068 | ||
| 2075 | out_free: | 2069 | out_free: |
| 2076 | kfree(rects); | 2070 | kfree(rects); |
| 2077 | out_unlock: | ||
| 2078 | ttm_read_unlock(&dev_priv->reservation_sem); | ||
| 2079 | return ret; | 2071 | return ret; |
| 2080 | } | 2072 | } |
diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c index b61d6be97602..3ddfb3d0b64d 100644 --- a/drivers/gpu/ipu-v3/ipu-di.c +++ b/drivers/gpu/ipu-v3/ipu-di.c | |||
| @@ -459,6 +459,8 @@ static void ipu_di_config_clock(struct ipu_di *di, | |||
| 459 | 459 | ||
| 460 | clkrate = clk_get_rate(di->clk_ipu); | 460 | clkrate = clk_get_rate(di->clk_ipu); |
| 461 | div = DIV_ROUND_CLOSEST(clkrate, sig->mode.pixelclock); | 461 | div = DIV_ROUND_CLOSEST(clkrate, sig->mode.pixelclock); |
| 462 | if (div == 0) | ||
| 463 | div = 1; | ||
| 462 | rate = clkrate / div; | 464 | rate = clkrate / div; |
| 463 | 465 | ||
| 464 | error = rate / (sig->mode.pixelclock / 1000); | 466 | error = rate / (sig->mode.pixelclock / 1000); |
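Note: DIV_ROUND_CLOSEST() returns 0 whenever the requested pixel clock is more than twice the IPU clock, and the following "clkrate / div" would then divide by zero; clamping the divider to 1 falls back to running the DI clock undivided. A tiny worked example with illustrative frequencies:

    /* illustrative values: 133 MHz IPU clock, 300 MHz requested pixel clock */
    unsigned long clkrate = 133000000, pixelclock = 300000000;
    unsigned long div = DIV_ROUND_CLOSEST(clkrate, pixelclock);    /* == 0 */

    if (div == 0)
        div = 1;        /* avoid the division by zero in clkrate / div */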
diff --git a/drivers/i2c/busses/i2c-designware-baytrail.c b/drivers/i2c/busses/i2c-designware-baytrail.c index 5f1ff4cc5c34..7d7ae97476e2 100644 --- a/drivers/i2c/busses/i2c-designware-baytrail.c +++ b/drivers/i2c/busses/i2c-designware-baytrail.c | |||
| @@ -17,27 +17,31 @@ | |||
| 17 | #include <linux/acpi.h> | 17 | #include <linux/acpi.h> |
| 18 | #include <linux/i2c.h> | 18 | #include <linux/i2c.h> |
| 19 | #include <linux/interrupt.h> | 19 | #include <linux/interrupt.h> |
| 20 | |||
| 20 | #include <asm/iosf_mbi.h> | 21 | #include <asm/iosf_mbi.h> |
| 22 | |||
| 21 | #include "i2c-designware-core.h" | 23 | #include "i2c-designware-core.h" |
| 22 | 24 | ||
| 23 | #define SEMAPHORE_TIMEOUT 100 | 25 | #define SEMAPHORE_TIMEOUT 100 |
| 24 | #define PUNIT_SEMAPHORE 0x7 | 26 | #define PUNIT_SEMAPHORE 0x7 |
| 27 | #define PUNIT_SEMAPHORE_BIT BIT(0) | ||
| 28 | #define PUNIT_SEMAPHORE_ACQUIRE BIT(1) | ||
| 25 | 29 | ||
| 26 | static unsigned long acquired; | 30 | static unsigned long acquired; |
| 27 | 31 | ||
| 28 | static int get_sem(struct device *dev, u32 *sem) | 32 | static int get_sem(struct device *dev, u32 *sem) |
| 29 | { | 33 | { |
| 30 | u32 reg_val; | 34 | u32 data; |
| 31 | int ret; | 35 | int ret; |
| 32 | 36 | ||
| 33 | ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, PUNIT_SEMAPHORE, | 37 | ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, PUNIT_SEMAPHORE, |
| 34 | ®_val); | 38 | &data); |
| 35 | if (ret) { | 39 | if (ret) { |
| 36 | dev_err(dev, "iosf failed to read punit semaphore\n"); | 40 | dev_err(dev, "iosf failed to read punit semaphore\n"); |
| 37 | return ret; | 41 | return ret; |
| 38 | } | 42 | } |
| 39 | 43 | ||
| 40 | *sem = reg_val & 0x1; | 44 | *sem = data & PUNIT_SEMAPHORE_BIT; |
| 41 | 45 | ||
| 42 | return 0; | 46 | return 0; |
| 43 | } | 47 | } |
| @@ -52,27 +56,29 @@ static void reset_semaphore(struct device *dev) | |||
| 52 | return; | 56 | return; |
| 53 | } | 57 | } |
| 54 | 58 | ||
| 55 | data = data & 0xfffffffe; | 59 | data &= ~PUNIT_SEMAPHORE_BIT; |
| 56 | if (iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE, | 60 | if (iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE, |
| 57 | PUNIT_SEMAPHORE, data)) | 61 | PUNIT_SEMAPHORE, data)) |
| 58 | dev_err(dev, "iosf failed to reset punit semaphore during write\n"); | 62 | dev_err(dev, "iosf failed to reset punit semaphore during write\n"); |
| 59 | } | 63 | } |
| 60 | 64 | ||
| 61 | int baytrail_i2c_acquire(struct dw_i2c_dev *dev) | 65 | static int baytrail_i2c_acquire(struct dw_i2c_dev *dev) |
| 62 | { | 66 | { |
| 63 | u32 sem = 0; | 67 | u32 sem; |
| 64 | int ret; | 68 | int ret; |
| 65 | unsigned long start, end; | 69 | unsigned long start, end; |
| 66 | 70 | ||
| 71 | might_sleep(); | ||
| 72 | |||
| 67 | if (!dev || !dev->dev) | 73 | if (!dev || !dev->dev) |
| 68 | return -ENODEV; | 74 | return -ENODEV; |
| 69 | 75 | ||
| 70 | if (!dev->acquire_lock) | 76 | if (!dev->release_lock) |
| 71 | return 0; | 77 | return 0; |
| 72 | 78 | ||
| 73 | /* host driver writes 0x2 to side band semaphore register */ | 79 | /* host driver writes to side band semaphore register */ |
| 74 | ret = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE, | 80 | ret = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE, |
| 75 | PUNIT_SEMAPHORE, 0x2); | 81 | PUNIT_SEMAPHORE, PUNIT_SEMAPHORE_ACQUIRE); |
| 76 | if (ret) { | 82 | if (ret) { |
| 77 | dev_err(dev->dev, "iosf punit semaphore request failed\n"); | 83 | dev_err(dev->dev, "iosf punit semaphore request failed\n"); |
| 78 | return ret; | 84 | return ret; |
| @@ -81,7 +87,7 @@ int baytrail_i2c_acquire(struct dw_i2c_dev *dev) | |||
| 81 | /* host driver waits for bit 0 to be set in semaphore register */ | 87 | /* host driver waits for bit 0 to be set in semaphore register */ |
| 82 | start = jiffies; | 88 | start = jiffies; |
| 83 | end = start + msecs_to_jiffies(SEMAPHORE_TIMEOUT); | 89 | end = start + msecs_to_jiffies(SEMAPHORE_TIMEOUT); |
| 84 | while (!time_after(jiffies, end)) { | 90 | do { |
| 85 | ret = get_sem(dev->dev, &sem); | 91 | ret = get_sem(dev->dev, &sem); |
| 86 | if (!ret && sem) { | 92 | if (!ret && sem) { |
| 87 | acquired = jiffies; | 93 | acquired = jiffies; |
| @@ -91,14 +97,14 @@ int baytrail_i2c_acquire(struct dw_i2c_dev *dev) | |||
| 91 | } | 97 | } |
| 92 | 98 | ||
| 93 | usleep_range(1000, 2000); | 99 | usleep_range(1000, 2000); |
| 94 | } | 100 | } while (time_before(jiffies, end)); |
| 95 | 101 | ||
| 96 | dev_err(dev->dev, "punit semaphore timed out, resetting\n"); | 102 | dev_err(dev->dev, "punit semaphore timed out, resetting\n"); |
| 97 | reset_semaphore(dev->dev); | 103 | reset_semaphore(dev->dev); |
| 98 | 104 | ||
| 99 | ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, | 105 | ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, |
| 100 | PUNIT_SEMAPHORE, &sem); | 106 | PUNIT_SEMAPHORE, &sem); |
| 101 | if (!ret) | 107 | if (ret) |
| 102 | dev_err(dev->dev, "iosf failed to read punit semaphore\n"); | 108 | dev_err(dev->dev, "iosf failed to read punit semaphore\n"); |
| 103 | else | 109 | else |
| 104 | dev_err(dev->dev, "PUNIT SEM: %d\n", sem); | 110 | dev_err(dev->dev, "PUNIT SEM: %d\n", sem); |
| @@ -107,9 +113,8 @@ int baytrail_i2c_acquire(struct dw_i2c_dev *dev) | |||
| 107 | 113 | ||
| 108 | return -ETIMEDOUT; | 114 | return -ETIMEDOUT; |
| 109 | } | 115 | } |
| 110 | EXPORT_SYMBOL(baytrail_i2c_acquire); | ||
| 111 | 116 | ||
| 112 | void baytrail_i2c_release(struct dw_i2c_dev *dev) | 117 | static void baytrail_i2c_release(struct dw_i2c_dev *dev) |
| 113 | { | 118 | { |
| 114 | if (!dev || !dev->dev) | 119 | if (!dev || !dev->dev) |
| 115 | return; | 120 | return; |
| @@ -121,7 +126,6 @@ void baytrail_i2c_release(struct dw_i2c_dev *dev) | |||
| 121 | dev_dbg(dev->dev, "punit semaphore held for %ums\n", | 126 | dev_dbg(dev->dev, "punit semaphore held for %ums\n", |
| 122 | jiffies_to_msecs(jiffies - acquired)); | 127 | jiffies_to_msecs(jiffies - acquired)); |
| 123 | } | 128 | } |
| 124 | EXPORT_SYMBOL(baytrail_i2c_release); | ||
| 125 | 129 | ||
| 126 | int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev) | 130 | int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev) |
| 127 | { | 131 | { |
| @@ -137,7 +141,6 @@ int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev) | |||
| 137 | return 0; | 141 | return 0; |
| 138 | 142 | ||
| 139 | status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host); | 143 | status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host); |
| 140 | |||
| 141 | if (ACPI_FAILURE(status)) | 144 | if (ACPI_FAILURE(status)) |
| 142 | return 0; | 145 | return 0; |
| 143 | 146 | ||
| @@ -153,7 +156,6 @@ int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev) | |||
| 153 | 156 | ||
| 154 | return 0; | 157 | return 0; |
| 155 | } | 158 | } |
| 156 | EXPORT_SYMBOL(i2c_dw_eval_lock_support); | ||
| 157 | 159 | ||
| 158 | MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>"); | 160 | MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>"); |
| 159 | MODULE_DESCRIPTION("Baytrail I2C Semaphore driver"); | 161 | MODULE_DESCRIPTION("Baytrail I2C Semaphore driver"); |
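Note: two separate fixes meet in baytrail_i2c_acquire(): the polling loop becomes a do/while bounded by time_before(), so the semaphore is sampled at least once even if the thread was scheduled out past the deadline, and the post-timeout diagnostic had its condition inverted ("if (!ret)" printed the read-failure message on success). A hedged fragment showing the jiffies-bounded polling idiom; read_hw_flag() stands in for the get_sem() MBI read:

    unsigned long end = jiffies + msecs_to_jiffies(SEMAPHORE_TIMEOUT);
    u32 flag;
    int ret;

    do {
        ret = read_hw_flag(&flag);          /* placeholder for get_sem() */
        if (!ret && flag)
            return 0;                       /* semaphore granted */

        usleep_range(1000, 2000);           /* sleeps, hence might_sleep() */
    } while (time_before(jiffies, end));

    return -ETIMEDOUT;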
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 210cf4874cb7..edf274cabe81 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c | |||
| @@ -679,9 +679,6 @@ static int i2c_device_remove(struct device *dev) | |||
| 679 | status = driver->remove(client); | 679 | status = driver->remove(client); |
| 680 | } | 680 | } |
| 681 | 681 | ||
| 682 | if (dev->of_node) | ||
| 683 | irq_dispose_mapping(client->irq); | ||
| 684 | |||
| 685 | dev_pm_domain_detach(&client->dev, true); | 682 | dev_pm_domain_detach(&client->dev, true); |
| 686 | return status; | 683 | return status; |
| 687 | } | 684 | } |
diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c index 51672256072b..b96c636470ef 100644 --- a/drivers/iio/adc/mcp3422.c +++ b/drivers/iio/adc/mcp3422.c | |||
| @@ -58,20 +58,11 @@ | |||
| 58 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ), \ | 58 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ), \ |
| 59 | } | 59 | } |
| 60 | 60 | ||
| 61 | /* LSB is in nV to eliminate floating point */ | ||
| 62 | static const u32 rates_to_lsb[] = {1000000, 250000, 62500, 15625}; | ||
| 63 | |||
| 64 | /* | ||
| 65 | * scales calculated as: | ||
| 66 | * rates_to_lsb[sample_rate] / (1 << pga); | ||
| 67 | * pga is 1 for 0, 2 | ||
| 68 | */ | ||
| 69 | |||
| 70 | static const int mcp3422_scales[4][4] = { | 61 | static const int mcp3422_scales[4][4] = { |
| 71 | { 1000000, 250000, 62500, 15625 }, | 62 | { 1000000, 500000, 250000, 125000 }, |
| 72 | { 500000 , 125000, 31250, 7812 }, | 63 | { 250000 , 125000, 62500 , 31250 }, |
| 73 | { 250000 , 62500 , 15625, 3906 }, | 64 | { 62500 , 31250 , 15625 , 7812 }, |
| 74 | { 125000 , 31250 , 7812 , 1953 } }; | 65 | { 15625 , 7812 , 3906 , 1953 } }; |
| 75 | 66 | ||
| 76 | /* Constant msleep times for data acquisitions */ | 67 | /* Constant msleep times for data acquisitions */ |
| 77 | static const int mcp3422_read_times[4] = { | 68 | static const int mcp3422_read_times[4] = { |
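Note: the old mcp3422_scales[] table had its two indices swapped; with the corrected table, mcp3422_scales[rate][pga] equals rates_to_lsb[rate] / (1 << pga), where rates_to_lsb = {1000000, 250000, 62500, 15625} (LSB in nV) and the PGA divides by 1, 2, 4 or 8. For example, rate index 0 at the highest gain gives 1000000 / 8 = 125000, the last entry of the first row. A hedged sketch of how the same table could be derived at init time instead of being hard-coded:

    /* sketch only: the driver keeps a static table, this shows the arithmetic */
    static const u32 rates_to_lsb[4] = { 1000000, 250000, 62500, 15625 };
    static u32 scales[4][4];

    static void fill_scales(void)
    {
        int rate, pga;

        for (rate = 0; rate < 4; rate++)
            for (pga = 0; pga < 4; pga++)
                scales[rate][pga] = rates_to_lsb[rate] >> pga;  /* / (1 << pga) */
    }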
diff --git a/drivers/iio/adc/qcom-spmi-iadc.c b/drivers/iio/adc/qcom-spmi-iadc.c index b9666f2f5e51..fabd24edc2a1 100644 --- a/drivers/iio/adc/qcom-spmi-iadc.c +++ b/drivers/iio/adc/qcom-spmi-iadc.c | |||
| @@ -296,7 +296,8 @@ static int iadc_do_conversion(struct iadc_chip *iadc, int chan, u16 *data) | |||
| 296 | if (iadc->poll_eoc) { | 296 | if (iadc->poll_eoc) { |
| 297 | ret = iadc_poll_wait_eoc(iadc, wait); | 297 | ret = iadc_poll_wait_eoc(iadc, wait); |
| 298 | } else { | 298 | } else { |
| 299 | ret = wait_for_completion_timeout(&iadc->complete, wait); | 299 | ret = wait_for_completion_timeout(&iadc->complete, |
| 300 | usecs_to_jiffies(wait)); | ||
| 300 | if (!ret) | 301 | if (!ret) |
| 301 | ret = -ETIMEDOUT; | 302 | ret = -ETIMEDOUT; |
| 302 | else | 303 | else |
diff --git a/drivers/iio/common/ssp_sensors/ssp_dev.c b/drivers/iio/common/ssp_sensors/ssp_dev.c index 52d70435f5a1..55a90082a29b 100644 --- a/drivers/iio/common/ssp_sensors/ssp_dev.c +++ b/drivers/iio/common/ssp_sensors/ssp_dev.c | |||
| @@ -640,6 +640,7 @@ static int ssp_remove(struct spi_device *spi) | |||
| 640 | return 0; | 640 | return 0; |
| 641 | } | 641 | } |
| 642 | 642 | ||
| 643 | #ifdef CONFIG_PM_SLEEP | ||
| 643 | static int ssp_suspend(struct device *dev) | 644 | static int ssp_suspend(struct device *dev) |
| 644 | { | 645 | { |
| 645 | int ret; | 646 | int ret; |
| @@ -688,6 +689,7 @@ static int ssp_resume(struct device *dev) | |||
| 688 | 689 | ||
| 689 | return 0; | 690 | return 0; |
| 690 | } | 691 | } |
| 692 | #endif /* CONFIG_PM_SLEEP */ | ||
| 691 | 693 | ||
| 692 | static const struct dev_pm_ops ssp_pm_ops = { | 694 | static const struct dev_pm_ops ssp_pm_ops = { |
| 693 | SET_SYSTEM_SLEEP_PM_OPS(ssp_suspend, ssp_resume) | 695 | SET_SYSTEM_SLEEP_PM_OPS(ssp_suspend, ssp_resume) |
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c index f57562aa396f..15c73e20272d 100644 --- a/drivers/iio/dac/ad5686.c +++ b/drivers/iio/dac/ad5686.c | |||
| @@ -322,7 +322,7 @@ static int ad5686_probe(struct spi_device *spi) | |||
| 322 | st = iio_priv(indio_dev); | 322 | st = iio_priv(indio_dev); |
| 323 | spi_set_drvdata(spi, indio_dev); | 323 | spi_set_drvdata(spi, indio_dev); |
| 324 | 324 | ||
| 325 | st->reg = devm_regulator_get(&spi->dev, "vcc"); | 325 | st->reg = devm_regulator_get_optional(&spi->dev, "vcc"); |
| 326 | if (!IS_ERR(st->reg)) { | 326 | if (!IS_ERR(st->reg)) { |
| 327 | ret = regulator_enable(st->reg); | 327 | ret = regulator_enable(st->reg); |
| 328 | if (ret) | 328 | if (ret) |
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c index 623c145d8a97..7d79a1ac5f5f 100644 --- a/drivers/iio/humidity/dht11.c +++ b/drivers/iio/humidity/dht11.c | |||
| @@ -29,6 +29,7 @@ | |||
| 29 | #include <linux/wait.h> | 29 | #include <linux/wait.h> |
| 30 | #include <linux/bitops.h> | 30 | #include <linux/bitops.h> |
| 31 | #include <linux/completion.h> | 31 | #include <linux/completion.h> |
| 32 | #include <linux/mutex.h> | ||
| 32 | #include <linux/delay.h> | 33 | #include <linux/delay.h> |
| 33 | #include <linux/gpio.h> | 34 | #include <linux/gpio.h> |
| 34 | #include <linux/of_gpio.h> | 35 | #include <linux/of_gpio.h> |
| @@ -39,8 +40,12 @@ | |||
| 39 | 40 | ||
| 40 | #define DHT11_DATA_VALID_TIME 2000000000 /* 2s in ns */ | 41 | #define DHT11_DATA_VALID_TIME 2000000000 /* 2s in ns */ |
| 41 | 42 | ||
| 42 | #define DHT11_EDGES_PREAMBLE 4 | 43 | #define DHT11_EDGES_PREAMBLE 2 |
| 43 | #define DHT11_BITS_PER_READ 40 | 44 | #define DHT11_BITS_PER_READ 40 |
| 45 | /* | ||
| 46 | * Note that when reading the sensor actually 84 edges are detected, but | ||
| 47 | * since the last edge is not significant, we only store 83: | ||
| 48 | */ | ||
| 44 | #define DHT11_EDGES_PER_READ (2*DHT11_BITS_PER_READ + DHT11_EDGES_PREAMBLE + 1) | 49 | #define DHT11_EDGES_PER_READ (2*DHT11_BITS_PER_READ + DHT11_EDGES_PREAMBLE + 1) |
| 45 | 50 | ||
| 46 | /* Data transmission timing (nano seconds) */ | 51 | /* Data transmission timing (nano seconds) */ |
| @@ -57,6 +62,7 @@ struct dht11 { | |||
| 57 | int irq; | 62 | int irq; |
| 58 | 63 | ||
| 59 | struct completion completion; | 64 | struct completion completion; |
| 65 | struct mutex lock; | ||
| 60 | 66 | ||
| 61 | s64 timestamp; | 67 | s64 timestamp; |
| 62 | int temperature; | 68 | int temperature; |
| @@ -88,7 +94,7 @@ static int dht11_decode(struct dht11 *dht11, int offset) | |||
| 88 | unsigned char temp_int, temp_dec, hum_int, hum_dec, checksum; | 94 | unsigned char temp_int, temp_dec, hum_int, hum_dec, checksum; |
| 89 | 95 | ||
| 90 | /* Calculate timestamp resolution */ | 96 | /* Calculate timestamp resolution */ |
| 91 | for (i = 0; i < dht11->num_edges; ++i) { | 97 | for (i = 1; i < dht11->num_edges; ++i) { |
| 92 | t = dht11->edges[i].ts - dht11->edges[i-1].ts; | 98 | t = dht11->edges[i].ts - dht11->edges[i-1].ts; |
| 93 | if (t > 0 && t < timeres) | 99 | if (t > 0 && t < timeres) |
| 94 | timeres = t; | 100 | timeres = t; |
| @@ -138,6 +144,27 @@ static int dht11_decode(struct dht11 *dht11, int offset) | |||
| 138 | return 0; | 144 | return 0; |
| 139 | } | 145 | } |
| 140 | 146 | ||
| 147 | /* | ||
| 148 | * IRQ handler called on GPIO edges | ||
| 149 | */ | ||
| 150 | static irqreturn_t dht11_handle_irq(int irq, void *data) | ||
| 151 | { | ||
| 152 | struct iio_dev *iio = data; | ||
| 153 | struct dht11 *dht11 = iio_priv(iio); | ||
| 154 | |||
| 155 | /* TODO: Consider making the handler safe for IRQ sharing */ | ||
| 156 | if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) { | ||
| 157 | dht11->edges[dht11->num_edges].ts = iio_get_time_ns(); | ||
| 158 | dht11->edges[dht11->num_edges++].value = | ||
| 159 | gpio_get_value(dht11->gpio); | ||
| 160 | |||
| 161 | if (dht11->num_edges >= DHT11_EDGES_PER_READ) | ||
| 162 | complete(&dht11->completion); | ||
| 163 | } | ||
| 164 | |||
| 165 | return IRQ_HANDLED; | ||
| 166 | } | ||
| 167 | |||
| 141 | static int dht11_read_raw(struct iio_dev *iio_dev, | 168 | static int dht11_read_raw(struct iio_dev *iio_dev, |
| 142 | const struct iio_chan_spec *chan, | 169 | const struct iio_chan_spec *chan, |
| 143 | int *val, int *val2, long m) | 170 | int *val, int *val2, long m) |
| @@ -145,6 +172,7 @@ static int dht11_read_raw(struct iio_dev *iio_dev, | |||
| 145 | struct dht11 *dht11 = iio_priv(iio_dev); | 172 | struct dht11 *dht11 = iio_priv(iio_dev); |
| 146 | int ret; | 173 | int ret; |
| 147 | 174 | ||
| 175 | mutex_lock(&dht11->lock); | ||
| 148 | if (dht11->timestamp + DHT11_DATA_VALID_TIME < iio_get_time_ns()) { | 176 | if (dht11->timestamp + DHT11_DATA_VALID_TIME < iio_get_time_ns()) { |
| 149 | reinit_completion(&dht11->completion); | 177 | reinit_completion(&dht11->completion); |
| 150 | 178 | ||
| @@ -157,8 +185,17 @@ static int dht11_read_raw(struct iio_dev *iio_dev, | |||
| 157 | if (ret) | 185 | if (ret) |
| 158 | goto err; | 186 | goto err; |
| 159 | 187 | ||
| 188 | ret = request_irq(dht11->irq, dht11_handle_irq, | ||
| 189 | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, | ||
| 190 | iio_dev->name, iio_dev); | ||
| 191 | if (ret) | ||
| 192 | goto err; | ||
| 193 | |||
| 160 | ret = wait_for_completion_killable_timeout(&dht11->completion, | 194 | ret = wait_for_completion_killable_timeout(&dht11->completion, |
| 161 | HZ); | 195 | HZ); |
| 196 | |||
| 197 | free_irq(dht11->irq, iio_dev); | ||
| 198 | |||
| 162 | if (ret == 0 && dht11->num_edges < DHT11_EDGES_PER_READ - 1) { | 199 | if (ret == 0 && dht11->num_edges < DHT11_EDGES_PER_READ - 1) { |
| 163 | dev_err(&iio_dev->dev, | 200 | dev_err(&iio_dev->dev, |
| 164 | "Only %d signal edges detected\n", | 201 | "Only %d signal edges detected\n", |
| @@ -185,6 +222,7 @@ static int dht11_read_raw(struct iio_dev *iio_dev, | |||
| 185 | ret = -EINVAL; | 222 | ret = -EINVAL; |
| 186 | err: | 223 | err: |
| 187 | dht11->num_edges = -1; | 224 | dht11->num_edges = -1; |
| 225 | mutex_unlock(&dht11->lock); | ||
| 188 | return ret; | 226 | return ret; |
| 189 | } | 227 | } |
| 190 | 228 | ||
| @@ -193,27 +231,6 @@ static const struct iio_info dht11_iio_info = { | |||
| 193 | .read_raw = dht11_read_raw, | 231 | .read_raw = dht11_read_raw, |
| 194 | }; | 232 | }; |
| 195 | 233 | ||
| 196 | /* | ||
| 197 | * IRQ handler called on GPIO edges | ||
| 198 | */ | ||
| 199 | static irqreturn_t dht11_handle_irq(int irq, void *data) | ||
| 200 | { | ||
| 201 | struct iio_dev *iio = data; | ||
| 202 | struct dht11 *dht11 = iio_priv(iio); | ||
| 203 | |||
| 204 | /* TODO: Consider making the handler safe for IRQ sharing */ | ||
| 205 | if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) { | ||
| 206 | dht11->edges[dht11->num_edges].ts = iio_get_time_ns(); | ||
| 207 | dht11->edges[dht11->num_edges++].value = | ||
| 208 | gpio_get_value(dht11->gpio); | ||
| 209 | |||
| 210 | if (dht11->num_edges >= DHT11_EDGES_PER_READ) | ||
| 211 | complete(&dht11->completion); | ||
| 212 | } | ||
| 213 | |||
| 214 | return IRQ_HANDLED; | ||
| 215 | } | ||
| 216 | |||
| 217 | static const struct iio_chan_spec dht11_chan_spec[] = { | 234 | static const struct iio_chan_spec dht11_chan_spec[] = { |
| 218 | { .type = IIO_TEMP, | 235 | { .type = IIO_TEMP, |
| 219 | .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), }, | 236 | .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), }, |
| @@ -256,11 +273,6 @@ static int dht11_probe(struct platform_device *pdev) | |||
| 256 | dev_err(dev, "GPIO %d has no interrupt\n", dht11->gpio); | 273 | dev_err(dev, "GPIO %d has no interrupt\n", dht11->gpio); |
| 257 | return -EINVAL; | 274 | return -EINVAL; |
| 258 | } | 275 | } |
| 259 | ret = devm_request_irq(dev, dht11->irq, dht11_handle_irq, | ||
| 260 | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, | ||
| 261 | pdev->name, iio); | ||
| 262 | if (ret) | ||
| 263 | return ret; | ||
| 264 | 276 | ||
| 265 | dht11->timestamp = iio_get_time_ns() - DHT11_DATA_VALID_TIME - 1; | 277 | dht11->timestamp = iio_get_time_ns() - DHT11_DATA_VALID_TIME - 1; |
| 266 | dht11->num_edges = -1; | 278 | dht11->num_edges = -1; |
| @@ -268,6 +280,7 @@ static int dht11_probe(struct platform_device *pdev) | |||
| 268 | platform_set_drvdata(pdev, iio); | 280 | platform_set_drvdata(pdev, iio); |
| 269 | 281 | ||
| 270 | init_completion(&dht11->completion); | 282 | init_completion(&dht11->completion); |
| 283 | mutex_init(&dht11->lock); | ||
| 271 | iio->name = pdev->name; | 284 | iio->name = pdev->name; |
| 272 | iio->dev.parent = &pdev->dev; | 285 | iio->dev.parent = &pdev->dev; |
| 273 | iio->info = &dht11_iio_info; | 286 | iio->info = &dht11_iio_info; |
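Note: the dht11 changes hang together: the GPIO interrupt is now requested only for the duration of a measurement inside dht11_read_raw() and freed afterwards, concurrent readers are serialized with the new mutex, and the handler had to move above dht11_read_raw() so it is declared before use, which is why the devm_request_irq() in probe could go away. A condensed, hedged fragment of the request-around-measurement pattern (priv and measurement_irq are placeholders, error handling trimmed):

    mutex_lock(&priv->lock);
    reinit_completion(&priv->completion);

    ret = request_irq(priv->irq, measurement_irq,
                      IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
                      dev_name(dev), priv);
    if (!ret) {
        /* edges are timestamped in the handler until the buffer fills */
        ret = wait_for_completion_killable_timeout(&priv->completion, HZ);
        free_irq(priv->irq, priv);
    }

    mutex_unlock(&priv->lock);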
diff --git a/drivers/iio/humidity/si7020.c b/drivers/iio/humidity/si7020.c index b54164677b89..fa3b809aff5e 100644 --- a/drivers/iio/humidity/si7020.c +++ b/drivers/iio/humidity/si7020.c | |||
| @@ -45,12 +45,12 @@ static int si7020_read_raw(struct iio_dev *indio_dev, | |||
| 45 | struct iio_chan_spec const *chan, int *val, | 45 | struct iio_chan_spec const *chan, int *val, |
| 46 | int *val2, long mask) | 46 | int *val2, long mask) |
| 47 | { | 47 | { |
| 48 | struct i2c_client *client = iio_priv(indio_dev); | 48 | struct i2c_client **client = iio_priv(indio_dev); |
| 49 | int ret; | 49 | int ret; |
| 50 | 50 | ||
| 51 | switch (mask) { | 51 | switch (mask) { |
| 52 | case IIO_CHAN_INFO_RAW: | 52 | case IIO_CHAN_INFO_RAW: |
| 53 | ret = i2c_smbus_read_word_data(client, | 53 | ret = i2c_smbus_read_word_data(*client, |
| 54 | chan->type == IIO_TEMP ? | 54 | chan->type == IIO_TEMP ? |
| 55 | SI7020CMD_TEMP_HOLD : | 55 | SI7020CMD_TEMP_HOLD : |
| 56 | SI7020CMD_RH_HOLD); | 56 | SI7020CMD_RH_HOLD); |
| @@ -126,7 +126,7 @@ static int si7020_probe(struct i2c_client *client, | |||
| 126 | /* Wait the maximum power-up time after software reset. */ | 126 | /* Wait the maximum power-up time after software reset. */ |
| 127 | msleep(15); | 127 | msleep(15); |
| 128 | 128 | ||
| 129 | indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*client)); | 129 | indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); |
| 130 | if (!indio_dev) | 130 | if (!indio_dev) |
| 131 | return -ENOMEM; | 131 | return -ENOMEM; |
| 132 | 132 | ||
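Note: the si7020 fix hinges on what iio_priv() stores here: probe allocates room for a struct i2c_client * (sizeof(*data)), so the private area holds a pointer to the client rather than a copy of it, and read_raw must dereference that pointer before calling i2c_smbus_read_word_data(). A hedged sketch of the matching allocate/use pair (the si7020_like_* function names are illustrative; SI7020CMD_RH_HOLD is the driver's own command):

    static int si7020_like_probe(struct i2c_client *client)
    {
        struct i2c_client **data;
        struct iio_dev *indio_dev;

        /* room for a pointer to the client, not for the client itself */
        indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
        if (!indio_dev)
            return -ENOMEM;

        data = iio_priv(indio_dev);
        *data = client;
        return 0;
    }

    static int si7020_like_read(struct iio_dev *indio_dev)
    {
        struct i2c_client **client = iio_priv(indio_dev);

        /* dereference before handing the client to the SMBus helper */
        return i2c_smbus_read_word_data(*client, SI7020CMD_RH_HOLD);
    }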
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c index b70873de04ea..fa795dcd5f75 100644 --- a/drivers/iio/imu/adis16400_core.c +++ b/drivers/iio/imu/adis16400_core.c | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | #include <linux/list.h> | 26 | #include <linux/list.h> |
| 27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
| 28 | #include <linux/debugfs.h> | 28 | #include <linux/debugfs.h> |
| 29 | #include <linux/bitops.h> | ||
| 29 | 30 | ||
| 30 | #include <linux/iio/iio.h> | 31 | #include <linux/iio/iio.h> |
| 31 | #include <linux/iio/sysfs.h> | 32 | #include <linux/iio/sysfs.h> |
| @@ -414,7 +415,7 @@ static int adis16400_read_raw(struct iio_dev *indio_dev, | |||
| 414 | mutex_unlock(&indio_dev->mlock); | 415 | mutex_unlock(&indio_dev->mlock); |
| 415 | if (ret) | 416 | if (ret) |
| 416 | return ret; | 417 | return ret; |
| 417 | val16 = ((val16 & 0xFFF) << 4) >> 4; | 418 | val16 = sign_extend32(val16, 11); |
| 418 | *val = val16; | 419 | *val = val16; |
| 419 | return IIO_VAL_INT; | 420 | return IIO_VAL_INT; |
| 420 | case IIO_CHAN_INFO_OFFSET: | 421 | case IIO_CHAN_INFO_OFFSET: |
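Note: the adis16400 change swaps an open-coded shift trick for sign_extend32(val16, 11), which treats bit 11 as the sign bit of a 12-bit two's-complement field and is clearer as well as independent of the variable's width. A small worked example (the raw value is illustrative); sign_extend32() comes from the <linux/bitops.h> include this patch adds:

    #include <linux/bitops.h>

    u16 raw = 0x800;                    /* most negative 12-bit value */
    s32 val = sign_extend32(raw, 11);   /* bit 11 is the sign bit; val == -2048 */

    /* open-coded equivalent being removed: shift up into the sign bit,
     * then arithmetic-shift back down */
    s16 old = ((s16)((raw & 0xfff) << 4)) >> 4;     /* also -2048 */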
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c index f73e60b7a796..d8d5bed65e07 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c | |||
| @@ -780,7 +780,11 @@ static int inv_mpu_probe(struct i2c_client *client, | |||
| 780 | 780 | ||
| 781 | i2c_set_clientdata(client, indio_dev); | 781 | i2c_set_clientdata(client, indio_dev); |
| 782 | indio_dev->dev.parent = &client->dev; | 782 | indio_dev->dev.parent = &client->dev; |
| 783 | indio_dev->name = id->name; | 783 | /* id will be NULL when enumerated via ACPI */ |
| 784 | if (id) | ||
| 785 | indio_dev->name = (char *)id->name; | ||
| 786 | else | ||
| 787 | indio_dev->name = (char *)dev_name(&client->dev); | ||
| 784 | indio_dev->channels = inv_mpu_channels; | 788 | indio_dev->channels = inv_mpu_channels; |
| 785 | indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels); | 789 | indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels); |
| 786 | 790 | ||
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig index ae68c64bdad3..a224afd6380c 100644 --- a/drivers/iio/light/Kconfig +++ b/drivers/iio/light/Kconfig | |||
| @@ -73,6 +73,7 @@ config CM36651 | |||
| 73 | config GP2AP020A00F | 73 | config GP2AP020A00F |
| 74 | tristate "Sharp GP2AP020A00F Proximity/ALS sensor" | 74 | tristate "Sharp GP2AP020A00F Proximity/ALS sensor" |
| 75 | depends on I2C | 75 | depends on I2C |
| 76 | select REGMAP_I2C | ||
| 76 | select IIO_BUFFER | 77 | select IIO_BUFFER |
| 77 | select IIO_TRIGGERED_BUFFER | 78 | select IIO_TRIGGERED_BUFFER |
| 78 | select IRQ_WORK | 79 | select IRQ_WORK |
| @@ -126,6 +127,7 @@ config HID_SENSOR_PROX | |||
| 126 | config JSA1212 | 127 | config JSA1212 |
| 127 | tristate "JSA1212 ALS and proximity sensor driver" | 128 | tristate "JSA1212 ALS and proximity sensor driver" |
| 128 | depends on I2C | 129 | depends on I2C |
| 130 | select REGMAP_I2C | ||
| 129 | help | 131 | help |
| 130 | Say Y here if you want to build a IIO driver for JSA1212 | 132 | Say Y here if you want to build a IIO driver for JSA1212 |
| 131 | proximity & ALS sensor device. | 133 | proximity & ALS sensor device. |
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig index 4c7a4c52dd06..a5d6de72c523 100644 --- a/drivers/iio/magnetometer/Kconfig +++ b/drivers/iio/magnetometer/Kconfig | |||
| @@ -18,6 +18,8 @@ config AK8975 | |||
| 18 | 18 | ||
| 19 | config AK09911 | 19 | config AK09911 |
| 20 | tristate "Asahi Kasei AK09911 3-axis Compass" | 20 | tristate "Asahi Kasei AK09911 3-axis Compass" |
| 21 | depends on I2C | ||
| 22 | depends on GPIOLIB | ||
| 21 | select AK8975 | 23 | select AK8975 |
| 22 | help | 24 | help |
| 23 | Deprecated: AK09911 is now supported by AK8975 driver. | 25 | Deprecated: AK09911 is now supported by AK8975 driver. |
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c index 8ff612d160b0..563932500ff1 100644 --- a/drivers/input/keyboard/tc3589x-keypad.c +++ b/drivers/input/keyboard/tc3589x-keypad.c | |||
| @@ -411,9 +411,9 @@ static int tc3589x_keypad_probe(struct platform_device *pdev) | |||
| 411 | 411 | ||
| 412 | input_set_drvdata(input, keypad); | 412 | input_set_drvdata(input, keypad); |
| 413 | 413 | ||
| 414 | error = request_threaded_irq(irq, NULL, | 414 | error = request_threaded_irq(irq, NULL, tc3589x_keypad_irq, |
| 415 | tc3589x_keypad_irq, plat->irqtype, | 415 | plat->irqtype | IRQF_ONESHOT, |
| 416 | "tc3589x-keypad", keypad); | 416 | "tc3589x-keypad", keypad); |
| 417 | if (error < 0) { | 417 | if (error < 0) { |
| 418 | dev_err(&pdev->dev, | 418 | dev_err(&pdev->dev, |
| 419 | "Could not allocate irq %d,error %d\n", | 419 | "Could not allocate irq %d,error %d\n", |
diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c index 59d4dcddf6de..98228773a111 100644 --- a/drivers/input/misc/mma8450.c +++ b/drivers/input/misc/mma8450.c | |||
| @@ -187,6 +187,7 @@ static int mma8450_probe(struct i2c_client *c, | |||
| 187 | idev->private = m; | 187 | idev->private = m; |
| 188 | idev->input->name = MMA8450_DRV_NAME; | 188 | idev->input->name = MMA8450_DRV_NAME; |
| 189 | idev->input->id.bustype = BUS_I2C; | 189 | idev->input->id.bustype = BUS_I2C; |
| 190 | idev->input->dev.parent = &c->dev; | ||
| 190 | idev->poll = mma8450_poll; | 191 | idev->poll = mma8450_poll; |
| 191 | idev->poll_interval = POLL_INTERVAL; | 192 | idev->poll_interval = POLL_INTERVAL; |
| 192 | idev->poll_interval_max = POLL_INTERVAL_MAX; | 193 | idev->poll_interval_max = POLL_INTERVAL_MAX; |
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index d28726a0ef85..1bd15ebc01f2 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c | |||
| @@ -2605,8 +2605,10 @@ int alps_detect(struct psmouse *psmouse, bool set_properties) | |||
| 2605 | return -ENOMEM; | 2605 | return -ENOMEM; |
| 2606 | 2606 | ||
| 2607 | error = alps_identify(psmouse, priv); | 2607 | error = alps_identify(psmouse, priv); |
| 2608 | if (error) | 2608 | if (error) { |
| 2609 | kfree(priv); | ||
| 2609 | return error; | 2610 | return error; |
| 2611 | } | ||
| 2610 | 2612 | ||
| 2611 | if (set_properties) { | 2613 | if (set_properties) { |
| 2612 | psmouse->vendor = "ALPS"; | 2614 | psmouse->vendor = "ALPS"; |
diff --git a/drivers/input/mouse/cyapa_gen3.c b/drivers/input/mouse/cyapa_gen3.c index 77e9d70a986b..1e2291c378fe 100644 --- a/drivers/input/mouse/cyapa_gen3.c +++ b/drivers/input/mouse/cyapa_gen3.c | |||
| @@ -20,7 +20,7 @@ | |||
| 20 | #include <linux/input/mt.h> | 20 | #include <linux/input/mt.h> |
| 21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
| 22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
| 23 | #include <linux/unaligned/access_ok.h> | 23 | #include <asm/unaligned.h> |
| 24 | #include "cyapa.h" | 24 | #include "cyapa.h" |
| 25 | 25 | ||
| 26 | 26 | ||
diff --git a/drivers/input/mouse/cyapa_gen5.c b/drivers/input/mouse/cyapa_gen5.c index ddf5393a1180..5b611dd71e79 100644 --- a/drivers/input/mouse/cyapa_gen5.c +++ b/drivers/input/mouse/cyapa_gen5.c | |||
| @@ -17,7 +17,7 @@ | |||
| 17 | #include <linux/mutex.h> | 17 | #include <linux/mutex.h> |
| 18 | #include <linux/completion.h> | 18 | #include <linux/completion.h> |
| 19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
| 20 | #include <linux/unaligned/access_ok.h> | 20 | #include <asm/unaligned.h> |
| 21 | #include <linux/crc-itu-t.h> | 21 | #include <linux/crc-itu-t.h> |
| 22 | #include "cyapa.h" | 22 | #include "cyapa.h" |
| 23 | 23 | ||
| @@ -1926,7 +1926,7 @@ static int cyapa_gen5_read_idac_data(struct cyapa *cyapa, | |||
| 1926 | electrodes_tx = cyapa->electrodes_x; | 1926 | electrodes_tx = cyapa->electrodes_x; |
| 1927 | max_element_cnt = ((cyapa->aligned_electrodes_rx + 7) & | 1927 | max_element_cnt = ((cyapa->aligned_electrodes_rx + 7) & |
| 1928 | ~7u) * electrodes_tx; | 1928 | ~7u) * electrodes_tx; |
| 1929 | } else if (idac_data_type == GEN5_RETRIEVE_SELF_CAP_PWC_DATA) { | 1929 | } else { |
| 1930 | offset = 2; | 1930 | offset = 2; |
| 1931 | max_element_cnt = cyapa->electrodes_x + | 1931 | max_element_cnt = cyapa->electrodes_x + |
| 1932 | cyapa->electrodes_y; | 1932 | cyapa->electrodes_y; |
diff --git a/drivers/input/mouse/focaltech.c b/drivers/input/mouse/focaltech.c index 757f78a94aec..23d259416f2f 100644 --- a/drivers/input/mouse/focaltech.c +++ b/drivers/input/mouse/focaltech.c | |||
| @@ -67,9 +67,6 @@ static void focaltech_reset(struct psmouse *psmouse) | |||
| 67 | 67 | ||
| 68 | #define FOC_MAX_FINGERS 5 | 68 | #define FOC_MAX_FINGERS 5 |
| 69 | 69 | ||
| 70 | #define FOC_MAX_X 2431 | ||
| 71 | #define FOC_MAX_Y 1663 | ||
| 72 | |||
| 73 | /* | 70 | /* |
| 74 | * Current state of a single finger on the touchpad. | 71 | * Current state of a single finger on the touchpad. |
| 75 | */ | 72 | */ |
| @@ -129,9 +126,17 @@ static void focaltech_report_state(struct psmouse *psmouse) | |||
| 129 | input_mt_slot(dev, i); | 126 | input_mt_slot(dev, i); |
| 130 | input_mt_report_slot_state(dev, MT_TOOL_FINGER, active); | 127 | input_mt_report_slot_state(dev, MT_TOOL_FINGER, active); |
| 131 | if (active) { | 128 | if (active) { |
| 132 | input_report_abs(dev, ABS_MT_POSITION_X, finger->x); | 129 | unsigned int clamped_x, clamped_y; |
| 130 | /* | ||
| 131 | * The touchpad might report invalid data, so we clamp | ||
| 132 | * the resulting values so that we do not confuse | ||
| 133 | * userspace. | ||
| 134 | */ | ||
| 135 | clamped_x = clamp(finger->x, 0U, priv->x_max); | ||
| 136 | clamped_y = clamp(finger->y, 0U, priv->y_max); | ||
| 137 | input_report_abs(dev, ABS_MT_POSITION_X, clamped_x); | ||
| 133 | input_report_abs(dev, ABS_MT_POSITION_Y, | 138 | input_report_abs(dev, ABS_MT_POSITION_Y, |
| 134 | FOC_MAX_Y - finger->y); | 139 | priv->y_max - clamped_y); |
| 135 | } | 140 | } |
| 136 | } | 141 | } |
| 137 | input_mt_report_pointer_emulation(dev, true); | 142 | input_mt_report_pointer_emulation(dev, true); |
| @@ -180,16 +185,6 @@ static void focaltech_process_abs_packet(struct psmouse *psmouse, | |||
| 180 | 185 | ||
| 181 | state->pressed = (packet[0] >> 4) & 1; | 186 | state->pressed = (packet[0] >> 4) & 1; |
| 182 | 187 | ||
| 183 | /* | ||
| 184 | * packet[5] contains some kind of tool size in the most | ||
| 185 | * significant nibble. 0xff is a special value (latching) that | ||
| 186 | * signals a large contact area. | ||
| 187 | */ | ||
| 188 | if (packet[5] == 0xff) { | ||
| 189 | state->fingers[finger].valid = false; | ||
| 190 | return; | ||
| 191 | } | ||
| 192 | |||
| 193 | state->fingers[finger].x = ((packet[1] & 0xf) << 8) | packet[2]; | 188 | state->fingers[finger].x = ((packet[1] & 0xf) << 8) | packet[2]; |
| 194 | state->fingers[finger].y = (packet[3] << 8) | packet[4]; | 189 | state->fingers[finger].y = (packet[3] << 8) | packet[4]; |
| 195 | state->fingers[finger].valid = true; | 190 | state->fingers[finger].valid = true; |
| @@ -381,6 +376,23 @@ static int focaltech_read_size(struct psmouse *psmouse) | |||
| 381 | 376 | ||
| 382 | return 0; | 377 | return 0; |
| 383 | } | 378 | } |
| 379 | |||
| 380 | void focaltech_set_resolution(struct psmouse *psmouse, unsigned int resolution) | ||
| 381 | { | ||
| 382 | /* not supported yet */ | ||
| 383 | } | ||
| 384 | |||
| 385 | static void focaltech_set_rate(struct psmouse *psmouse, unsigned int rate) | ||
| 386 | { | ||
| 387 | /* not supported yet */ | ||
| 388 | } | ||
| 389 | |||
| 390 | static void focaltech_set_scale(struct psmouse *psmouse, | ||
| 391 | enum psmouse_scale scale) | ||
| 392 | { | ||
| 393 | /* not supported yet */ | ||
| 394 | } | ||
| 395 | |||
| 384 | int focaltech_init(struct psmouse *psmouse) | 396 | int focaltech_init(struct psmouse *psmouse) |
| 385 | { | 397 | { |
| 386 | struct focaltech_data *priv; | 398 | struct focaltech_data *priv; |
| @@ -415,6 +427,14 @@ int focaltech_init(struct psmouse *psmouse) | |||
| 415 | psmouse->cleanup = focaltech_reset; | 427 | psmouse->cleanup = focaltech_reset; |
| 416 | /* resync is not supported yet */ | 428 | /* resync is not supported yet */ |
| 417 | psmouse->resync_time = 0; | 429 | psmouse->resync_time = 0; |
| 430 | /* | ||
| 431 | * rate/resolution/scale changes are not supported yet, and | ||
| 432 | * the generic implementations of these functions seem to | ||
| 433 | * confuse some touchpads | ||
| 434 | */ | ||
| 435 | psmouse->set_resolution = focaltech_set_resolution; | ||
| 436 | psmouse->set_rate = focaltech_set_rate; | ||
| 437 | psmouse->set_scale = focaltech_set_scale; | ||
| 418 | 438 | ||
| 419 | return 0; | 439 | return 0; |
| 420 | 440 | ||
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index 4ccd01d7a48d..8bc61237bc1b 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c | |||
| @@ -454,6 +454,17 @@ static void psmouse_set_rate(struct psmouse *psmouse, unsigned int rate) | |||
| 454 | } | 454 | } |
| 455 | 455 | ||
| 456 | /* | 456 | /* |
| 457 | * Here we set the mouse scaling. | ||
| 458 | */ | ||
| 459 | |||
| 460 | static void psmouse_set_scale(struct psmouse *psmouse, enum psmouse_scale scale) | ||
| 461 | { | ||
| 462 | ps2_command(&psmouse->ps2dev, NULL, | ||
| 463 | scale == PSMOUSE_SCALE21 ? PSMOUSE_CMD_SETSCALE21 : | ||
| 464 | PSMOUSE_CMD_SETSCALE11); | ||
| 465 | } | ||
| 466 | |||
| 467 | /* | ||
| 457 | * psmouse_poll() - default poll handler. Everyone except for ALPS uses it. | 468 | * psmouse_poll() - default poll handler. Everyone except for ALPS uses it. |
| 458 | */ | 469 | */ |
| 459 | 470 | ||
| @@ -689,6 +700,7 @@ static void psmouse_apply_defaults(struct psmouse *psmouse) | |||
| 689 | 700 | ||
| 690 | psmouse->set_rate = psmouse_set_rate; | 701 | psmouse->set_rate = psmouse_set_rate; |
| 691 | psmouse->set_resolution = psmouse_set_resolution; | 702 | psmouse->set_resolution = psmouse_set_resolution; |
| 703 | psmouse->set_scale = psmouse_set_scale; | ||
| 692 | psmouse->poll = psmouse_poll; | 704 | psmouse->poll = psmouse_poll; |
| 693 | psmouse->protocol_handler = psmouse_process_byte; | 705 | psmouse->protocol_handler = psmouse_process_byte; |
| 694 | psmouse->pktsize = 3; | 706 | psmouse->pktsize = 3; |
| @@ -1160,7 +1172,7 @@ static void psmouse_initialize(struct psmouse *psmouse) | |||
| 1160 | if (psmouse_max_proto != PSMOUSE_PS2) { | 1172 | if (psmouse_max_proto != PSMOUSE_PS2) { |
| 1161 | psmouse->set_rate(psmouse, psmouse->rate); | 1173 | psmouse->set_rate(psmouse, psmouse->rate); |
| 1162 | psmouse->set_resolution(psmouse, psmouse->resolution); | 1174 | psmouse->set_resolution(psmouse, psmouse->resolution); |
| 1163 | ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11); | 1175 | psmouse->set_scale(psmouse, PSMOUSE_SCALE11); |
| 1164 | } | 1176 | } |
| 1165 | } | 1177 | } |
| 1166 | 1178 | ||
diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h index c2ff137ecbdb..d02e1bdc9ae4 100644 --- a/drivers/input/mouse/psmouse.h +++ b/drivers/input/mouse/psmouse.h | |||
| @@ -36,6 +36,11 @@ typedef enum { | |||
| 36 | PSMOUSE_FULL_PACKET | 36 | PSMOUSE_FULL_PACKET |
| 37 | } psmouse_ret_t; | 37 | } psmouse_ret_t; |
| 38 | 38 | ||
| 39 | enum psmouse_scale { | ||
| 40 | PSMOUSE_SCALE11, | ||
| 41 | PSMOUSE_SCALE21 | ||
| 42 | }; | ||
| 43 | |||
| 39 | struct psmouse { | 44 | struct psmouse { |
| 40 | void *private; | 45 | void *private; |
| 41 | struct input_dev *dev; | 46 | struct input_dev *dev; |
| @@ -67,6 +72,7 @@ struct psmouse { | |||
| 67 | psmouse_ret_t (*protocol_handler)(struct psmouse *psmouse); | 72 | psmouse_ret_t (*protocol_handler)(struct psmouse *psmouse); |
| 68 | void (*set_rate)(struct psmouse *psmouse, unsigned int rate); | 73 | void (*set_rate)(struct psmouse *psmouse, unsigned int rate); |
| 69 | void (*set_resolution)(struct psmouse *psmouse, unsigned int resolution); | 74 | void (*set_resolution)(struct psmouse *psmouse, unsigned int resolution); |
| 75 | void (*set_scale)(struct psmouse *psmouse, enum psmouse_scale scale); | ||
| 70 | 76 | ||
| 71 | int (*reconnect)(struct psmouse *psmouse); | 77 | int (*reconnect)(struct psmouse *psmouse); |
| 72 | void (*disconnect)(struct psmouse *psmouse); | 78 | void (*disconnect)(struct psmouse *psmouse); |
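Note: psmouse-base.c turns the hard-coded PSMOUSE_CMD_SETSCALE11 call in psmouse_initialize() into an overridable set_scale() method with psmouse_set_scale() as the default, and psmouse.h grows the matching enum and function pointer, so protocol drivers such as focaltech above can install no-op handlers when the generic rate/resolution/scale commands confuse their hardware. A hedged sketch of how a protocol driver would override the hook (the myproto_* names are placeholders):

    static void myproto_set_scale(struct psmouse *psmouse,
                                  enum psmouse_scale scale)
    {
        /* hardware has no scaling command: deliberately do nothing */
    }

    static int myproto_init(struct psmouse *psmouse)
    {
        /* psmouse_apply_defaults() has already installed psmouse_set_scale */
        psmouse->set_scale = myproto_set_scale;
        return 0;
    }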
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index 58917525126e..6261fd6d7c3c 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig | |||
| @@ -943,6 +943,7 @@ config TOUCHSCREEN_SUN4I | |||
| 943 | tristate "Allwinner sun4i resistive touchscreen controller support" | 943 | tristate "Allwinner sun4i resistive touchscreen controller support" |
| 944 | depends on ARCH_SUNXI || COMPILE_TEST | 944 | depends on ARCH_SUNXI || COMPILE_TEST |
| 945 | depends on HWMON | 945 | depends on HWMON |
| 946 | depends on THERMAL || !THERMAL_OF | ||
| 946 | help | 947 | help |
| 947 | This selects support for the resistive touchscreen controller | 948 | This selects support for the resistive touchscreen controller |
| 948 | found on Allwinner sunxi SoCs. | 949 | found on Allwinner sunxi SoCs. |
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index baa0d9786f50..1ae4e547b419 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig | |||
| @@ -23,6 +23,7 @@ config IOMMU_IO_PGTABLE | |||
| 23 | config IOMMU_IO_PGTABLE_LPAE | 23 | config IOMMU_IO_PGTABLE_LPAE |
| 24 | bool "ARMv7/v8 Long Descriptor Format" | 24 | bool "ARMv7/v8 Long Descriptor Format" |
| 25 | select IOMMU_IO_PGTABLE | 25 | select IOMMU_IO_PGTABLE |
| 26 | depends on ARM || ARM64 || COMPILE_TEST | ||
| 26 | help | 27 | help |
| 27 | Enable support for the ARM long descriptor pagetable format. | 28 | Enable support for the ARM long descriptor pagetable format. |
| 28 | This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page | 29 | This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page |
| @@ -63,6 +64,7 @@ config MSM_IOMMU | |||
| 63 | bool "MSM IOMMU Support" | 64 | bool "MSM IOMMU Support" |
| 64 | depends on ARM | 65 | depends on ARM |
| 65 | depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST | 66 | depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST |
| 67 | depends on BROKEN | ||
| 66 | select IOMMU_API | 68 | select IOMMU_API |
| 67 | help | 69 | help |
| 68 | Support for the IOMMUs found on certain Qualcomm SOCs. | 70 | Support for the IOMMUs found on certain Qualcomm SOCs. |
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 7ce52737c7a1..dc14fec4ede1 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c | |||
| @@ -1186,8 +1186,15 @@ static const struct iommu_ops exynos_iommu_ops = { | |||
| 1186 | 1186 | ||
| 1187 | static int __init exynos_iommu_init(void) | 1187 | static int __init exynos_iommu_init(void) |
| 1188 | { | 1188 | { |
| 1189 | struct device_node *np; | ||
| 1189 | int ret; | 1190 | int ret; |
| 1190 | 1191 | ||
| 1192 | np = of_find_matching_node(NULL, sysmmu_of_match); | ||
| 1193 | if (!np) | ||
| 1194 | return 0; | ||
| 1195 | |||
| 1196 | of_node_put(np); | ||
| 1197 | |||
| 1191 | lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table", | 1198 | lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table", |
| 1192 | LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL); | 1199 | LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL); |
| 1193 | if (!lv2table_kmem_cache) { | 1200 | if (!lv2table_kmem_cache) { |
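
The exynos-iommu hunk above (and the matching omap-iommu and rockchip-iommu hunks below) adds the same guard: look up a matching device-tree node first and leave the initcall early when there is none, so kmem caches and IOMMU ops are only set up on platforms that actually have the hardware. A minimal kernel-context sketch of the pattern, using generic placeholder names rather than any of the three drivers' match tables:

    static const struct of_device_id example_iommu_of_match[] = {
    	{ .compatible = "vendor,example-iommu" },	/* placeholder */
    	{ /* sentinel */ },
    };

    static int __init example_iommu_init(void)
    {
    	struct device_node *np;

    	np = of_find_matching_node(NULL, example_iommu_of_match);
    	if (!np)
    		return 0;	/* no such IOMMU in this DT: nothing to do */
    	of_node_put(np);	/* the node was only an existence check */

    	/* ... kmem_cache_create(), bus_set_iommu(), driver registration ... */
    	return 0;
    }

of_find_matching_node() takes a reference on the node it returns, hence the immediate of_node_put() once the existence check has served its purpose.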
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 5a500edf00cc..b610a8dee238 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c | |||
| @@ -56,7 +56,8 @@ | |||
| 56 | ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \ | 56 | ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \ |
| 57 | * (d)->bits_per_level) + (d)->pg_shift) | 57 | * (d)->bits_per_level) + (d)->pg_shift) |
| 58 | 58 | ||
| 59 | #define ARM_LPAE_PAGES_PER_PGD(d) ((d)->pgd_size >> (d)->pg_shift) | 59 | #define ARM_LPAE_PAGES_PER_PGD(d) \ |
| 60 | DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift) | ||
| 60 | 61 | ||
| 61 | /* | 62 | /* |
| 62 | * Calculate the index at level l used to map virtual address a using the | 63 | * Calculate the index at level l used to map virtual address a using the |
| @@ -66,7 +67,7 @@ | |||
| 66 | ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0) | 67 | ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0) |
| 67 | 68 | ||
| 68 | #define ARM_LPAE_LVL_IDX(a,l,d) \ | 69 | #define ARM_LPAE_LVL_IDX(a,l,d) \ |
| 69 | (((a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ | 70 | (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ |
| 70 | ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1)) | 71 | ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1)) |
| 71 | 72 | ||
| 72 | /* Calculate the block/page mapping size at level l for pagetable in d. */ | 73 | /* Calculate the block/page mapping size at level l for pagetable in d. */ |
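
Both io-pgtable-arm fixes above are plain integer arithmetic. The concatenated PGD of a long-descriptor table can be smaller than one granule, in which case pgd_size >> pg_shift rounds down to zero pages (and ilog2() of that is meaningless), while DIV_ROUND_UP() never returns less than one; and doing the level shift in 64-bit arithmetic keeps the upper-level index well defined and correct for addresses that do not fit in 32 bits. A small self-contained demo of the same arithmetic; the sizes and the 4K-granule level-1 shift are illustrative, not taken from any particular SMMU configuration:

    #include <stdio.h>
    #include <stdint.h>

    #define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

    int main(void)
    {
    	unsigned long pgd_size = 64;	/* e.g. 8 top-level entries of 8 bytes */
    	unsigned long pg_shift = 12;	/* 4K granule */

    	/* old macro vs. new macro for "pages per PGD" */
    	printf("shift: %lu  DIV_ROUND_UP: %lu\n",
    	       pgd_size >> pg_shift,
    	       DIV_ROUND_UP(pgd_size, 1UL << pg_shift));

    	/* level-1 index (shift 30, 9-bit mask) of an IOVA above 4GiB:
    	 * computing it on a truncated 32-bit value picks the wrong slot */
    	uint64_t iova = 0x1ffff0000ULL;

    	printf("index from truncated iova: %u  from u64 iova: %u\n",
    	       (unsigned int)(((uint32_t)iova >> 30) & 0x1ff),
    	       (unsigned int)((iova >> 30) & 0x1ff));
    	return 0;
    }

It prints 0 versus 1 for the page count and 3 versus 7 for the index, which is the difference the two macro changes guard against.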
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index f59f857b702e..a4ba851825c2 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c | |||
| @@ -1376,6 +1376,13 @@ static int __init omap_iommu_init(void) | |||
| 1376 | struct kmem_cache *p; | 1376 | struct kmem_cache *p; |
| 1377 | const unsigned long flags = SLAB_HWCACHE_ALIGN; | 1377 | const unsigned long flags = SLAB_HWCACHE_ALIGN; |
| 1378 | size_t align = 1 << 10; /* L2 pagetable alignement */ | 1378 | size_t align = 1 << 10; /* L2 pagetable alignement */ |
| 1379 | struct device_node *np; | ||
| 1380 | |||
| 1381 | np = of_find_matching_node(NULL, omap_iommu_of_match); | ||
| 1382 | if (!np) | ||
| 1383 | return 0; | ||
| 1384 | |||
| 1385 | of_node_put(np); | ||
| 1379 | 1386 | ||
| 1380 | p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags, | 1387 | p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags, |
| 1381 | iopte_cachep_ctor); | 1388 | iopte_cachep_ctor); |
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 6a8b1ec4a48a..9f74fddcd304 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c | |||
| @@ -1015,8 +1015,15 @@ static struct platform_driver rk_iommu_driver = { | |||
| 1015 | 1015 | ||
| 1016 | static int __init rk_iommu_init(void) | 1016 | static int __init rk_iommu_init(void) |
| 1017 | { | 1017 | { |
| 1018 | struct device_node *np; | ||
| 1018 | int ret; | 1019 | int ret; |
| 1019 | 1020 | ||
| 1021 | np = of_find_matching_node(NULL, rk_iommu_dt_ids); | ||
| 1022 | if (!np) | ||
| 1023 | return 0; | ||
| 1024 | |||
| 1025 | of_node_put(np); | ||
| 1026 | |||
| 1020 | ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops); | 1027 | ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops); |
| 1021 | if (ret) | 1028 | if (ret) |
| 1022 | return ret; | 1029 | return ret; |
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c index 463c235acbdc..4387dae14e45 100644 --- a/drivers/irqchip/irq-armada-370-xp.c +++ b/drivers/irqchip/irq-armada-370-xp.c | |||
| @@ -69,6 +69,7 @@ static void __iomem *per_cpu_int_base; | |||
| 69 | static void __iomem *main_int_base; | 69 | static void __iomem *main_int_base; |
| 70 | static struct irq_domain *armada_370_xp_mpic_domain; | 70 | static struct irq_domain *armada_370_xp_mpic_domain; |
| 71 | static u32 doorbell_mask_reg; | 71 | static u32 doorbell_mask_reg; |
| 72 | static int parent_irq; | ||
| 72 | #ifdef CONFIG_PCI_MSI | 73 | #ifdef CONFIG_PCI_MSI |
| 73 | static struct irq_domain *armada_370_xp_msi_domain; | 74 | static struct irq_domain *armada_370_xp_msi_domain; |
| 74 | static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR); | 75 | static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR); |
| @@ -356,6 +357,7 @@ static int armada_xp_mpic_secondary_init(struct notifier_block *nfb, | |||
| 356 | { | 357 | { |
| 357 | if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) | 358 | if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) |
| 358 | armada_xp_mpic_smp_cpu_init(); | 359 | armada_xp_mpic_smp_cpu_init(); |
| 360 | |||
| 359 | return NOTIFY_OK; | 361 | return NOTIFY_OK; |
| 360 | } | 362 | } |
| 361 | 363 | ||
| @@ -364,6 +366,20 @@ static struct notifier_block armada_370_xp_mpic_cpu_notifier = { | |||
| 364 | .priority = 100, | 366 | .priority = 100, |
| 365 | }; | 367 | }; |
| 366 | 368 | ||
| 369 | static int mpic_cascaded_secondary_init(struct notifier_block *nfb, | ||
| 370 | unsigned long action, void *hcpu) | ||
| 371 | { | ||
| 372 | if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) | ||
| 373 | enable_percpu_irq(parent_irq, IRQ_TYPE_NONE); | ||
| 374 | |||
| 375 | return NOTIFY_OK; | ||
| 376 | } | ||
| 377 | |||
| 378 | static struct notifier_block mpic_cascaded_cpu_notifier = { | ||
| 379 | .notifier_call = mpic_cascaded_secondary_init, | ||
| 380 | .priority = 100, | ||
| 381 | }; | ||
| 382 | |||
| 367 | #endif /* CONFIG_SMP */ | 383 | #endif /* CONFIG_SMP */ |
| 368 | 384 | ||
| 369 | static struct irq_domain_ops armada_370_xp_mpic_irq_ops = { | 385 | static struct irq_domain_ops armada_370_xp_mpic_irq_ops = { |
| @@ -539,7 +555,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, | |||
| 539 | struct device_node *parent) | 555 | struct device_node *parent) |
| 540 | { | 556 | { |
| 541 | struct resource main_int_res, per_cpu_int_res; | 557 | struct resource main_int_res, per_cpu_int_res; |
| 542 | int parent_irq, nr_irqs, i; | 558 | int nr_irqs, i; |
| 543 | u32 control; | 559 | u32 control; |
| 544 | 560 | ||
| 545 | BUG_ON(of_address_to_resource(node, 0, &main_int_res)); | 561 | BUG_ON(of_address_to_resource(node, 0, &main_int_res)); |
| @@ -587,6 +603,9 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, | |||
| 587 | register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier); | 603 | register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier); |
| 588 | #endif | 604 | #endif |
| 589 | } else { | 605 | } else { |
| 606 | #ifdef CONFIG_SMP | ||
| 607 | register_cpu_notifier(&mpic_cascaded_cpu_notifier); | ||
| 608 | #endif | ||
| 590 | irq_set_chained_handler(parent_irq, | 609 | irq_set_chained_handler(parent_irq, |
| 591 | armada_370_xp_mpic_handle_cascade_irq); | 610 | armada_370_xp_mpic_handle_cascade_irq); |
| 592 | } | 611 | } |
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index d8996bdf0f61..596b0a9eee99 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
| @@ -416,13 +416,14 @@ static void its_send_single_command(struct its_node *its, | |||
| 416 | { | 416 | { |
| 417 | struct its_cmd_block *cmd, *sync_cmd, *next_cmd; | 417 | struct its_cmd_block *cmd, *sync_cmd, *next_cmd; |
| 418 | struct its_collection *sync_col; | 418 | struct its_collection *sync_col; |
| 419 | unsigned long flags; | ||
| 419 | 420 | ||
| 420 | raw_spin_lock(&its->lock); | 421 | raw_spin_lock_irqsave(&its->lock, flags); |
| 421 | 422 | ||
| 422 | cmd = its_allocate_entry(its); | 423 | cmd = its_allocate_entry(its); |
| 423 | if (!cmd) { /* We're soooooo screewed... */ | 424 | if (!cmd) { /* We're soooooo screewed... */ |
| 424 | pr_err_ratelimited("ITS can't allocate, dropping command\n"); | 425 | pr_err_ratelimited("ITS can't allocate, dropping command\n"); |
| 425 | raw_spin_unlock(&its->lock); | 426 | raw_spin_unlock_irqrestore(&its->lock, flags); |
| 426 | return; | 427 | return; |
| 427 | } | 428 | } |
| 428 | sync_col = builder(cmd, desc); | 429 | sync_col = builder(cmd, desc); |
| @@ -442,7 +443,7 @@ static void its_send_single_command(struct its_node *its, | |||
| 442 | 443 | ||
| 443 | post: | 444 | post: |
| 444 | next_cmd = its_post_commands(its); | 445 | next_cmd = its_post_commands(its); |
| 445 | raw_spin_unlock(&its->lock); | 446 | raw_spin_unlock_irqrestore(&its->lock, flags); |
| 446 | 447 | ||
| 447 | its_wait_for_range_completion(its, cmd, next_cmd); | 448 | its_wait_for_range_completion(its, cmd, next_cmd); |
| 448 | } | 449 | } |
| @@ -799,21 +800,43 @@ static int its_alloc_tables(struct its_node *its) | |||
| 799 | { | 800 | { |
| 800 | int err; | 801 | int err; |
| 801 | int i; | 802 | int i; |
| 802 | int psz = PAGE_SIZE; | 803 | int psz = SZ_64K; |
| 803 | u64 shr = GITS_BASER_InnerShareable; | 804 | u64 shr = GITS_BASER_InnerShareable; |
| 804 | 805 | ||
| 805 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { | 806 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { |
| 806 | u64 val = readq_relaxed(its->base + GITS_BASER + i * 8); | 807 | u64 val = readq_relaxed(its->base + GITS_BASER + i * 8); |
| 807 | u64 type = GITS_BASER_TYPE(val); | 808 | u64 type = GITS_BASER_TYPE(val); |
| 808 | u64 entry_size = GITS_BASER_ENTRY_SIZE(val); | 809 | u64 entry_size = GITS_BASER_ENTRY_SIZE(val); |
| 810 | int order = get_order(psz); | ||
| 811 | int alloc_size; | ||
| 809 | u64 tmp; | 812 | u64 tmp; |
| 810 | void *base; | 813 | void *base; |
| 811 | 814 | ||
| 812 | if (type == GITS_BASER_TYPE_NONE) | 815 | if (type == GITS_BASER_TYPE_NONE) |
| 813 | continue; | 816 | continue; |
| 814 | 817 | ||
| 815 | /* We're lazy and only allocate a single page for now */ | 818 | /* |
| 816 | base = (void *)get_zeroed_page(GFP_KERNEL); | 819 | * Allocate as many entries as required to fit the |
| 820 | * range of device IDs that the ITS can grok... The ID | ||
| 821 | * space being incredibly sparse, this results in a | ||
| 822 | * massive waste of memory. | ||
| 823 | * | ||
| 824 | * For other tables, only allocate a single page. | ||
| 825 | */ | ||
| 826 | if (type == GITS_BASER_TYPE_DEVICE) { | ||
| 827 | u64 typer = readq_relaxed(its->base + GITS_TYPER); | ||
| 828 | u32 ids = GITS_TYPER_DEVBITS(typer); | ||
| 829 | |||
| 830 | order = get_order((1UL << ids) * entry_size); | ||
| 831 | if (order >= MAX_ORDER) { | ||
| 832 | order = MAX_ORDER - 1; | ||
| 833 | pr_warn("%s: Device Table too large, reduce its page order to %u\n", | ||
| 834 | its->msi_chip.of_node->full_name, order); | ||
| 835 | } | ||
| 836 | } | ||
| 837 | |||
| 838 | alloc_size = (1 << order) * PAGE_SIZE; | ||
| 839 | base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); | ||
| 817 | if (!base) { | 840 | if (!base) { |
| 818 | err = -ENOMEM; | 841 | err = -ENOMEM; |
| 819 | goto out_free; | 842 | goto out_free; |
| @@ -841,7 +864,7 @@ retry_baser: | |||
| 841 | break; | 864 | break; |
| 842 | } | 865 | } |
| 843 | 866 | ||
| 844 | val |= (PAGE_SIZE / psz) - 1; | 867 | val |= (alloc_size / psz) - 1; |
| 845 | 868 | ||
| 846 | writeq_relaxed(val, its->base + GITS_BASER + i * 8); | 869 | writeq_relaxed(val, its->base + GITS_BASER + i * 8); |
| 847 | tmp = readq_relaxed(its->base + GITS_BASER + i * 8); | 870 | tmp = readq_relaxed(its->base + GITS_BASER + i * 8); |
| @@ -882,7 +905,7 @@ retry_baser: | |||
| 882 | } | 905 | } |
| 883 | 906 | ||
| 884 | pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n", | 907 | pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n", |
| 885 | (int)(PAGE_SIZE / entry_size), | 908 | (int)(alloc_size / entry_size), |
| 886 | its_base_type_string[type], | 909 | its_base_type_string[type], |
| 887 | (unsigned long)virt_to_phys(base), | 910 | (unsigned long)virt_to_phys(base), |
| 888 | psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); | 911 | psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); |
| @@ -1020,8 +1043,9 @@ static void its_cpu_init_collection(void) | |||
| 1020 | static struct its_device *its_find_device(struct its_node *its, u32 dev_id) | 1043 | static struct its_device *its_find_device(struct its_node *its, u32 dev_id) |
| 1021 | { | 1044 | { |
| 1022 | struct its_device *its_dev = NULL, *tmp; | 1045 | struct its_device *its_dev = NULL, *tmp; |
| 1046 | unsigned long flags; | ||
| 1023 | 1047 | ||
| 1024 | raw_spin_lock(&its->lock); | 1048 | raw_spin_lock_irqsave(&its->lock, flags); |
| 1025 | 1049 | ||
| 1026 | list_for_each_entry(tmp, &its->its_device_list, entry) { | 1050 | list_for_each_entry(tmp, &its->its_device_list, entry) { |
| 1027 | if (tmp->device_id == dev_id) { | 1051 | if (tmp->device_id == dev_id) { |
| @@ -1030,7 +1054,7 @@ static struct its_device *its_find_device(struct its_node *its, u32 dev_id) | |||
| 1030 | } | 1054 | } |
| 1031 | } | 1055 | } |
| 1032 | 1056 | ||
| 1033 | raw_spin_unlock(&its->lock); | 1057 | raw_spin_unlock_irqrestore(&its->lock, flags); |
| 1034 | 1058 | ||
| 1035 | return its_dev; | 1059 | return its_dev; |
| 1036 | } | 1060 | } |
| @@ -1040,6 +1064,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, | |||
| 1040 | { | 1064 | { |
| 1041 | struct its_device *dev; | 1065 | struct its_device *dev; |
| 1042 | unsigned long *lpi_map; | 1066 | unsigned long *lpi_map; |
| 1067 | unsigned long flags; | ||
| 1043 | void *itt; | 1068 | void *itt; |
| 1044 | int lpi_base; | 1069 | int lpi_base; |
| 1045 | int nr_lpis; | 1070 | int nr_lpis; |
| @@ -1056,7 +1081,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, | |||
| 1056 | nr_ites = max(2UL, roundup_pow_of_two(nvecs)); | 1081 | nr_ites = max(2UL, roundup_pow_of_two(nvecs)); |
| 1057 | sz = nr_ites * its->ite_size; | 1082 | sz = nr_ites * its->ite_size; |
| 1058 | sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; | 1083 | sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; |
| 1059 | itt = kmalloc(sz, GFP_KERNEL); | 1084 | itt = kzalloc(sz, GFP_KERNEL); |
| 1060 | lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis); | 1085 | lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis); |
| 1061 | 1086 | ||
| 1062 | if (!dev || !itt || !lpi_map) { | 1087 | if (!dev || !itt || !lpi_map) { |
| @@ -1075,9 +1100,9 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, | |||
| 1075 | dev->device_id = dev_id; | 1100 | dev->device_id = dev_id; |
| 1076 | INIT_LIST_HEAD(&dev->entry); | 1101 | INIT_LIST_HEAD(&dev->entry); |
| 1077 | 1102 | ||
| 1078 | raw_spin_lock(&its->lock); | 1103 | raw_spin_lock_irqsave(&its->lock, flags); |
| 1079 | list_add(&dev->entry, &its->its_device_list); | 1104 | list_add(&dev->entry, &its->its_device_list); |
| 1080 | raw_spin_unlock(&its->lock); | 1105 | raw_spin_unlock_irqrestore(&its->lock, flags); |
| 1081 | 1106 | ||
| 1082 | /* Bind the device to the first possible CPU */ | 1107 | /* Bind the device to the first possible CPU */ |
| 1083 | cpu = cpumask_first(cpu_online_mask); | 1108 | cpu = cpumask_first(cpu_online_mask); |
| @@ -1091,9 +1116,11 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, | |||
| 1091 | 1116 | ||
| 1092 | static void its_free_device(struct its_device *its_dev) | 1117 | static void its_free_device(struct its_device *its_dev) |
| 1093 | { | 1118 | { |
| 1094 | raw_spin_lock(&its_dev->its->lock); | 1119 | unsigned long flags; |
| 1120 | |||
| 1121 | raw_spin_lock_irqsave(&its_dev->its->lock, flags); | ||
| 1095 | list_del(&its_dev->entry); | 1122 | list_del(&its_dev->entry); |
| 1096 | raw_spin_unlock(&its_dev->its->lock); | 1123 | raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); |
| 1097 | kfree(its_dev->itt); | 1124 | kfree(its_dev->itt); |
| 1098 | kfree(its_dev); | 1125 | kfree(its_dev); |
| 1099 | } | 1126 | } |
| @@ -1112,31 +1139,69 @@ static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq) | |||
| 1112 | return 0; | 1139 | return 0; |
| 1113 | } | 1140 | } |
| 1114 | 1141 | ||
| 1142 | struct its_pci_alias { | ||
| 1143 | struct pci_dev *pdev; | ||
| 1144 | u32 dev_id; | ||
| 1145 | u32 count; | ||
| 1146 | }; | ||
| 1147 | |||
| 1148 | static int its_pci_msi_vec_count(struct pci_dev *pdev) | ||
| 1149 | { | ||
| 1150 | int msi, msix; | ||
| 1151 | |||
| 1152 | msi = max(pci_msi_vec_count(pdev), 0); | ||
| 1153 | msix = max(pci_msix_vec_count(pdev), 0); | ||
| 1154 | |||
| 1155 | return max(msi, msix); | ||
| 1156 | } | ||
| 1157 | |||
| 1158 | static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data) | ||
| 1159 | { | ||
| 1160 | struct its_pci_alias *dev_alias = data; | ||
| 1161 | |||
| 1162 | dev_alias->dev_id = alias; | ||
| 1163 | if (pdev != dev_alias->pdev) | ||
| 1164 | dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev); | ||
| 1165 | |||
| 1166 | return 0; | ||
| 1167 | } | ||
| 1168 | |||
| 1115 | static int its_msi_prepare(struct irq_domain *domain, struct device *dev, | 1169 | static int its_msi_prepare(struct irq_domain *domain, struct device *dev, |
| 1116 | int nvec, msi_alloc_info_t *info) | 1170 | int nvec, msi_alloc_info_t *info) |
| 1117 | { | 1171 | { |
| 1118 | struct pci_dev *pdev; | 1172 | struct pci_dev *pdev; |
| 1119 | struct its_node *its; | 1173 | struct its_node *its; |
| 1120 | u32 dev_id; | ||
| 1121 | struct its_device *its_dev; | 1174 | struct its_device *its_dev; |
| 1175 | struct its_pci_alias dev_alias; | ||
| 1122 | 1176 | ||
| 1123 | if (!dev_is_pci(dev)) | 1177 | if (!dev_is_pci(dev)) |
| 1124 | return -EINVAL; | 1178 | return -EINVAL; |
| 1125 | 1179 | ||
| 1126 | pdev = to_pci_dev(dev); | 1180 | pdev = to_pci_dev(dev); |
| 1127 | dev_id = PCI_DEVID(pdev->bus->number, pdev->devfn); | 1181 | dev_alias.pdev = pdev; |
| 1182 | dev_alias.count = nvec; | ||
| 1183 | |||
| 1184 | pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias); | ||
| 1128 | its = domain->parent->host_data; | 1185 | its = domain->parent->host_data; |
| 1129 | 1186 | ||
| 1130 | its_dev = its_find_device(its, dev_id); | 1187 | its_dev = its_find_device(its, dev_alias.dev_id); |
| 1131 | if (WARN_ON(its_dev)) | 1188 | if (its_dev) { |
| 1132 | return -EINVAL; | 1189 | /* |
| 1190 | * We already have seen this ID, probably through | ||
| 1191 | * another alias (PCI bridge of some sort). No need to | ||
| 1192 | * create the device. | ||
| 1193 | */ | ||
| 1194 | dev_dbg(dev, "Reusing ITT for devID %x\n", dev_alias.dev_id); | ||
| 1195 | goto out; | ||
| 1196 | } | ||
| 1133 | 1197 | ||
| 1134 | its_dev = its_create_device(its, dev_id, nvec); | 1198 | its_dev = its_create_device(its, dev_alias.dev_id, dev_alias.count); |
| 1135 | if (!its_dev) | 1199 | if (!its_dev) |
| 1136 | return -ENOMEM; | 1200 | return -ENOMEM; |
| 1137 | 1201 | ||
| 1138 | dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n", nvec, ilog2(nvec)); | 1202 | dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n", |
| 1139 | 1203 | dev_alias.count, ilog2(dev_alias.count)); | |
| 1204 | out: | ||
| 1140 | info->scratchpad[0].ptr = its_dev; | 1205 | info->scratchpad[0].ptr = its_dev; |
| 1141 | info->scratchpad[1].ptr = dev; | 1206 | info->scratchpad[1].ptr = dev; |
| 1142 | return 0; | 1207 | return 0; |
| @@ -1255,6 +1320,34 @@ static const struct irq_domain_ops its_domain_ops = { | |||
| 1255 | .deactivate = its_irq_domain_deactivate, | 1320 | .deactivate = its_irq_domain_deactivate, |
| 1256 | }; | 1321 | }; |
| 1257 | 1322 | ||
| 1323 | static int its_force_quiescent(void __iomem *base) | ||
| 1324 | { | ||
| 1325 | u32 count = 1000000; /* 1s */ | ||
| 1326 | u32 val; | ||
| 1327 | |||
| 1328 | val = readl_relaxed(base + GITS_CTLR); | ||
| 1329 | if (val & GITS_CTLR_QUIESCENT) | ||
| 1330 | return 0; | ||
| 1331 | |||
| 1332 | /* Disable the generation of all interrupts to this ITS */ | ||
| 1333 | val &= ~GITS_CTLR_ENABLE; | ||
| 1334 | writel_relaxed(val, base + GITS_CTLR); | ||
| 1335 | |||
| 1336 | /* Poll GITS_CTLR and wait until ITS becomes quiescent */ | ||
| 1337 | while (1) { | ||
| 1338 | val = readl_relaxed(base + GITS_CTLR); | ||
| 1339 | if (val & GITS_CTLR_QUIESCENT) | ||
| 1340 | return 0; | ||
| 1341 | |||
| 1342 | count--; | ||
| 1343 | if (!count) | ||
| 1344 | return -EBUSY; | ||
| 1345 | |||
| 1346 | cpu_relax(); | ||
| 1347 | udelay(1); | ||
| 1348 | } | ||
| 1349 | } | ||
| 1350 | |||
| 1258 | static int its_probe(struct device_node *node, struct irq_domain *parent) | 1351 | static int its_probe(struct device_node *node, struct irq_domain *parent) |
| 1259 | { | 1352 | { |
| 1260 | struct resource res; | 1353 | struct resource res; |
| @@ -1283,6 +1376,13 @@ static int its_probe(struct device_node *node, struct irq_domain *parent) | |||
| 1283 | goto out_unmap; | 1376 | goto out_unmap; |
| 1284 | } | 1377 | } |
| 1285 | 1378 | ||
| 1379 | err = its_force_quiescent(its_base); | ||
| 1380 | if (err) { | ||
| 1381 | pr_warn("%s: failed to quiesce, giving up\n", | ||
| 1382 | node->full_name); | ||
| 1383 | goto out_unmap; | ||
| 1384 | } | ||
| 1385 | |||
| 1286 | pr_info("ITS: %s\n", node->full_name); | 1386 | pr_info("ITS: %s\n", node->full_name); |
| 1287 | 1387 | ||
| 1288 | its = kzalloc(sizeof(*its), GFP_KERNEL); | 1388 | its = kzalloc(sizeof(*its), GFP_KERNEL); |
| @@ -1323,7 +1423,7 @@ static int its_probe(struct device_node *node, struct irq_domain *parent) | |||
| 1323 | writeq_relaxed(baser, its->base + GITS_CBASER); | 1423 | writeq_relaxed(baser, its->base + GITS_CBASER); |
| 1324 | tmp = readq_relaxed(its->base + GITS_CBASER); | 1424 | tmp = readq_relaxed(its->base + GITS_CBASER); |
| 1325 | writeq_relaxed(0, its->base + GITS_CWRITER); | 1425 | writeq_relaxed(0, its->base + GITS_CWRITER); |
| 1326 | writel_relaxed(1, its->base + GITS_CTLR); | 1426 | writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR); |
| 1327 | 1427 | ||
| 1328 | if ((tmp ^ baser) & GITS_BASER_SHAREABILITY_MASK) { | 1428 | if ((tmp ^ baser) & GITS_BASER_SHAREABILITY_MASK) { |
| 1329 | pr_info("ITS: using cache flushing for cmd queue\n"); | 1429 | pr_info("ITS: using cache flushing for cmd queue\n"); |
| @@ -1382,12 +1482,11 @@ static bool gic_rdists_supports_plpis(void) | |||
| 1382 | 1482 | ||
| 1383 | int its_cpu_init(void) | 1483 | int its_cpu_init(void) |
| 1384 | { | 1484 | { |
| 1385 | if (!gic_rdists_supports_plpis()) { | ||
| 1386 | pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); | ||
| 1387 | return -ENXIO; | ||
| 1388 | } | ||
| 1389 | |||
| 1390 | if (!list_empty(&its_nodes)) { | 1485 | if (!list_empty(&its_nodes)) { |
| 1486 | if (!gic_rdists_supports_plpis()) { | ||
| 1487 | pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); | ||
| 1488 | return -ENXIO; | ||
| 1489 | } | ||
| 1391 | its_cpu_init_lpis(); | 1490 | its_cpu_init_lpis(); |
| 1392 | its_cpu_init_collection(); | 1491 | its_cpu_init_collection(); |
| 1393 | } | 1492 | } |
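
Much of the ITS diff above is the _irqsave locking conversion plus the force-quiescent and PCI-alias handling, but the interesting arithmetic is in the device-table sizing: GITS_TYPER advertises how many DeviceID bits the ITS implements, the table gets one entry per possible ID, the buddy allocator caps the allocation at MAX_ORDER - 1, and the number of psz-sized pages minus one is what lands in the GITS_BASER size field. A self-contained demo of that calculation; PAGE_SIZE, MAX_ORDER and the 64K starting psz mirror the patch, while the entry size and the 20 DeviceID bits are made-up example values:

    #include <stdio.h>

    #define PAGE_SHIFT	12
    #define PAGE_SIZE	(1UL << PAGE_SHIFT)
    #define MAX_ORDER	11		/* common default */
    #define SZ_64K		0x10000

    /* smallest order such that 2^order pages cover size bytes */
    static int get_order(unsigned long size)
    {
    	int order = 0;

    	size = (size - 1) >> PAGE_SHIFT;
    	while (size) {
    		order++;
    		size >>= 1;
    	}
    	return order;
    }

    int main(void)
    {
    	unsigned long entry_size = 8;	/* bytes per device-table entry */
    	unsigned int ids = 20;		/* DeviceID bits from GITS_TYPER */
    	int psz = SZ_64K;
    	int order = get_order((1UL << ids) * entry_size);

    	if (order >= MAX_ORDER)		/* too big for the buddy allocator: clamp */
    		order = MAX_ORDER - 1;

    	unsigned long alloc_size = (1UL << order) * PAGE_SIZE;

    	printf("order %d, alloc %lu KiB, GITS_BASER size field %lu\n",
    	       order, alloc_size / 1024, alloc_size / psz - 1);
    	return 0;
    }

With these numbers the 8MiB ideal table trips the clamp (the case the pr_warn() above reports) and ends up as a 4MiB allocation, programmed as 64 - 1 = 63 pages of 64K.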
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 1c6dea2fbc34..fd8850def1b8 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c | |||
| @@ -466,7 +466,7 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, | |||
| 466 | tlist |= 1 << (mpidr & 0xf); | 466 | tlist |= 1 << (mpidr & 0xf); |
| 467 | 467 | ||
| 468 | cpu = cpumask_next(cpu, mask); | 468 | cpu = cpumask_next(cpu, mask); |
| 469 | if (cpu == nr_cpu_ids) | 469 | if (cpu >= nr_cpu_ids) |
| 470 | goto out; | 470 | goto out; |
| 471 | 471 | ||
| 472 | mpidr = cpu_logical_map(cpu); | 472 | mpidr = cpu_logical_map(cpu); |
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 4634cf7d0ec3..471e1cdc1933 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c | |||
| @@ -154,23 +154,25 @@ static inline unsigned int gic_irq(struct irq_data *d) | |||
| 154 | static void gic_mask_irq(struct irq_data *d) | 154 | static void gic_mask_irq(struct irq_data *d) |
| 155 | { | 155 | { |
| 156 | u32 mask = 1 << (gic_irq(d) % 32); | 156 | u32 mask = 1 << (gic_irq(d) % 32); |
| 157 | unsigned long flags; | ||
| 157 | 158 | ||
| 158 | raw_spin_lock(&irq_controller_lock); | 159 | raw_spin_lock_irqsave(&irq_controller_lock, flags); |
| 159 | writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4); | 160 | writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4); |
| 160 | if (gic_arch_extn.irq_mask) | 161 | if (gic_arch_extn.irq_mask) |
| 161 | gic_arch_extn.irq_mask(d); | 162 | gic_arch_extn.irq_mask(d); |
| 162 | raw_spin_unlock(&irq_controller_lock); | 163 | raw_spin_unlock_irqrestore(&irq_controller_lock, flags); |
| 163 | } | 164 | } |
| 164 | 165 | ||
| 165 | static void gic_unmask_irq(struct irq_data *d) | 166 | static void gic_unmask_irq(struct irq_data *d) |
| 166 | { | 167 | { |
| 167 | u32 mask = 1 << (gic_irq(d) % 32); | 168 | u32 mask = 1 << (gic_irq(d) % 32); |
| 169 | unsigned long flags; | ||
| 168 | 170 | ||
| 169 | raw_spin_lock(&irq_controller_lock); | 171 | raw_spin_lock_irqsave(&irq_controller_lock, flags); |
| 170 | if (gic_arch_extn.irq_unmask) | 172 | if (gic_arch_extn.irq_unmask) |
| 171 | gic_arch_extn.irq_unmask(d); | 173 | gic_arch_extn.irq_unmask(d); |
| 172 | writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4); | 174 | writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4); |
| 173 | raw_spin_unlock(&irq_controller_lock); | 175 | raw_spin_unlock_irqrestore(&irq_controller_lock, flags); |
| 174 | } | 176 | } |
| 175 | 177 | ||
| 176 | static void gic_eoi_irq(struct irq_data *d) | 178 | static void gic_eoi_irq(struct irq_data *d) |
| @@ -188,6 +190,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type) | |||
| 188 | { | 190 | { |
| 189 | void __iomem *base = gic_dist_base(d); | 191 | void __iomem *base = gic_dist_base(d); |
| 190 | unsigned int gicirq = gic_irq(d); | 192 | unsigned int gicirq = gic_irq(d); |
| 193 | unsigned long flags; | ||
| 191 | int ret; | 194 | int ret; |
| 192 | 195 | ||
| 193 | /* Interrupt configuration for SGIs can't be changed */ | 196 | /* Interrupt configuration for SGIs can't be changed */ |
| @@ -199,14 +202,14 @@ static int gic_set_type(struct irq_data *d, unsigned int type) | |||
| 199 | type != IRQ_TYPE_EDGE_RISING) | 202 | type != IRQ_TYPE_EDGE_RISING) |
| 200 | return -EINVAL; | 203 | return -EINVAL; |
| 201 | 204 | ||
| 202 | raw_spin_lock(&irq_controller_lock); | 205 | raw_spin_lock_irqsave(&irq_controller_lock, flags); |
| 203 | 206 | ||
| 204 | if (gic_arch_extn.irq_set_type) | 207 | if (gic_arch_extn.irq_set_type) |
| 205 | gic_arch_extn.irq_set_type(d, type); | 208 | gic_arch_extn.irq_set_type(d, type); |
| 206 | 209 | ||
| 207 | ret = gic_configure_irq(gicirq, type, base, NULL); | 210 | ret = gic_configure_irq(gicirq, type, base, NULL); |
| 208 | 211 | ||
| 209 | raw_spin_unlock(&irq_controller_lock); | 212 | raw_spin_unlock_irqrestore(&irq_controller_lock, flags); |
| 210 | 213 | ||
| 211 | return ret; | 214 | return ret; |
| 212 | } | 215 | } |
| @@ -227,6 +230,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, | |||
| 227 | void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); | 230 | void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); |
| 228 | unsigned int cpu, shift = (gic_irq(d) % 4) * 8; | 231 | unsigned int cpu, shift = (gic_irq(d) % 4) * 8; |
| 229 | u32 val, mask, bit; | 232 | u32 val, mask, bit; |
| 233 | unsigned long flags; | ||
| 230 | 234 | ||
| 231 | if (!force) | 235 | if (!force) |
| 232 | cpu = cpumask_any_and(mask_val, cpu_online_mask); | 236 | cpu = cpumask_any_and(mask_val, cpu_online_mask); |
| @@ -236,12 +240,12 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, | |||
| 236 | if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) | 240 | if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) |
| 237 | return -EINVAL; | 241 | return -EINVAL; |
| 238 | 242 | ||
| 239 | raw_spin_lock(&irq_controller_lock); | 243 | raw_spin_lock_irqsave(&irq_controller_lock, flags); |
| 240 | mask = 0xff << shift; | 244 | mask = 0xff << shift; |
| 241 | bit = gic_cpu_map[cpu] << shift; | 245 | bit = gic_cpu_map[cpu] << shift; |
| 242 | val = readl_relaxed(reg) & ~mask; | 246 | val = readl_relaxed(reg) & ~mask; |
| 243 | writel_relaxed(val | bit, reg); | 247 | writel_relaxed(val | bit, reg); |
| 244 | raw_spin_unlock(&irq_controller_lock); | 248 | raw_spin_unlock_irqrestore(&irq_controller_lock, flags); |
| 245 | 249 | ||
| 246 | return IRQ_SET_MASK_OK; | 250 | return IRQ_SET_MASK_OK; |
| 247 | } | 251 | } |
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index 3c92780bda09..ff48da61c94c 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c | |||
| @@ -1755,7 +1755,7 @@ init_card(struct hfc_pci *hc) | |||
| 1755 | enable_hwirq(hc); | 1755 | enable_hwirq(hc); |
| 1756 | spin_unlock_irqrestore(&hc->lock, flags); | 1756 | spin_unlock_irqrestore(&hc->lock, flags); |
| 1757 | /* Timeout 80ms */ | 1757 | /* Timeout 80ms */ |
| 1758 | current->state = TASK_UNINTERRUPTIBLE; | 1758 | set_current_state(TASK_UNINTERRUPTIBLE); |
| 1759 | schedule_timeout((80 * HZ) / 1000); | 1759 | schedule_timeout((80 * HZ) / 1000); |
| 1760 | printk(KERN_INFO "HFC PCI: IRQ %d count %d\n", | 1760 | printk(KERN_INFO "HFC PCI: IRQ %d count %d\n", |
| 1761 | hc->irq, hc->irqcnt); | 1761 | hc->irq, hc->irqcnt); |
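
The one-line hfcpci change swaps a bare assignment to current->state for set_current_state(), which includes the memory barrier the wakeup path relies on; the 80ms wait itself is unchanged. For reference, a kernel-context sketch of the two usual spellings of such a wait (using msecs_to_jiffies() instead of the open-coded HZ arithmetic is an equivalent, not something this patch does):

    	/* explicit form, as in the driver */
    	set_current_state(TASK_UNINTERRUPTIBLE);
    	schedule_timeout(msecs_to_jiffies(80));

    	/* or simply */
    	msleep(80);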
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c index 9306219d5675..6ad049a08e4d 100644 --- a/drivers/misc/mei/init.c +++ b/drivers/misc/mei/init.c | |||
| @@ -341,6 +341,8 @@ void mei_stop(struct mei_device *dev) | |||
| 341 | 341 | ||
| 342 | dev->dev_state = MEI_DEV_POWER_DOWN; | 342 | dev->dev_state = MEI_DEV_POWER_DOWN; |
| 343 | mei_reset(dev); | 343 | mei_reset(dev); |
| 344 | /* move device to disabled state unconditionally */ | ||
| 345 | dev->dev_state = MEI_DEV_DISABLED; | ||
| 344 | 346 | ||
| 345 | mutex_unlock(&dev->device_lock); | 347 | mutex_unlock(&dev->device_lock); |
| 346 | 348 | ||
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 5b76a173cd95..5897d8d8fa5a 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig | |||
| @@ -526,6 +526,7 @@ config MTD_NAND_SUNXI | |||
| 526 | 526 | ||
| 527 | config MTD_NAND_HISI504 | 527 | config MTD_NAND_HISI504 |
| 528 | tristate "Support for NAND controller on Hisilicon SoC Hip04" | 528 | tristate "Support for NAND controller on Hisilicon SoC Hip04" |
| 529 | depends on HAS_DMA | ||
| 529 | help | 530 | help |
| 530 | Enables support for NAND controller on Hisilicon SoC Hip04. | 531 | Enables support for NAND controller on Hisilicon SoC Hip04. |
| 531 | 532 | ||
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index 96b0b1d27df1..10b1f7a4fe50 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c | |||
| @@ -480,6 +480,42 @@ static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask) | |||
| 480 | nand_writel(info, NDCR, ndcr | int_mask); | 480 | nand_writel(info, NDCR, ndcr | int_mask); |
| 481 | } | 481 | } |
| 482 | 482 | ||
| 483 | static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len) | ||
| 484 | { | ||
| 485 | if (info->ecc_bch) { | ||
| 486 | int timeout; | ||
| 487 | |||
| 488 | /* | ||
| 489 | * According to the datasheet, when reading from NDDB | ||
| 490 | * with BCH enabled, after each 32 bytes reads, we | ||
| 491 | * have to make sure that the NDSR.RDDREQ bit is set. | ||
| 492 | * | ||
| 493 | * Drain the FIFO 8 32 bits reads at a time, and skip | ||
| 494 | * the polling on the last read. | ||
| 495 | */ | ||
| 496 | while (len > 8) { | ||
| 497 | __raw_readsl(info->mmio_base + NDDB, data, 8); | ||
| 498 | |||
| 499 | for (timeout = 0; | ||
| 500 | !(nand_readl(info, NDSR) & NDSR_RDDREQ); | ||
| 501 | timeout++) { | ||
| 502 | if (timeout >= 5) { | ||
| 503 | dev_err(&info->pdev->dev, | ||
| 504 | "Timeout on RDDREQ while draining the FIFO\n"); | ||
| 505 | return; | ||
| 506 | } | ||
| 507 | |||
| 508 | mdelay(1); | ||
| 509 | } | ||
| 510 | |||
| 511 | data += 32; | ||
| 512 | len -= 8; | ||
| 513 | } | ||
| 514 | } | ||
| 515 | |||
| 516 | __raw_readsl(info->mmio_base + NDDB, data, len); | ||
| 517 | } | ||
| 518 | |||
| 483 | static void handle_data_pio(struct pxa3xx_nand_info *info) | 519 | static void handle_data_pio(struct pxa3xx_nand_info *info) |
| 484 | { | 520 | { |
| 485 | unsigned int do_bytes = min(info->data_size, info->chunk_size); | 521 | unsigned int do_bytes = min(info->data_size, info->chunk_size); |
| @@ -496,14 +532,14 @@ static void handle_data_pio(struct pxa3xx_nand_info *info) | |||
| 496 | DIV_ROUND_UP(info->oob_size, 4)); | 532 | DIV_ROUND_UP(info->oob_size, 4)); |
| 497 | break; | 533 | break; |
| 498 | case STATE_PIO_READING: | 534 | case STATE_PIO_READING: |
| 499 | __raw_readsl(info->mmio_base + NDDB, | 535 | drain_fifo(info, |
| 500 | info->data_buff + info->data_buff_pos, | 536 | info->data_buff + info->data_buff_pos, |
| 501 | DIV_ROUND_UP(do_bytes, 4)); | 537 | DIV_ROUND_UP(do_bytes, 4)); |
| 502 | 538 | ||
| 503 | if (info->oob_size > 0) | 539 | if (info->oob_size > 0) |
| 504 | __raw_readsl(info->mmio_base + NDDB, | 540 | drain_fifo(info, |
| 505 | info->oob_buff + info->oob_buff_pos, | 541 | info->oob_buff + info->oob_buff_pos, |
| 506 | DIV_ROUND_UP(info->oob_size, 4)); | 542 | DIV_ROUND_UP(info->oob_size, 4)); |
| 507 | break; | 543 | break; |
| 508 | default: | 544 | default: |
| 509 | dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__, | 545 | dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__, |
| @@ -1572,6 +1608,8 @@ static int alloc_nand_resource(struct platform_device *pdev) | |||
| 1572 | int ret, irq, cs; | 1608 | int ret, irq, cs; |
| 1573 | 1609 | ||
| 1574 | pdata = dev_get_platdata(&pdev->dev); | 1610 | pdata = dev_get_platdata(&pdev->dev); |
| 1611 | if (pdata->num_cs <= 0) | ||
| 1612 | return -ENODEV; | ||
| 1575 | info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) + | 1613 | info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) + |
| 1576 | sizeof(*host)) * pdata->num_cs, GFP_KERNEL); | 1614 | sizeof(*host)) * pdata->num_cs, GFP_KERNEL); |
| 1577 | if (!info) | 1615 | if (!info) |
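
The new drain_fifo() above implements the requirement quoted in its comment: with BCH ECC enabled, NDSR.RDDREQ has to be rechecked after every 32 bytes read from NDDB, so the loop consumes 8 32-bit words per burst and leaves the final partial burst to an unpolled read. A small self-contained demo of just that chunking arithmetic (the 518-word length is an arbitrary example; the register polling obviously needs the hardware):

    #include <stdio.h>

    int main(void)
    {
    	int len = 518;		/* payload length in 32-bit words */
    	int offset = 0;		/* byte offset into the destination buffer */
    	int polls = 0;

    	while (len > 8) {
    		offset += 32;	/* __raw_readsl(..., data, 8): 8 words = 32 bytes */
    		len -= 8;
    		polls++;	/* poll NDSR.RDDREQ here before the next burst */
    	}
    	offset += len * 4;	/* final burst, no poll needed afterwards */

    	printf("%d polls, %d bytes drained\n", polls, offset);
    	return 0;
    }

Note that data += 32 in the driver advances the void pointer by 32 bytes (a GCC extension), which matches the 8 words removed from len each round.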
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 84673ebcf428..df51d6025a90 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
| @@ -157,7 +157,7 @@ config IPVLAN | |||
| 157 | making it transparent to the connected L2 switch. | 157 | making it transparent to the connected L2 switch. |
| 158 | 158 | ||
| 159 | Ipvlan devices can be added using the "ip" command from the | 159 | Ipvlan devices can be added using the "ip" command from the |
| 160 | iproute2 package starting with the iproute2-X.Y.ZZ release: | 160 | iproute2 package starting with the iproute2-3.19 release: |
| 161 | 161 | ||
| 162 | "ip link add link <main-dev> [ NAME ] type ipvlan" | 162 | "ip link add link <main-dev> [ NAME ] type ipvlan" |
| 163 | 163 | ||
diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig index 4ce6ca5f3d36..dc6b78e5342f 100644 --- a/drivers/net/appletalk/Kconfig +++ b/drivers/net/appletalk/Kconfig | |||
| @@ -40,7 +40,7 @@ config DEV_APPLETALK | |||
| 40 | 40 | ||
| 41 | config LTPC | 41 | config LTPC |
| 42 | tristate "Apple/Farallon LocalTalk PC support" | 42 | tristate "Apple/Farallon LocalTalk PC support" |
| 43 | depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API | 43 | depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API && VIRT_TO_BUS |
| 44 | help | 44 | help |
| 45 | This allows you to use the AppleTalk PC card to connect to LocalTalk | 45 | This allows you to use the AppleTalk PC card to connect to LocalTalk |
| 46 | networks. The card is also known as the Farallon PhoneNet PC card. | 46 | networks. The card is also known as the Farallon PhoneNet PC card. |
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 3c82e02e3dae..b0f69248cb71 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c | |||
| @@ -579,6 +579,10 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf) | |||
| 579 | skb->pkt_type = PACKET_BROADCAST; | 579 | skb->pkt_type = PACKET_BROADCAST; |
| 580 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 580 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
| 581 | 581 | ||
| 582 | skb_reset_mac_header(skb); | ||
| 583 | skb_reset_network_header(skb); | ||
| 584 | skb_reset_transport_header(skb); | ||
| 585 | |||
| 582 | can_skb_reserve(skb); | 586 | can_skb_reserve(skb); |
| 583 | can_skb_prv(skb)->ifindex = dev->ifindex; | 587 | can_skb_prv(skb)->ifindex = dev->ifindex; |
| 584 | 588 | ||
| @@ -603,6 +607,10 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev, | |||
| 603 | skb->pkt_type = PACKET_BROADCAST; | 607 | skb->pkt_type = PACKET_BROADCAST; |
| 604 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 608 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
| 605 | 609 | ||
| 610 | skb_reset_mac_header(skb); | ||
| 611 | skb_reset_network_header(skb); | ||
| 612 | skb_reset_transport_header(skb); | ||
| 613 | |||
| 606 | can_skb_reserve(skb); | 614 | can_skb_reserve(skb); |
| 607 | can_skb_prv(skb)->ifindex = dev->ifindex; | 615 | can_skb_prv(skb)->ifindex = dev->ifindex; |
| 608 | 616 | ||
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 2928f7003041..a316fa4b91ab 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | * Copyright (C) 2015 Valeo S.A. | 14 | * Copyright (C) 2015 Valeo S.A. |
| 15 | */ | 15 | */ |
| 16 | 16 | ||
| 17 | #include <linux/kernel.h> | ||
| 17 | #include <linux/completion.h> | 18 | #include <linux/completion.h> |
| 18 | #include <linux/module.h> | 19 | #include <linux/module.h> |
| 19 | #include <linux/netdevice.h> | 20 | #include <linux/netdevice.h> |
| @@ -584,8 +585,15 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id, | |||
| 584 | while (pos <= actual_len - MSG_HEADER_LEN) { | 585 | while (pos <= actual_len - MSG_HEADER_LEN) { |
| 585 | tmp = buf + pos; | 586 | tmp = buf + pos; |
| 586 | 587 | ||
| 587 | if (!tmp->len) | 588 | /* Handle messages crossing the USB endpoint max packet |
| 588 | break; | 589 | * size boundary. Check kvaser_usb_read_bulk_callback() |
| 590 | * for further details. | ||
| 591 | */ | ||
| 592 | if (tmp->len == 0) { | ||
| 593 | pos = round_up(pos, | ||
| 594 | dev->bulk_in->wMaxPacketSize); | ||
| 595 | continue; | ||
| 596 | } | ||
| 589 | 597 | ||
| 590 | if (pos + tmp->len > actual_len) { | 598 | if (pos + tmp->len > actual_len) { |
| 591 | dev_err(dev->udev->dev.parent, | 599 | dev_err(dev->udev->dev.parent, |
| @@ -787,7 +795,6 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv, | |||
| 787 | netdev_err(netdev, "Error transmitting URB\n"); | 795 | netdev_err(netdev, "Error transmitting URB\n"); |
| 788 | usb_unanchor_urb(urb); | 796 | usb_unanchor_urb(urb); |
| 789 | usb_free_urb(urb); | 797 | usb_free_urb(urb); |
| 790 | kfree(buf); | ||
| 791 | return err; | 798 | return err; |
| 792 | } | 799 | } |
| 793 | 800 | ||
| @@ -1317,8 +1324,19 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) | |||
| 1317 | while (pos <= urb->actual_length - MSG_HEADER_LEN) { | 1324 | while (pos <= urb->actual_length - MSG_HEADER_LEN) { |
| 1318 | msg = urb->transfer_buffer + pos; | 1325 | msg = urb->transfer_buffer + pos; |
| 1319 | 1326 | ||
| 1320 | if (!msg->len) | 1327 | /* The Kvaser firmware can only read and write messages that |
| 1321 | break; | 1328 | * does not cross the USB's endpoint wMaxPacketSize boundary. |
| 1329 | * If a follow-up command crosses such boundary, firmware puts | ||
| 1330 | * a placeholder zero-length command in its place then aligns | ||
| 1331 | * the real command to the next max packet size. | ||
| 1332 | * | ||
| 1333 | * Handle such cases or we're going to miss a significant | ||
| 1334 | * number of events in case of a heavy rx load on the bus. | ||
| 1335 | */ | ||
| 1336 | if (msg->len == 0) { | ||
| 1337 | pos = round_up(pos, dev->bulk_in->wMaxPacketSize); | ||
| 1338 | continue; | ||
| 1339 | } | ||
| 1322 | 1340 | ||
| 1323 | if (pos + msg->len > urb->actual_length) { | 1341 | if (pos + msg->len > urb->actual_length) { |
| 1324 | dev_err(dev->udev->dev.parent, "Format error\n"); | 1342 | dev_err(dev->udev->dev.parent, "Format error\n"); |
| @@ -1326,7 +1344,6 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) | |||
| 1326 | } | 1344 | } |
| 1327 | 1345 | ||
| 1328 | kvaser_usb_handle_message(dev, msg); | 1346 | kvaser_usb_handle_message(dev, msg); |
| 1329 | |||
| 1330 | pos += msg->len; | 1347 | pos += msg->len; |
| 1331 | } | 1348 | } |
| 1332 | 1349 | ||
| @@ -1615,8 +1632,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, | |||
| 1615 | struct urb *urb; | 1632 | struct urb *urb; |
| 1616 | void *buf; | 1633 | void *buf; |
| 1617 | struct kvaser_msg *msg; | 1634 | struct kvaser_msg *msg; |
| 1618 | int i, err; | 1635 | int i, err, ret = NETDEV_TX_OK; |
| 1619 | int ret = NETDEV_TX_OK; | ||
| 1620 | u8 *msg_tx_can_flags = NULL; /* GCC */ | 1636 | u8 *msg_tx_can_flags = NULL; /* GCC */ |
| 1621 | 1637 | ||
| 1622 | if (can_dropped_invalid_skb(netdev, skb)) | 1638 | if (can_dropped_invalid_skb(netdev, skb)) |
| @@ -1634,7 +1650,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, | |||
| 1634 | if (!buf) { | 1650 | if (!buf) { |
| 1635 | stats->tx_dropped++; | 1651 | stats->tx_dropped++; |
| 1636 | dev_kfree_skb(skb); | 1652 | dev_kfree_skb(skb); |
| 1637 | goto nobufmem; | 1653 | goto freeurb; |
| 1638 | } | 1654 | } |
| 1639 | 1655 | ||
| 1640 | msg = buf; | 1656 | msg = buf; |
| @@ -1681,8 +1697,10 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, | |||
| 1681 | /* This should never happen; it implies a flow control bug */ | 1697 | /* This should never happen; it implies a flow control bug */ |
| 1682 | if (!context) { | 1698 | if (!context) { |
| 1683 | netdev_warn(netdev, "cannot find free context\n"); | 1699 | netdev_warn(netdev, "cannot find free context\n"); |
| 1700 | |||
| 1701 | kfree(buf); | ||
| 1684 | ret = NETDEV_TX_BUSY; | 1702 | ret = NETDEV_TX_BUSY; |
| 1685 | goto releasebuf; | 1703 | goto freeurb; |
| 1686 | } | 1704 | } |
| 1687 | 1705 | ||
| 1688 | context->priv = priv; | 1706 | context->priv = priv; |
| @@ -1719,16 +1737,12 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, | |||
| 1719 | else | 1737 | else |
| 1720 | netdev_warn(netdev, "Failed tx_urb %d\n", err); | 1738 | netdev_warn(netdev, "Failed tx_urb %d\n", err); |
| 1721 | 1739 | ||
| 1722 | goto releasebuf; | 1740 | goto freeurb; |
| 1723 | } | 1741 | } |
| 1724 | 1742 | ||
| 1725 | usb_free_urb(urb); | 1743 | ret = NETDEV_TX_OK; |
| 1726 | |||
| 1727 | return NETDEV_TX_OK; | ||
| 1728 | 1744 | ||
| 1729 | releasebuf: | 1745 | freeurb: |
| 1730 | kfree(buf); | ||
| 1731 | nobufmem: | ||
| 1732 | usb_free_urb(urb); | 1746 | usb_free_urb(urb); |
| 1733 | return ret; | 1747 | return ret; |
| 1734 | } | 1748 | } |
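
Both kvaser_usb parsing loops above previously stopped at the first zero-length message; now they treat it as the firmware's padding marker and resume at the next wMaxPacketSize boundary. A self-contained demo of that repositioning (message lengths and the 64-byte packet size are invented for illustration):

    #include <stdio.h>

    /* round x up to a multiple of y (y a power of two), as the kernel macro does */
    #define round_up(x, y)	(((x) + (y) - 1) & ~((y) - 1))

    int main(void)
    {
    	/* lengths of consecutive messages in one bulk-in buffer; a zero is
    	 * the placeholder the firmware inserts before a packet boundary */
    	int lens[] = { 24, 30, 0, 18, 0, 12 };
    	int n = sizeof(lens) / sizeof(lens[0]);
    	int wMaxPacketSize = 64;
    	int pos = 0, handled = 0;

    	for (int i = 0; i < n; i++) {
    		if (lens[i] == 0) {
    			pos = round_up(pos, wMaxPacketSize);	/* skip the padding */
    			continue;
    		}
    		handled++;		/* kvaser_usb_handle_message() in the driver */
    		pos += lens[i];
    	}
    	printf("handled %d messages, ended at offset %d\n", handled, pos);
    	return 0;
    }

With the old break the loop would have stopped at the first placeholder and silently dropped the last two real messages of this example buffer.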
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c index 962c3f027383..0bac0f14edc3 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c | |||
| @@ -879,6 +879,10 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev) | |||
| 879 | 879 | ||
| 880 | pdev->usb_if = ppdev->usb_if; | 880 | pdev->usb_if = ppdev->usb_if; |
| 881 | pdev->cmd_buffer_addr = ppdev->cmd_buffer_addr; | 881 | pdev->cmd_buffer_addr = ppdev->cmd_buffer_addr; |
| 882 | |||
| 883 | /* do a copy of the ctrlmode[_supported] too */ | ||
| 884 | dev->can.ctrlmode = ppdev->dev.can.ctrlmode; | ||
| 885 | dev->can.ctrlmode_supported = ppdev->dev.can.ctrlmode_supported; | ||
| 882 | } | 886 | } |
| 883 | 887 | ||
| 884 | pdev->usb_if->dev[dev->ctrl_idx] = dev; | 888 | pdev->usb_if->dev[dev->ctrl_idx] = dev; |
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index ee9f650d5026..7b7053d3c5fa 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h | |||
| @@ -105,8 +105,8 @@ static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off) \ | |||
| 105 | { \ | 105 | { \ |
| 106 | u32 indir, dir; \ | 106 | u32 indir, dir; \ |
| 107 | spin_lock(&priv->indir_lock); \ | 107 | spin_lock(&priv->indir_lock); \ |
| 108 | indir = reg_readl(priv, REG_DIR_DATA_READ); \ | ||
| 109 | dir = __raw_readl(priv->name + off); \ | 108 | dir = __raw_readl(priv->name + off); \ |
| 109 | indir = reg_readl(priv, REG_DIR_DATA_READ); \ | ||
| 110 | spin_unlock(&priv->indir_lock); \ | 110 | spin_unlock(&priv->indir_lock); \ |
| 111 | return (u64)indir << 32 | dir; \ | 111 | return (u64)indir << 32 | dir; \ |
| 112 | } \ | 112 | } \ |
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c index 7769c05543f1..ec6eac1f8c95 100644 --- a/drivers/net/ethernet/8390/axnet_cs.c +++ b/drivers/net/ethernet/8390/axnet_cs.c | |||
| @@ -484,11 +484,8 @@ static int axnet_open(struct net_device *dev) | |||
| 484 | link->open++; | 484 | link->open++; |
| 485 | 485 | ||
| 486 | info->link_status = 0x00; | 486 | info->link_status = 0x00; |
| 487 | init_timer(&info->watchdog); | 487 | setup_timer(&info->watchdog, ei_watchdog, (u_long)dev); |
| 488 | info->watchdog.function = ei_watchdog; | 488 | mod_timer(&info->watchdog, jiffies + HZ); |
| 489 | info->watchdog.data = (u_long)dev; | ||
| 490 | info->watchdog.expires = jiffies + HZ; | ||
| 491 | add_timer(&info->watchdog); | ||
| 492 | 489 | ||
| 493 | return ax_open(dev); | 490 | return ax_open(dev); |
| 494 | } /* axnet_open */ | 491 | } /* axnet_open */ |
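
The axnet_cs hunk above and the pcnet_cs hunk right below replace the open-coded timer setup with setup_timer() plus mod_timer(). The condensed form is behaviourally equivalent here, with one extra safety margin: mod_timer() also copes with a timer that happens to be pending already, whereas add_timer() must only ever be called on an inactive timer. A kernel-context sketch using the same names as the drivers:

    	/* open-coded form being removed */
    	init_timer(&info->watchdog);
    	info->watchdog.function = ei_watchdog;
    	info->watchdog.data = (u_long)dev;
    	info->watchdog.expires = jiffies + HZ;
    	add_timer(&info->watchdog);

    	/* condensed form being added */
    	setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
    	mod_timer(&info->watchdog, jiffies + HZ);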
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c index 9fb7b9d4fd6c..2777289a26c0 100644 --- a/drivers/net/ethernet/8390/pcnet_cs.c +++ b/drivers/net/ethernet/8390/pcnet_cs.c | |||
| @@ -918,11 +918,8 @@ static int pcnet_open(struct net_device *dev) | |||
| 918 | 918 | ||
| 919 | info->phy_id = info->eth_phy; | 919 | info->phy_id = info->eth_phy; |
| 920 | info->link_status = 0x00; | 920 | info->link_status = 0x00; |
| 921 | init_timer(&info->watchdog); | 921 | setup_timer(&info->watchdog, ei_watchdog, (u_long)dev); |
| 922 | info->watchdog.function = ei_watchdog; | 922 | mod_timer(&info->watchdog, jiffies + HZ); |
| 923 | info->watchdog.data = (u_long)dev; | ||
| 924 | info->watchdog.expires = jiffies + HZ; | ||
| 925 | add_timer(&info->watchdog); | ||
| 926 | 923 | ||
| 927 | return ei_open(dev); | 924 | return ei_open(dev); |
| 928 | } /* pcnet_open */ | 925 | } /* pcnet_open */ |
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 760c72c6e2ac..6725dc00750b 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c | |||
| @@ -376,7 +376,8 @@ static int tse_rx(struct altera_tse_private *priv, int limit) | |||
| 376 | u16 pktlength; | 376 | u16 pktlength; |
| 377 | u16 pktstatus; | 377 | u16 pktstatus; |
| 378 | 378 | ||
| 379 | while ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) { | 379 | while (((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) && |
| 380 | (count < limit)) { | ||
| 380 | pktstatus = rxstatus >> 16; | 381 | pktstatus = rxstatus >> 16; |
| 381 | pktlength = rxstatus & 0xffff; | 382 | pktlength = rxstatus & 0xffff; |
| 382 | 383 | ||
| @@ -491,28 +492,27 @@ static int tse_poll(struct napi_struct *napi, int budget) | |||
| 491 | struct altera_tse_private *priv = | 492 | struct altera_tse_private *priv = |
| 492 | container_of(napi, struct altera_tse_private, napi); | 493 | container_of(napi, struct altera_tse_private, napi); |
| 493 | int rxcomplete = 0; | 494 | int rxcomplete = 0; |
| 494 | int txcomplete = 0; | ||
| 495 | unsigned long int flags; | 495 | unsigned long int flags; |
| 496 | 496 | ||
| 497 | txcomplete = tse_tx_complete(priv); | 497 | tse_tx_complete(priv); |
| 498 | 498 | ||
| 499 | rxcomplete = tse_rx(priv, budget); | 499 | rxcomplete = tse_rx(priv, budget); |
| 500 | 500 | ||
| 501 | if (rxcomplete >= budget || txcomplete > 0) | 501 | if (rxcomplete < budget) { |
| 502 | return rxcomplete; | ||
| 503 | 502 | ||
| 504 | napi_gro_flush(napi, false); | 503 | napi_gro_flush(napi, false); |
| 505 | __napi_complete(napi); | 504 | __napi_complete(napi); |
| 506 | 505 | ||
| 507 | netdev_dbg(priv->dev, | 506 | netdev_dbg(priv->dev, |
| 508 | "NAPI Complete, did %d packets with budget %d\n", | 507 | "NAPI Complete, did %d packets with budget %d\n", |
| 509 | txcomplete+rxcomplete, budget); | 508 | rxcomplete, budget); |
| 510 | 509 | ||
| 511 | spin_lock_irqsave(&priv->rxdma_irq_lock, flags); | 510 | spin_lock_irqsave(&priv->rxdma_irq_lock, flags); |
| 512 | priv->dmaops->enable_rxirq(priv); | 511 | priv->dmaops->enable_rxirq(priv); |
| 513 | priv->dmaops->enable_txirq(priv); | 512 | priv->dmaops->enable_txirq(priv); |
| 514 | spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); | 513 | spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); |
| 515 | return rxcomplete + txcomplete; | 514 | } |
| 515 | return rxcomplete; | ||
| 516 | } | 516 | } |
| 517 | 517 | ||
| 518 | /* DMA TX & RX FIFO interrupt routing | 518 | /* DMA TX & RX FIFO interrupt routing |
| @@ -521,7 +521,6 @@ static irqreturn_t altera_isr(int irq, void *dev_id) | |||
| 521 | { | 521 | { |
| 522 | struct net_device *dev = dev_id; | 522 | struct net_device *dev = dev_id; |
| 523 | struct altera_tse_private *priv; | 523 | struct altera_tse_private *priv; |
| 524 | unsigned long int flags; | ||
| 525 | 524 | ||
| 526 | if (unlikely(!dev)) { | 525 | if (unlikely(!dev)) { |
| 527 | pr_err("%s: invalid dev pointer\n", __func__); | 526 | pr_err("%s: invalid dev pointer\n", __func__); |
| @@ -529,20 +528,20 @@ static irqreturn_t altera_isr(int irq, void *dev_id) | |||
| 529 | } | 528 | } |
| 530 | priv = netdev_priv(dev); | 529 | priv = netdev_priv(dev); |
| 531 | 530 | ||
| 532 | /* turn off desc irqs and enable napi rx */ | 531 | spin_lock(&priv->rxdma_irq_lock); |
| 533 | spin_lock_irqsave(&priv->rxdma_irq_lock, flags); | 532 | /* reset IRQs */ |
| 533 | priv->dmaops->clear_rxirq(priv); | ||
| 534 | priv->dmaops->clear_txirq(priv); | ||
| 535 | spin_unlock(&priv->rxdma_irq_lock); | ||
| 534 | 536 | ||
| 535 | if (likely(napi_schedule_prep(&priv->napi))) { | 537 | if (likely(napi_schedule_prep(&priv->napi))) { |
| 538 | spin_lock(&priv->rxdma_irq_lock); | ||
| 536 | priv->dmaops->disable_rxirq(priv); | 539 | priv->dmaops->disable_rxirq(priv); |
| 537 | priv->dmaops->disable_txirq(priv); | 540 | priv->dmaops->disable_txirq(priv); |
| 541 | spin_unlock(&priv->rxdma_irq_lock); | ||
| 538 | __napi_schedule(&priv->napi); | 542 | __napi_schedule(&priv->napi); |
| 539 | } | 543 | } |
| 540 | 544 | ||
| 541 | /* reset IRQs */ | ||
| 542 | priv->dmaops->clear_rxirq(priv); | ||
| 543 | priv->dmaops->clear_txirq(priv); | ||
| 544 | |||
| 545 | spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); | ||
| 546 | 545 | ||
| 547 | return IRQ_HANDLED; | 546 | return IRQ_HANDLED; |
| 548 | } | 547 | } |
| @@ -1399,7 +1398,7 @@ static int altera_tse_probe(struct platform_device *pdev) | |||
| 1399 | } | 1398 | } |
| 1400 | 1399 | ||
| 1401 | if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", | 1400 | if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", |
| 1402 | &priv->rx_fifo_depth)) { | 1401 | &priv->tx_fifo_depth)) { |
| 1403 | dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n"); | 1402 | dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n"); |
| 1404 | ret = -ENXIO; | 1403 | ret = -ENXIO; |
| 1405 | goto err_free_netdev; | 1404 | goto err_free_netdev; |
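
The altera_tse rework above brings tse_poll() in line with the standard NAPI contract: the RX loop is bounded by the budget, only RX packets count toward the value returned, and interrupts are re-enabled only after completing NAPI when less than the full budget was used (the ISR change complements this by acking the DMA interrupts before scheduling NAPI). A kernel-context sketch of that general shape, with generic names rather than the driver's:

    struct example_priv {
    	struct napi_struct napi;
    	/* ... descriptor rings, IRQ enable/disable state ... */
    };

    static int example_poll(struct napi_struct *napi, int budget)
    {
    	struct example_priv *priv = container_of(napi, struct example_priv, napi);
    	int work_done;

    	example_clean_tx(priv);			/* TX completions never count */
    	work_done = example_clean_rx(priv, budget);

    	if (work_done < budget) {
    		napi_complete(napi);		/* done for now ... */
    		example_enable_rx_tx_irqs(priv);/* ... let the next IRQ reschedule us */
    	}
    	return work_done;			/* always <= budget */
    }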
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index b93d4404d975..885b02b5be07 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c | |||
| @@ -609,6 +609,68 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del) | |||
| 609 | } | 609 | } |
| 610 | } | 610 | } |
| 611 | 611 | ||
| 612 | static int xgbe_request_irqs(struct xgbe_prv_data *pdata) | ||
| 613 | { | ||
| 614 | struct xgbe_channel *channel; | ||
| 615 | struct net_device *netdev = pdata->netdev; | ||
| 616 | unsigned int i; | ||
| 617 | int ret; | ||
| 618 | |||
| 619 | ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0, | ||
| 620 | netdev->name, pdata); | ||
| 621 | if (ret) { | ||
| 622 | netdev_alert(netdev, "error requesting irq %d\n", | ||
| 623 | pdata->dev_irq); | ||
| 624 | return ret; | ||
| 625 | } | ||
| 626 | |||
| 627 | if (!pdata->per_channel_irq) | ||
| 628 | return 0; | ||
| 629 | |||
| 630 | channel = pdata->channel; | ||
| 631 | for (i = 0; i < pdata->channel_count; i++, channel++) { | ||
| 632 | snprintf(channel->dma_irq_name, | ||
| 633 | sizeof(channel->dma_irq_name) - 1, | ||
| 634 | "%s-TxRx-%u", netdev_name(netdev), | ||
| 635 | channel->queue_index); | ||
| 636 | |||
| 637 | ret = devm_request_irq(pdata->dev, channel->dma_irq, | ||
| 638 | xgbe_dma_isr, 0, | ||
| 639 | channel->dma_irq_name, channel); | ||
| 640 | if (ret) { | ||
| 641 | netdev_alert(netdev, "error requesting irq %d\n", | ||
| 642 | channel->dma_irq); | ||
| 643 | goto err_irq; | ||
| 644 | } | ||
| 645 | } | ||
| 646 | |||
| 647 | return 0; | ||
| 648 | |||
| 649 | err_irq: | ||
| 650 | /* Using an unsigned int, 'i' will go to UINT_MAX and exit */ | ||
| 651 | for (i--, channel--; i < pdata->channel_count; i--, channel--) | ||
| 652 | devm_free_irq(pdata->dev, channel->dma_irq, channel); | ||
| 653 | |||
| 654 | devm_free_irq(pdata->dev, pdata->dev_irq, pdata); | ||
| 655 | |||
| 656 | return ret; | ||
| 657 | } | ||
| 658 | |||
| 659 | static void xgbe_free_irqs(struct xgbe_prv_data *pdata) | ||
| 660 | { | ||
| 661 | struct xgbe_channel *channel; | ||
| 662 | unsigned int i; | ||
| 663 | |||
| 664 | devm_free_irq(pdata->dev, pdata->dev_irq, pdata); | ||
| 665 | |||
| 666 | if (!pdata->per_channel_irq) | ||
| 667 | return; | ||
| 668 | |||
| 669 | channel = pdata->channel; | ||
| 670 | for (i = 0; i < pdata->channel_count; i++, channel++) | ||
| 671 | devm_free_irq(pdata->dev, channel->dma_irq, channel); | ||
| 672 | } | ||
| 673 | |||
| 612 | void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata) | 674 | void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata) |
| 613 | { | 675 | { |
| 614 | struct xgbe_hw_if *hw_if = &pdata->hw_if; | 676 | struct xgbe_hw_if *hw_if = &pdata->hw_if; |
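
The error path in the new xgbe_request_irqs() unwinds with a loop that looks odd at first glance: the index i is unsigned, so after freeing channel 0 the final i-- wraps around and the i < pdata->channel_count test ends the loop, exactly as the in-line comment says. A small self-contained demo of that wraparound idiom:

    #include <stdio.h>
    #include <limits.h>

    int main(void)
    {
    	unsigned int channel_count = 4;
    	unsigned int i = 2;	/* say devm_request_irq() failed on channel 2 */

    	/* free channels 1 and 0, then stop when i wraps past zero */
    	for (i--; i < channel_count; i--)
    		printf("freeing irq of channel %u\n", i);

    	printf("loop ended with i == UINT_MAX: %s\n",
    	       i == UINT_MAX ? "yes" : "no");
    	return 0;
    }

The same trick also does the right thing when the very first per-channel request fails: i-- wraps immediately and the loop body never runs.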
| @@ -810,20 +872,20 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller) | |||
| 810 | return -EINVAL; | 872 | return -EINVAL; |
| 811 | } | 873 | } |
| 812 | 874 | ||
| 813 | phy_stop(pdata->phydev); | ||
| 814 | |||
| 815 | spin_lock_irqsave(&pdata->lock, flags); | 875 | spin_lock_irqsave(&pdata->lock, flags); |
| 816 | 876 | ||
| 817 | if (caller == XGMAC_DRIVER_CONTEXT) | 877 | if (caller == XGMAC_DRIVER_CONTEXT) |
| 818 | netif_device_detach(netdev); | 878 | netif_device_detach(netdev); |
| 819 | 879 | ||
| 820 | netif_tx_stop_all_queues(netdev); | 880 | netif_tx_stop_all_queues(netdev); |
| 821 | xgbe_napi_disable(pdata, 0); | ||
| 822 | 881 | ||
| 823 | /* Powerdown Tx/Rx */ | ||
| 824 | hw_if->powerdown_tx(pdata); | 882 | hw_if->powerdown_tx(pdata); |
| 825 | hw_if->powerdown_rx(pdata); | 883 | hw_if->powerdown_rx(pdata); |
| 826 | 884 | ||
| 885 | xgbe_napi_disable(pdata, 0); | ||
| 886 | |||
| 887 | phy_stop(pdata->phydev); | ||
| 888 | |||
| 827 | pdata->power_down = 1; | 889 | pdata->power_down = 1; |
| 828 | 890 | ||
| 829 | spin_unlock_irqrestore(&pdata->lock, flags); | 891 | spin_unlock_irqrestore(&pdata->lock, flags); |
| @@ -854,14 +916,14 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller) | |||
| 854 | 916 | ||
| 855 | phy_start(pdata->phydev); | 917 | phy_start(pdata->phydev); |
| 856 | 918 | ||
| 857 | /* Enable Tx/Rx */ | 919 | xgbe_napi_enable(pdata, 0); |
| 920 | |||
| 858 | hw_if->powerup_tx(pdata); | 921 | hw_if->powerup_tx(pdata); |
| 859 | hw_if->powerup_rx(pdata); | 922 | hw_if->powerup_rx(pdata); |
| 860 | 923 | ||
| 861 | if (caller == XGMAC_DRIVER_CONTEXT) | 924 | if (caller == XGMAC_DRIVER_CONTEXT) |
| 862 | netif_device_attach(netdev); | 925 | netif_device_attach(netdev); |
| 863 | 926 | ||
| 864 | xgbe_napi_enable(pdata, 0); | ||
| 865 | netif_tx_start_all_queues(netdev); | 927 | netif_tx_start_all_queues(netdev); |
| 866 | 928 | ||
| 867 | spin_unlock_irqrestore(&pdata->lock, flags); | 929 | spin_unlock_irqrestore(&pdata->lock, flags); |
| @@ -875,6 +937,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata) | |||
| 875 | { | 937 | { |
| 876 | struct xgbe_hw_if *hw_if = &pdata->hw_if; | 938 | struct xgbe_hw_if *hw_if = &pdata->hw_if; |
| 877 | struct net_device *netdev = pdata->netdev; | 939 | struct net_device *netdev = pdata->netdev; |
| 940 | int ret; | ||
| 878 | 941 | ||
| 879 | DBGPR("-->xgbe_start\n"); | 942 | DBGPR("-->xgbe_start\n"); |
| 880 | 943 | ||
| @@ -884,17 +947,31 @@ static int xgbe_start(struct xgbe_prv_data *pdata) | |||
| 884 | 947 | ||
| 885 | phy_start(pdata->phydev); | 948 | phy_start(pdata->phydev); |
| 886 | 949 | ||
| 950 | xgbe_napi_enable(pdata, 1); | ||
| 951 | |||
| 952 | ret = xgbe_request_irqs(pdata); | ||
| 953 | if (ret) | ||
| 954 | goto err_napi; | ||
| 955 | |||
| 887 | hw_if->enable_tx(pdata); | 956 | hw_if->enable_tx(pdata); |
| 888 | hw_if->enable_rx(pdata); | 957 | hw_if->enable_rx(pdata); |
| 889 | 958 | ||
| 890 | xgbe_init_tx_timers(pdata); | 959 | xgbe_init_tx_timers(pdata); |
| 891 | 960 | ||
| 892 | xgbe_napi_enable(pdata, 1); | ||
| 893 | netif_tx_start_all_queues(netdev); | 961 | netif_tx_start_all_queues(netdev); |
| 894 | 962 | ||
| 895 | DBGPR("<--xgbe_start\n"); | 963 | DBGPR("<--xgbe_start\n"); |
| 896 | 964 | ||
| 897 | return 0; | 965 | return 0; |
| 966 | |||
| 967 | err_napi: | ||
| 968 | xgbe_napi_disable(pdata, 1); | ||
| 969 | |||
| 970 | phy_stop(pdata->phydev); | ||
| 971 | |||
| 972 | hw_if->exit(pdata); | ||
| 973 | |||
| 974 | return ret; | ||
| 898 | } | 975 | } |
| 899 | 976 | ||
| 900 | static void xgbe_stop(struct xgbe_prv_data *pdata) | 977 | static void xgbe_stop(struct xgbe_prv_data *pdata) |
| @@ -907,16 +984,21 @@ static void xgbe_stop(struct xgbe_prv_data *pdata) | |||
| 907 | 984 | ||
| 908 | DBGPR("-->xgbe_stop\n"); | 985 | DBGPR("-->xgbe_stop\n"); |
| 909 | 986 | ||
| 910 | phy_stop(pdata->phydev); | ||
| 911 | |||
| 912 | netif_tx_stop_all_queues(netdev); | 987 | netif_tx_stop_all_queues(netdev); |
| 913 | xgbe_napi_disable(pdata, 1); | ||
| 914 | 988 | ||
| 915 | xgbe_stop_tx_timers(pdata); | 989 | xgbe_stop_tx_timers(pdata); |
| 916 | 990 | ||
| 917 | hw_if->disable_tx(pdata); | 991 | hw_if->disable_tx(pdata); |
| 918 | hw_if->disable_rx(pdata); | 992 | hw_if->disable_rx(pdata); |
| 919 | 993 | ||
| 994 | xgbe_free_irqs(pdata); | ||
| 995 | |||
| 996 | xgbe_napi_disable(pdata, 1); | ||
| 997 | |||
| 998 | phy_stop(pdata->phydev); | ||
| 999 | |||
| 1000 | hw_if->exit(pdata); | ||
| 1001 | |||
| 920 | channel = pdata->channel; | 1002 | channel = pdata->channel; |
| 921 | for (i = 0; i < pdata->channel_count; i++, channel++) { | 1003 | for (i = 0; i < pdata->channel_count; i++, channel++) { |
| 922 | if (!channel->tx_ring) | 1004 | if (!channel->tx_ring) |
| @@ -931,10 +1013,6 @@ static void xgbe_stop(struct xgbe_prv_data *pdata) | |||
| 931 | 1013 | ||
| 932 | static void xgbe_restart_dev(struct xgbe_prv_data *pdata) | 1014 | static void xgbe_restart_dev(struct xgbe_prv_data *pdata) |
| 933 | { | 1015 | { |
| 934 | struct xgbe_channel *channel; | ||
| 935 | struct xgbe_hw_if *hw_if = &pdata->hw_if; | ||
| 936 | unsigned int i; | ||
| 937 | |||
| 938 | DBGPR("-->xgbe_restart_dev\n"); | 1016 | DBGPR("-->xgbe_restart_dev\n"); |
| 939 | 1017 | ||
| 940 | /* If not running, "restart" will happen on open */ | 1018 | /* If not running, "restart" will happen on open */ |
| @@ -942,19 +1020,10 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata) | |||
| 942 | return; | 1020 | return; |
| 943 | 1021 | ||
| 944 | xgbe_stop(pdata); | 1022 | xgbe_stop(pdata); |
| 945 | synchronize_irq(pdata->dev_irq); | ||
| 946 | if (pdata->per_channel_irq) { | ||
| 947 | channel = pdata->channel; | ||
| 948 | for (i = 0; i < pdata->channel_count; i++, channel++) | ||
| 949 | synchronize_irq(channel->dma_irq); | ||
| 950 | } | ||
| 951 | 1023 | ||
| 952 | xgbe_free_tx_data(pdata); | 1024 | xgbe_free_tx_data(pdata); |
| 953 | xgbe_free_rx_data(pdata); | 1025 | xgbe_free_rx_data(pdata); |
| 954 | 1026 | ||
| 955 | /* Issue software reset to device */ | ||
| 956 | hw_if->exit(pdata); | ||
| 957 | |||
| 958 | xgbe_start(pdata); | 1027 | xgbe_start(pdata); |
| 959 | 1028 | ||
| 960 | DBGPR("<--xgbe_restart_dev\n"); | 1029 | DBGPR("<--xgbe_restart_dev\n"); |
| @@ -1283,10 +1352,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata, | |||
| 1283 | static int xgbe_open(struct net_device *netdev) | 1352 | static int xgbe_open(struct net_device *netdev) |
| 1284 | { | 1353 | { |
| 1285 | struct xgbe_prv_data *pdata = netdev_priv(netdev); | 1354 | struct xgbe_prv_data *pdata = netdev_priv(netdev); |
| 1286 | struct xgbe_hw_if *hw_if = &pdata->hw_if; | ||
| 1287 | struct xgbe_desc_if *desc_if = &pdata->desc_if; | 1355 | struct xgbe_desc_if *desc_if = &pdata->desc_if; |
| 1288 | struct xgbe_channel *channel = NULL; | ||
| 1289 | unsigned int i = 0; | ||
| 1290 | int ret; | 1356 | int ret; |
| 1291 | 1357 | ||
| 1292 | DBGPR("-->xgbe_open\n"); | 1358 | DBGPR("-->xgbe_open\n"); |
| @@ -1329,55 +1395,14 @@ static int xgbe_open(struct net_device *netdev) | |||
| 1329 | INIT_WORK(&pdata->restart_work, xgbe_restart); | 1395 | INIT_WORK(&pdata->restart_work, xgbe_restart); |
| 1330 | INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp); | 1396 | INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp); |
| 1331 | 1397 | ||
| 1332 | /* Request interrupts */ | ||
| 1333 | ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0, | ||
| 1334 | netdev->name, pdata); | ||
| 1335 | if (ret) { | ||
| 1336 | netdev_alert(netdev, "error requesting irq %d\n", | ||
| 1337 | pdata->dev_irq); | ||
| 1338 | goto err_rings; | ||
| 1339 | } | ||
| 1340 | |||
| 1341 | if (pdata->per_channel_irq) { | ||
| 1342 | channel = pdata->channel; | ||
| 1343 | for (i = 0; i < pdata->channel_count; i++, channel++) { | ||
| 1344 | snprintf(channel->dma_irq_name, | ||
| 1345 | sizeof(channel->dma_irq_name) - 1, | ||
| 1346 | "%s-TxRx-%u", netdev_name(netdev), | ||
| 1347 | channel->queue_index); | ||
| 1348 | |||
| 1349 | ret = devm_request_irq(pdata->dev, channel->dma_irq, | ||
| 1350 | xgbe_dma_isr, 0, | ||
| 1351 | channel->dma_irq_name, channel); | ||
| 1352 | if (ret) { | ||
| 1353 | netdev_alert(netdev, | ||
| 1354 | "error requesting irq %d\n", | ||
| 1355 | channel->dma_irq); | ||
| 1356 | goto err_irq; | ||
| 1357 | } | ||
| 1358 | } | ||
| 1359 | } | ||
| 1360 | |||
| 1361 | ret = xgbe_start(pdata); | 1398 | ret = xgbe_start(pdata); |
| 1362 | if (ret) | 1399 | if (ret) |
| 1363 | goto err_start; | 1400 | goto err_rings; |
| 1364 | 1401 | ||
| 1365 | DBGPR("<--xgbe_open\n"); | 1402 | DBGPR("<--xgbe_open\n"); |
| 1366 | 1403 | ||
| 1367 | return 0; | 1404 | return 0; |
| 1368 | 1405 | ||
| 1369 | err_start: | ||
| 1370 | hw_if->exit(pdata); | ||
| 1371 | |||
| 1372 | err_irq: | ||
| 1373 | if (pdata->per_channel_irq) { | ||
| 1374 | /* Using an unsigned int, 'i' will go to UINT_MAX and exit */ | ||
| 1375 | for (i--, channel--; i < pdata->channel_count; i--, channel--) | ||
| 1376 | devm_free_irq(pdata->dev, channel->dma_irq, channel); | ||
| 1377 | } | ||
| 1378 | |||
| 1379 | devm_free_irq(pdata->dev, pdata->dev_irq, pdata); | ||
| 1380 | |||
| 1381 | err_rings: | 1406 | err_rings: |
| 1382 | desc_if->free_ring_resources(pdata); | 1407 | desc_if->free_ring_resources(pdata); |
| 1383 | 1408 | ||
| @@ -1399,30 +1424,16 @@ err_phy_init: | |||
| 1399 | static int xgbe_close(struct net_device *netdev) | 1424 | static int xgbe_close(struct net_device *netdev) |
| 1400 | { | 1425 | { |
| 1401 | struct xgbe_prv_data *pdata = netdev_priv(netdev); | 1426 | struct xgbe_prv_data *pdata = netdev_priv(netdev); |
| 1402 | struct xgbe_hw_if *hw_if = &pdata->hw_if; | ||
| 1403 | struct xgbe_desc_if *desc_if = &pdata->desc_if; | 1427 | struct xgbe_desc_if *desc_if = &pdata->desc_if; |
| 1404 | struct xgbe_channel *channel; | ||
| 1405 | unsigned int i; | ||
| 1406 | 1428 | ||
| 1407 | DBGPR("-->xgbe_close\n"); | 1429 | DBGPR("-->xgbe_close\n"); |
| 1408 | 1430 | ||
| 1409 | /* Stop the device */ | 1431 | /* Stop the device */ |
| 1410 | xgbe_stop(pdata); | 1432 | xgbe_stop(pdata); |
| 1411 | 1433 | ||
| 1412 | /* Issue software reset to device */ | ||
| 1413 | hw_if->exit(pdata); | ||
| 1414 | |||
| 1415 | /* Free the ring descriptors and buffers */ | 1434 | /* Free the ring descriptors and buffers */ |
| 1416 | desc_if->free_ring_resources(pdata); | 1435 | desc_if->free_ring_resources(pdata); |
| 1417 | 1436 | ||
| 1418 | /* Release the interrupts */ | ||
| 1419 | devm_free_irq(pdata->dev, pdata->dev_irq, pdata); | ||
| 1420 | if (pdata->per_channel_irq) { | ||
| 1421 | channel = pdata->channel; | ||
| 1422 | for (i = 0; i < pdata->channel_count; i++, channel++) | ||
| 1423 | devm_free_irq(pdata->dev, channel->dma_irq, channel); | ||
| 1424 | } | ||
| 1425 | |||
| 1426 | /* Free the channel and ring structures */ | 1437 | /* Free the channel and ring structures */ |
| 1427 | xgbe_free_channels(pdata); | 1438 | xgbe_free_channels(pdata); |
| 1428 | 1439 | ||
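Editor's note on the xgbe hunks above: IRQ setup moves into xgbe_start() and teardown into xgbe_stop(), and the error path in xgbe_request_irqs() walks back over partially requested per-channel IRQs with a reverse loop that relies on unsigned wrap-around — once 'i' is decremented past zero it wraps to UINT_MAX, so the 'i < pdata->channel_count' test fails and the loop exits. A minimal user-space sketch of that unwind idiom follows; acquire(), release() and NCHAN are invented stand-ins for devm_request_irq()/devm_free_irq() over the channel array, not the driver's code.

#include <stdio.h>

#define NCHAN 4U

/* Pretend acquisition fails on channel 2, so channels 0 and 1 must be unwound. */
static int acquire(unsigned int ch)
{
        return (ch == 2) ? -1 : 0;
}

static void release(unsigned int ch)
{
        printf("released channel %u\n", ch);
}

int main(void)
{
        unsigned int i;

        for (i = 0; i < NCHAN; i++) {
                if (acquire(i))
                        goto err;
                printf("acquired channel %u\n", i);
        }
        return 0;

err:
        /* Using an unsigned int, 'i' wraps to UINT_MAX and the loop exits */
        for (i--; i < NCHAN; i--)
                release(i);
        return 1;
}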
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index 869d97fcf781..b927021c6c40 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | |||
| @@ -593,7 +593,7 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata) | |||
| 593 | if (!xgene_ring_mgr_init(pdata)) | 593 | if (!xgene_ring_mgr_init(pdata)) |
| 594 | return -ENODEV; | 594 | return -ENODEV; |
| 595 | 595 | ||
| 596 | if (!efi_enabled(EFI_BOOT)) { | 596 | if (pdata->clk) { |
| 597 | clk_prepare_enable(pdata->clk); | 597 | clk_prepare_enable(pdata->clk); |
| 598 | clk_disable_unprepare(pdata->clk); | 598 | clk_disable_unprepare(pdata->clk); |
| 599 | clk_prepare_enable(pdata->clk); | 599 | clk_prepare_enable(pdata->clk); |
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 4de62b210c85..635a83be7e5e 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c | |||
| @@ -1025,6 +1025,8 @@ static int xgene_enet_remove(struct platform_device *pdev) | |||
| 1025 | #ifdef CONFIG_ACPI | 1025 | #ifdef CONFIG_ACPI |
| 1026 | static const struct acpi_device_id xgene_enet_acpi_match[] = { | 1026 | static const struct acpi_device_id xgene_enet_acpi_match[] = { |
| 1027 | { "APMC0D05", }, | 1027 | { "APMC0D05", }, |
| 1028 | { "APMC0D30", }, | ||
| 1029 | { "APMC0D31", }, | ||
| 1028 | { } | 1030 | { } |
| 1029 | }; | 1031 | }; |
| 1030 | MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); | 1032 | MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); |
| @@ -1033,6 +1035,8 @@ MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); | |||
| 1033 | #ifdef CONFIG_OF | 1035 | #ifdef CONFIG_OF |
| 1034 | static struct of_device_id xgene_enet_of_match[] = { | 1036 | static struct of_device_id xgene_enet_of_match[] = { |
| 1035 | {.compatible = "apm,xgene-enet",}, | 1037 | {.compatible = "apm,xgene-enet",}, |
| 1038 | {.compatible = "apm,xgene1-sgenet",}, | ||
| 1039 | {.compatible = "apm,xgene1-xgenet",}, | ||
| 1036 | {}, | 1040 | {}, |
| 1037 | }; | 1041 | }; |
| 1038 | 1042 | ||
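Editor's note: the two xgene hunks above only add entries to the ACPI and OF ID tables. For context, a platform driver normally exposes both tables through its struct platform_driver so the same probe path binds under either firmware. The skeleton below is a hedged, generic sketch — the example_* names and stub probe/remove bodies are invented, and it is not the xgene driver's actual registration code; only the ID strings are taken from the hunk above.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/of.h>

static int example_enet_probe(struct platform_device *pdev)
{
        return 0;       /* a real driver would map resources and register a netdev */
}

static int example_enet_remove(struct platform_device *pdev)
{
        return 0;
}

#ifdef CONFIG_ACPI
static const struct acpi_device_id example_enet_acpi_match[] = {
        { "APMC0D05", },        /* ID as listed in the hunk above */
        { }
};
MODULE_DEVICE_TABLE(acpi, example_enet_acpi_match);
#endif

static const struct of_device_id example_enet_of_match[] = {
        { .compatible = "apm,xgene-enet", },
        { }
};
MODULE_DEVICE_TABLE(of, example_enet_of_match);

static struct platform_driver example_enet_driver = {
        .driver = {
                .name = "example-enet",
                .of_match_table = of_match_ptr(example_enet_of_match),
                .acpi_match_table = ACPI_PTR(example_enet_acpi_match),
        },
        .probe = example_enet_probe,
        .remove = example_enet_remove,
};
module_platform_driver(example_enet_driver);

MODULE_LICENSE("GPL");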
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 21206d33b638..a7f2cc3e485e 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c | |||
| @@ -486,7 +486,7 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget) | |||
| 486 | { | 486 | { |
| 487 | struct bcm_enet_priv *priv; | 487 | struct bcm_enet_priv *priv; |
| 488 | struct net_device *dev; | 488 | struct net_device *dev; |
| 489 | int tx_work_done, rx_work_done; | 489 | int rx_work_done; |
| 490 | 490 | ||
| 491 | priv = container_of(napi, struct bcm_enet_priv, napi); | 491 | priv = container_of(napi, struct bcm_enet_priv, napi); |
| 492 | dev = priv->net_dev; | 492 | dev = priv->net_dev; |
| @@ -498,14 +498,14 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget) | |||
| 498 | ENETDMAC_IR, priv->tx_chan); | 498 | ENETDMAC_IR, priv->tx_chan); |
| 499 | 499 | ||
| 500 | /* reclaim sent skb */ | 500 | /* reclaim sent skb */ |
| 501 | tx_work_done = bcm_enet_tx_reclaim(dev, 0); | 501 | bcm_enet_tx_reclaim(dev, 0); |
| 502 | 502 | ||
| 503 | spin_lock(&priv->rx_lock); | 503 | spin_lock(&priv->rx_lock); |
| 504 | rx_work_done = bcm_enet_receive_queue(dev, budget); | 504 | rx_work_done = bcm_enet_receive_queue(dev, budget); |
| 505 | spin_unlock(&priv->rx_lock); | 505 | spin_unlock(&priv->rx_lock); |
| 506 | 506 | ||
| 507 | if (rx_work_done >= budget || tx_work_done > 0) { | 507 | if (rx_work_done >= budget) { |
| 508 | /* rx/tx queue is not yet empty/clean */ | 508 | /* rx queue is not yet empty/clean */ |
| 509 | return rx_work_done; | 509 | return rx_work_done; |
| 510 | } | 510 | } |
| 511 | 511 | ||
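Editor's note: the bcm63xx change above stops counting reclaimed TX packets toward the NAPI budget — TX cleanup still runs on every poll, but only RX frames decide whether polling continues. A hedged sketch of that poll contract is below; foo_* is a hypothetical driver, and napi_complete()/container_of() are the only real kernel interfaces used.

/* Sketch only: TX reclaim is done unconditionally but never budgeted,
 * while RX work alone decides whether polling continues.
 */
static int foo_poll(struct napi_struct *napi, int budget)
{
        struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
        int rx_done;

        foo_tx_reclaim(priv);                   /* not counted */
        rx_done = foo_rx(priv, budget);         /* 0..budget packets */

        if (rx_done < budget) {
                /* RX is drained for now: stop polling, re-arm interrupts */
                napi_complete(napi);
                foo_enable_irqs(priv);
        }

        return rx_done;
}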
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 5b308a4a4d0e..783543ad1fcf 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c | |||
| @@ -274,9 +274,9 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = { | |||
| 274 | /* RBUF misc statistics */ | 274 | /* RBUF misc statistics */ |
| 275 | STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR), | 275 | STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR), |
| 276 | STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR), | 276 | STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR), |
| 277 | STAT_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), | 277 | STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), |
| 278 | STAT_MIB_RX("rx_dma_failed", mib.rx_dma_failed), | 278 | STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed), |
| 279 | STAT_MIB_TX("tx_dma_failed", mib.tx_dma_failed), | 279 | STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed), |
| 280 | }; | 280 | }; |
| 281 | 281 | ||
| 282 | #define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats) | 282 | #define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats) |
| @@ -345,6 +345,7 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv) | |||
| 345 | s = &bcm_sysport_gstrings_stats[i]; | 345 | s = &bcm_sysport_gstrings_stats[i]; |
| 346 | switch (s->type) { | 346 | switch (s->type) { |
| 347 | case BCM_SYSPORT_STAT_NETDEV: | 347 | case BCM_SYSPORT_STAT_NETDEV: |
| 348 | case BCM_SYSPORT_STAT_SOFT: | ||
| 348 | continue; | 349 | continue; |
| 349 | case BCM_SYSPORT_STAT_MIB_RX: | 350 | case BCM_SYSPORT_STAT_MIB_RX: |
| 350 | case BCM_SYSPORT_STAT_MIB_TX: | 351 | case BCM_SYSPORT_STAT_MIB_TX: |
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index fc19417d82a5..7e3d87a88c76 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h | |||
| @@ -570,6 +570,7 @@ enum bcm_sysport_stat_type { | |||
| 570 | BCM_SYSPORT_STAT_RUNT, | 570 | BCM_SYSPORT_STAT_RUNT, |
| 571 | BCM_SYSPORT_STAT_RXCHK, | 571 | BCM_SYSPORT_STAT_RXCHK, |
| 572 | BCM_SYSPORT_STAT_RBUF, | 572 | BCM_SYSPORT_STAT_RBUF, |
| 573 | BCM_SYSPORT_STAT_SOFT, | ||
| 573 | }; | 574 | }; |
| 574 | 575 | ||
| 575 | /* Macros to help define ethtool statistics */ | 576 | /* Macros to help define ethtool statistics */ |
| @@ -590,6 +591,7 @@ enum bcm_sysport_stat_type { | |||
| 590 | #define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX) | 591 | #define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX) |
| 591 | #define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX) | 592 | #define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX) |
| 592 | #define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT) | 593 | #define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT) |
| 594 | #define STAT_MIB_SOFT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_SOFT) | ||
| 593 | 595 | ||
| 594 | #define STAT_RXCHK(str, m, ofs) { \ | 596 | #define STAT_RXCHK(str, m, ofs) { \ |
| 595 | .stat_string = str, \ | 597 | .stat_string = str, \ |
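Editor's note: both the SYSTEMPORT hunks above and the GENET hunks further down introduce a "soft" statistics type so counters that exist only in software (allocation and DMA-mapping failures) are skipped when the driver refreshes its statistics block from hardware MIB registers; without the skip, each refresh would overwrite them. A small stand-alone sketch of that skip-by-type idea, with invented names and a fake register read:

#include <stdio.h>
#include <stddef.h>

enum stat_type { STAT_HW, STAT_SOFT };

struct counters {
        unsigned long rx_pkts;          /* mirrored from a hardware counter */
        unsigned long alloc_failed;     /* maintained purely in software */
};

struct stat_desc {
        const char *name;
        enum stat_type type;
        size_t offset;                  /* offset into struct counters */
};

static const struct stat_desc stats[] = {
        { "rx_pkts",      STAT_HW,   offsetof(struct counters, rx_pkts) },
        { "alloc_failed", STAT_SOFT, offsetof(struct counters, alloc_failed) },
};

/* Stand-in for reading a hardware MIB register. */
static unsigned long read_hw_counter(size_t i)
{
        return 100 + i;
}

static void update_mib(struct counters *c)
{
        for (size_t i = 0; i < sizeof(stats) / sizeof(stats[0]); i++) {
                if (stats[i].type == STAT_SOFT)
                        continue;       /* never clobber software counters */
                *(unsigned long *)((char *)c + stats[i].offset) =
                        read_hw_counter(i);
        }
}

int main(void)
{
        struct counters c = { .alloc_failed = 7 };

        update_mib(&c);
        printf("rx_pkts=%lu alloc_failed=%lu\n", c.rx_pkts, c.alloc_failed);
        return 0;
}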
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 676ffe093180..0469f72c6e7e 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c | |||
| @@ -302,9 +302,6 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac, | |||
| 302 | slot->skb = skb; | 302 | slot->skb = skb; |
| 303 | slot->dma_addr = dma_addr; | 303 | slot->dma_addr = dma_addr; |
| 304 | 304 | ||
| 305 | if (slot->dma_addr & 0xC0000000) | ||
| 306 | bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); | ||
| 307 | |||
| 308 | return 0; | 305 | return 0; |
| 309 | } | 306 | } |
| 310 | 307 | ||
| @@ -505,8 +502,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) | |||
| 505 | ring->mmio_base); | 502 | ring->mmio_base); |
| 506 | goto err_dma_free; | 503 | goto err_dma_free; |
| 507 | } | 504 | } |
| 508 | if (ring->dma_base & 0xC0000000) | ||
| 509 | bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); | ||
| 510 | 505 | ||
| 511 | ring->unaligned = bgmac_dma_unaligned(bgmac, ring, | 506 | ring->unaligned = bgmac_dma_unaligned(bgmac, ring, |
| 512 | BGMAC_DMA_RING_TX); | 507 | BGMAC_DMA_RING_TX); |
| @@ -536,8 +531,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) | |||
| 536 | err = -ENOMEM; | 531 | err = -ENOMEM; |
| 537 | goto err_dma_free; | 532 | goto err_dma_free; |
| 538 | } | 533 | } |
| 539 | if (ring->dma_base & 0xC0000000) | ||
| 540 | bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); | ||
| 541 | 534 | ||
| 542 | ring->unaligned = bgmac_dma_unaligned(bgmac, ring, | 535 | ring->unaligned = bgmac_dma_unaligned(bgmac, ring, |
| 543 | BGMAC_DMA_RING_RX); | 536 | BGMAC_DMA_RING_RX); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 7155e1d2c208..bef750a09027 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
| @@ -12722,6 +12722,9 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, | |||
| 12722 | pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, | 12722 | pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, |
| 12723 | PCICFG_VENDOR_ID_OFFSET); | 12723 | PCICFG_VENDOR_ID_OFFSET); |
| 12724 | 12724 | ||
| 12725 | /* Set PCIe reset type to fundamental for EEH recovery */ | ||
| 12726 | pdev->needs_freset = 1; | ||
| 12727 | |||
| 12725 | /* AER (Advanced Error reporting) configuration */ | 12728 | /* AER (Advanced Error reporting) configuration */ |
| 12726 | rc = pci_enable_pcie_error_reporting(pdev); | 12729 | rc = pci_enable_pcie_error_reporting(pdev); |
| 12727 | if (!rc) | 12730 | if (!rc) |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index ff83c46bc389..6befde61c203 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c | |||
| @@ -487,6 +487,7 @@ enum bcmgenet_stat_type { | |||
| 487 | BCMGENET_STAT_MIB_TX, | 487 | BCMGENET_STAT_MIB_TX, |
| 488 | BCMGENET_STAT_RUNT, | 488 | BCMGENET_STAT_RUNT, |
| 489 | BCMGENET_STAT_MISC, | 489 | BCMGENET_STAT_MISC, |
| 490 | BCMGENET_STAT_SOFT, | ||
| 490 | }; | 491 | }; |
| 491 | 492 | ||
| 492 | struct bcmgenet_stats { | 493 | struct bcmgenet_stats { |
| @@ -515,6 +516,7 @@ struct bcmgenet_stats { | |||
| 515 | #define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX) | 516 | #define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX) |
| 516 | #define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX) | 517 | #define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX) |
| 517 | #define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT) | 518 | #define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT) |
| 519 | #define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT) | ||
| 518 | 520 | ||
| 519 | #define STAT_GENET_MISC(str, m, offset) { \ | 521 | #define STAT_GENET_MISC(str, m, offset) { \ |
| 520 | .stat_string = str, \ | 522 | .stat_string = str, \ |
| @@ -614,9 +616,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = { | |||
| 614 | UMAC_RBUF_OVFL_CNT), | 616 | UMAC_RBUF_OVFL_CNT), |
| 615 | STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT), | 617 | STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT), |
| 616 | STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT), | 618 | STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT), |
| 617 | STAT_GENET_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), | 619 | STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), |
| 618 | STAT_GENET_MIB_RX("rx_dma_failed", mib.rx_dma_failed), | 620 | STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed), |
| 619 | STAT_GENET_MIB_TX("tx_dma_failed", mib.tx_dma_failed), | 621 | STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed), |
| 620 | }; | 622 | }; |
| 621 | 623 | ||
| 622 | #define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats) | 624 | #define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats) |
| @@ -668,6 +670,7 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv) | |||
| 668 | s = &bcmgenet_gstrings_stats[i]; | 670 | s = &bcmgenet_gstrings_stats[i]; |
| 669 | switch (s->type) { | 671 | switch (s->type) { |
| 670 | case BCMGENET_STAT_NETDEV: | 672 | case BCMGENET_STAT_NETDEV: |
| 673 | case BCMGENET_STAT_SOFT: | ||
| 671 | continue; | 674 | continue; |
| 672 | case BCMGENET_STAT_MIB_RX: | 675 | case BCMGENET_STAT_MIB_RX: |
| 673 | case BCMGENET_STAT_MIB_TX: | 676 | case BCMGENET_STAT_MIB_TX: |
| @@ -971,13 +974,14 @@ static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv, | |||
| 971 | } | 974 | } |
| 972 | 975 | ||
| 973 | /* Unlocked version of the reclaim routine */ | 976 | /* Unlocked version of the reclaim routine */ |
| 974 | static void __bcmgenet_tx_reclaim(struct net_device *dev, | 977 | static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, |
| 975 | struct bcmgenet_tx_ring *ring) | 978 | struct bcmgenet_tx_ring *ring) |
| 976 | { | 979 | { |
| 977 | struct bcmgenet_priv *priv = netdev_priv(dev); | 980 | struct bcmgenet_priv *priv = netdev_priv(dev); |
| 978 | int last_tx_cn, last_c_index, num_tx_bds; | 981 | int last_tx_cn, last_c_index, num_tx_bds; |
| 979 | struct enet_cb *tx_cb_ptr; | 982 | struct enet_cb *tx_cb_ptr; |
| 980 | struct netdev_queue *txq; | 983 | struct netdev_queue *txq; |
| 984 | unsigned int pkts_compl = 0; | ||
| 981 | unsigned int bds_compl; | 985 | unsigned int bds_compl; |
| 982 | unsigned int c_index; | 986 | unsigned int c_index; |
| 983 | 987 | ||
| @@ -1005,6 +1009,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev, | |||
| 1005 | tx_cb_ptr = ring->cbs + last_c_index; | 1009 | tx_cb_ptr = ring->cbs + last_c_index; |
| 1006 | bds_compl = 0; | 1010 | bds_compl = 0; |
| 1007 | if (tx_cb_ptr->skb) { | 1011 | if (tx_cb_ptr->skb) { |
| 1012 | pkts_compl++; | ||
| 1008 | bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1; | 1013 | bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1; |
| 1009 | dev->stats.tx_bytes += tx_cb_ptr->skb->len; | 1014 | dev->stats.tx_bytes += tx_cb_ptr->skb->len; |
| 1010 | dma_unmap_single(&dev->dev, | 1015 | dma_unmap_single(&dev->dev, |
| @@ -1028,23 +1033,45 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev, | |||
| 1028 | last_c_index &= (num_tx_bds - 1); | 1033 | last_c_index &= (num_tx_bds - 1); |
| 1029 | } | 1034 | } |
| 1030 | 1035 | ||
| 1031 | if (ring->free_bds > (MAX_SKB_FRAGS + 1)) | 1036 | if (ring->free_bds > (MAX_SKB_FRAGS + 1)) { |
| 1032 | ring->int_disable(priv, ring); | 1037 | if (netif_tx_queue_stopped(txq)) |
| 1033 | 1038 | netif_tx_wake_queue(txq); | |
| 1034 | if (netif_tx_queue_stopped(txq)) | 1039 | } |
| 1035 | netif_tx_wake_queue(txq); | ||
| 1036 | 1040 | ||
| 1037 | ring->c_index = c_index; | 1041 | ring->c_index = c_index; |
| 1042 | |||
| 1043 | return pkts_compl; | ||
| 1038 | } | 1044 | } |
| 1039 | 1045 | ||
| 1040 | static void bcmgenet_tx_reclaim(struct net_device *dev, | 1046 | static unsigned int bcmgenet_tx_reclaim(struct net_device *dev, |
| 1041 | struct bcmgenet_tx_ring *ring) | 1047 | struct bcmgenet_tx_ring *ring) |
| 1042 | { | 1048 | { |
| 1049 | unsigned int released; | ||
| 1043 | unsigned long flags; | 1050 | unsigned long flags; |
| 1044 | 1051 | ||
| 1045 | spin_lock_irqsave(&ring->lock, flags); | 1052 | spin_lock_irqsave(&ring->lock, flags); |
| 1046 | __bcmgenet_tx_reclaim(dev, ring); | 1053 | released = __bcmgenet_tx_reclaim(dev, ring); |
| 1047 | spin_unlock_irqrestore(&ring->lock, flags); | 1054 | spin_unlock_irqrestore(&ring->lock, flags); |
| 1055 | |||
| 1056 | return released; | ||
| 1057 | } | ||
| 1058 | |||
| 1059 | static int bcmgenet_tx_poll(struct napi_struct *napi, int budget) | ||
| 1060 | { | ||
| 1061 | struct bcmgenet_tx_ring *ring = | ||
| 1062 | container_of(napi, struct bcmgenet_tx_ring, napi); | ||
| 1063 | unsigned int work_done = 0; | ||
| 1064 | |||
| 1065 | work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring); | ||
| 1066 | |||
| 1067 | if (work_done == 0) { | ||
| 1068 | napi_complete(napi); | ||
| 1069 | ring->int_enable(ring->priv, ring); | ||
| 1070 | |||
| 1071 | return 0; | ||
| 1072 | } | ||
| 1073 | |||
| 1074 | return budget; | ||
| 1048 | } | 1075 | } |
| 1049 | 1076 | ||
| 1050 | static void bcmgenet_tx_reclaim_all(struct net_device *dev) | 1077 | static void bcmgenet_tx_reclaim_all(struct net_device *dev) |
| @@ -1302,10 +1329,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1302 | bcmgenet_tdma_ring_writel(priv, ring->index, | 1329 | bcmgenet_tdma_ring_writel(priv, ring->index, |
| 1303 | ring->prod_index, TDMA_PROD_INDEX); | 1330 | ring->prod_index, TDMA_PROD_INDEX); |
| 1304 | 1331 | ||
| 1305 | if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) { | 1332 | if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) |
| 1306 | netif_tx_stop_queue(txq); | 1333 | netif_tx_stop_queue(txq); |
| 1307 | ring->int_enable(priv, ring); | ||
| 1308 | } | ||
| 1309 | 1334 | ||
| 1310 | out: | 1335 | out: |
| 1311 | spin_unlock_irqrestore(&ring->lock, flags); | 1336 | spin_unlock_irqrestore(&ring->lock, flags); |
| @@ -1621,6 +1646,7 @@ static int init_umac(struct bcmgenet_priv *priv) | |||
| 1621 | struct device *kdev = &priv->pdev->dev; | 1646 | struct device *kdev = &priv->pdev->dev; |
| 1622 | int ret; | 1647 | int ret; |
| 1623 | u32 reg, cpu_mask_clear; | 1648 | u32 reg, cpu_mask_clear; |
| 1649 | int index; | ||
| 1624 | 1650 | ||
| 1625 | dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n"); | 1651 | dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n"); |
| 1626 | 1652 | ||
| @@ -1647,7 +1673,7 @@ static int init_umac(struct bcmgenet_priv *priv) | |||
| 1647 | 1673 | ||
| 1648 | bcmgenet_intr_disable(priv); | 1674 | bcmgenet_intr_disable(priv); |
| 1649 | 1675 | ||
| 1650 | cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE; | 1676 | cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_TXDMA_BDONE; |
| 1651 | 1677 | ||
| 1652 | dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__); | 1678 | dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__); |
| 1653 | 1679 | ||
| @@ -1674,6 +1700,10 @@ static int init_umac(struct bcmgenet_priv *priv) | |||
| 1674 | 1700 | ||
| 1675 | bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR); | 1701 | bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR); |
| 1676 | 1702 | ||
| 1703 | for (index = 0; index < priv->hw_params->tx_queues; index++) | ||
| 1704 | bcmgenet_intrl2_1_writel(priv, (1 << index), | ||
| 1705 | INTRL2_CPU_MASK_CLEAR); | ||
| 1706 | |||
| 1677 | /* Enable rx/tx engine.*/ | 1707 | /* Enable rx/tx engine.*/ |
| 1678 | dev_dbg(kdev, "done init umac\n"); | 1708 | dev_dbg(kdev, "done init umac\n"); |
| 1679 | 1709 | ||
| @@ -1693,6 +1723,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, | |||
| 1693 | unsigned int first_bd; | 1723 | unsigned int first_bd; |
| 1694 | 1724 | ||
| 1695 | spin_lock_init(&ring->lock); | 1725 | spin_lock_init(&ring->lock); |
| 1726 | ring->priv = priv; | ||
| 1727 | netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64); | ||
| 1696 | ring->index = index; | 1728 | ring->index = index; |
| 1697 | if (index == DESC_INDEX) { | 1729 | if (index == DESC_INDEX) { |
| 1698 | ring->queue = 0; | 1730 | ring->queue = 0; |
| @@ -1738,6 +1770,17 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, | |||
| 1738 | TDMA_WRITE_PTR); | 1770 | TDMA_WRITE_PTR); |
| 1739 | bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, | 1771 | bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, |
| 1740 | DMA_END_ADDR); | 1772 | DMA_END_ADDR); |
| 1773 | |||
| 1774 | napi_enable(&ring->napi); | ||
| 1775 | } | ||
| 1776 | |||
| 1777 | static void bcmgenet_fini_tx_ring(struct bcmgenet_priv *priv, | ||
| 1778 | unsigned int index) | ||
| 1779 | { | ||
| 1780 | struct bcmgenet_tx_ring *ring = &priv->tx_rings[index]; | ||
| 1781 | |||
| 1782 | napi_disable(&ring->napi); | ||
| 1783 | netif_napi_del(&ring->napi); | ||
| 1741 | } | 1784 | } |
| 1742 | 1785 | ||
| 1743 | /* Initialize a RDMA ring */ | 1786 | /* Initialize a RDMA ring */ |
| @@ -1907,7 +1950,7 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) | |||
| 1907 | return ret; | 1950 | return ret; |
| 1908 | } | 1951 | } |
| 1909 | 1952 | ||
| 1910 | static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) | 1953 | static void __bcmgenet_fini_dma(struct bcmgenet_priv *priv) |
| 1911 | { | 1954 | { |
| 1912 | int i; | 1955 | int i; |
| 1913 | 1956 | ||
| @@ -1926,6 +1969,18 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) | |||
| 1926 | kfree(priv->tx_cbs); | 1969 | kfree(priv->tx_cbs); |
| 1927 | } | 1970 | } |
| 1928 | 1971 | ||
| 1972 | static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) | ||
| 1973 | { | ||
| 1974 | int i; | ||
| 1975 | |||
| 1976 | bcmgenet_fini_tx_ring(priv, DESC_INDEX); | ||
| 1977 | |||
| 1978 | for (i = 0; i < priv->hw_params->tx_queues; i++) | ||
| 1979 | bcmgenet_fini_tx_ring(priv, i); | ||
| 1980 | |||
| 1981 | __bcmgenet_fini_dma(priv); | ||
| 1982 | } | ||
| 1983 | |||
| 1929 | /* init_edma: Initialize DMA control register */ | 1984 | /* init_edma: Initialize DMA control register */ |
| 1930 | static int bcmgenet_init_dma(struct bcmgenet_priv *priv) | 1985 | static int bcmgenet_init_dma(struct bcmgenet_priv *priv) |
| 1931 | { | 1986 | { |
| @@ -1952,7 +2007,7 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv) | |||
| 1952 | priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb), | 2007 | priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb), |
| 1953 | GFP_KERNEL); | 2008 | GFP_KERNEL); |
| 1954 | if (!priv->tx_cbs) { | 2009 | if (!priv->tx_cbs) { |
| 1955 | bcmgenet_fini_dma(priv); | 2010 | __bcmgenet_fini_dma(priv); |
| 1956 | return -ENOMEM; | 2011 | return -ENOMEM; |
| 1957 | } | 2012 | } |
| 1958 | 2013 | ||
| @@ -1975,9 +2030,6 @@ static int bcmgenet_poll(struct napi_struct *napi, int budget) | |||
| 1975 | struct bcmgenet_priv, napi); | 2030 | struct bcmgenet_priv, napi); |
| 1976 | unsigned int work_done; | 2031 | unsigned int work_done; |
| 1977 | 2032 | ||
| 1978 | /* tx reclaim */ | ||
| 1979 | bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]); | ||
| 1980 | |||
| 1981 | work_done = bcmgenet_desc_rx(priv, budget); | 2033 | work_done = bcmgenet_desc_rx(priv, budget); |
| 1982 | 2034 | ||
| 1983 | /* Advancing our consumer index*/ | 2035 | /* Advancing our consumer index*/ |
| @@ -2022,28 +2074,34 @@ static void bcmgenet_irq_task(struct work_struct *work) | |||
| 2022 | static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) | 2074 | static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) |
| 2023 | { | 2075 | { |
| 2024 | struct bcmgenet_priv *priv = dev_id; | 2076 | struct bcmgenet_priv *priv = dev_id; |
| 2077 | struct bcmgenet_tx_ring *ring; | ||
| 2025 | unsigned int index; | 2078 | unsigned int index; |
| 2026 | 2079 | ||
| 2027 | /* Save irq status for bottom-half processing. */ | 2080 | /* Save irq status for bottom-half processing. */ |
| 2028 | priv->irq1_stat = | 2081 | priv->irq1_stat = |
| 2029 | bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) & | 2082 | bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) & |
| 2030 | ~priv->int1_mask; | 2083 | ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); |
| 2031 | /* clear interrupts */ | 2084 | /* clear interrupts */ |
| 2032 | bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR); | 2085 | bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR); |
| 2033 | 2086 | ||
| 2034 | netif_dbg(priv, intr, priv->dev, | 2087 | netif_dbg(priv, intr, priv->dev, |
| 2035 | "%s: IRQ=0x%x\n", __func__, priv->irq1_stat); | 2088 | "%s: IRQ=0x%x\n", __func__, priv->irq1_stat); |
| 2089 | |||
| 2036 | /* Check the MBDONE interrupts. | 2090 | /* Check the MBDONE interrupts. |
| 2037 | * packet is done, reclaim descriptors | 2091 | * packet is done, reclaim descriptors |
| 2038 | */ | 2092 | */ |
| 2039 | if (priv->irq1_stat & 0x0000ffff) { | 2093 | for (index = 0; index < priv->hw_params->tx_queues; index++) { |
| 2040 | index = 0; | 2094 | if (!(priv->irq1_stat & BIT(index))) |
| 2041 | for (index = 0; index < 16; index++) { | 2095 | continue; |
| 2042 | if (priv->irq1_stat & (1 << index)) | 2096 | |
| 2043 | bcmgenet_tx_reclaim(priv->dev, | 2097 | ring = &priv->tx_rings[index]; |
| 2044 | &priv->tx_rings[index]); | 2098 | |
| 2099 | if (likely(napi_schedule_prep(&ring->napi))) { | ||
| 2100 | ring->int_disable(priv, ring); | ||
| 2101 | __napi_schedule(&ring->napi); | ||
| 2045 | } | 2102 | } |
| 2046 | } | 2103 | } |
| 2104 | |||
| 2047 | return IRQ_HANDLED; | 2105 | return IRQ_HANDLED; |
| 2048 | } | 2106 | } |
| 2049 | 2107 | ||
| @@ -2075,8 +2133,12 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) | |||
| 2075 | } | 2133 | } |
| 2076 | if (priv->irq0_stat & | 2134 | if (priv->irq0_stat & |
| 2077 | (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) { | 2135 | (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) { |
| 2078 | /* Tx reclaim */ | 2136 | struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX]; |
| 2079 | bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]); | 2137 | |
| 2138 | if (likely(napi_schedule_prep(&ring->napi))) { | ||
| 2139 | ring->int_disable(priv, ring); | ||
| 2140 | __napi_schedule(&ring->napi); | ||
| 2141 | } | ||
| 2080 | } | 2142 | } |
| 2081 | if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R | | 2143 | if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R | |
| 2082 | UMAC_IRQ_PHY_DET_F | | 2144 | UMAC_IRQ_PHY_DET_F | |
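Editor's note: the GENET hunks above are the heart of this series — TX reclaim moves out of the shared RX poll and into one NAPI context per TX ring. The ISR masks the ring's interrupt and schedules its NAPI, and the poll re-enables the interrupt only once a pass reclaims nothing. The fragment below is a hedged sketch of that shape using the real NAPI primitives (napi_schedule_prep(), __napi_schedule(), napi_complete(), netif_napi_add()); the foo_* ring type and helpers are invented, and this is not the driver's actual code.

/* Interrupt side: mask the ring's interrupt, defer work to its NAPI. */
static irqreturn_t foo_tx_isr(int irq, void *dev_id)
{
        struct foo_tx_ring *ring = dev_id;

        if (likely(napi_schedule_prep(&ring->napi))) {
                foo_ring_irq_disable(ring);
                __napi_schedule(&ring->napi);
        }
        return IRQ_HANDLED;
}

/* Poll side: keep reclaiming until a pass completes no packets. */
static int foo_tx_poll(struct napi_struct *napi, int budget)
{
        struct foo_tx_ring *ring =
                container_of(napi, struct foo_tx_ring, napi);
        unsigned int done = foo_tx_reclaim(ring);

        if (done == 0) {
                napi_complete(napi);
                foo_ring_irq_enable(ring);      /* unmask, as in the patch */
                return 0;
        }
        return budget;                          /* stay scheduled */
}

/* Per-ring registration at init time, weight 64 as in the patch:
 *      netif_napi_add(priv->dev, &ring->napi, foo_tx_poll, 64);
 *      napi_enable(&ring->napi);
 */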
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index b36ddec0cc0a..0d370d168aee 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h | |||
| @@ -520,6 +520,7 @@ struct bcmgenet_hw_params { | |||
| 520 | 520 | ||
| 521 | struct bcmgenet_tx_ring { | 521 | struct bcmgenet_tx_ring { |
| 522 | spinlock_t lock; /* ring lock */ | 522 | spinlock_t lock; /* ring lock */ |
| 523 | struct napi_struct napi; /* NAPI per tx queue */ | ||
| 523 | unsigned int index; /* ring index */ | 524 | unsigned int index; /* ring index */ |
| 524 | unsigned int queue; /* queue index */ | 525 | unsigned int queue; /* queue index */ |
| 525 | struct enet_cb *cbs; /* tx ring buffer control block*/ | 526 | struct enet_cb *cbs; /* tx ring buffer control block*/ |
| @@ -534,6 +535,7 @@ struct bcmgenet_tx_ring { | |||
| 534 | struct bcmgenet_tx_ring *); | 535 | struct bcmgenet_tx_ring *); |
| 535 | void (*int_disable)(struct bcmgenet_priv *priv, | 536 | void (*int_disable)(struct bcmgenet_priv *priv, |
| 536 | struct bcmgenet_tx_ring *); | 537 | struct bcmgenet_tx_ring *); |
| 538 | struct bcmgenet_priv *priv; | ||
| 537 | }; | 539 | }; |
| 538 | 540 | ||
| 539 | /* device context */ | 541 | /* device context */ |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index 149a0d70c108..b97122926d3a 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | |||
| @@ -73,15 +73,17 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | |||
| 73 | if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE)) | 73 | if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE)) |
| 74 | return -EINVAL; | 74 | return -EINVAL; |
| 75 | 75 | ||
| 76 | reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); | ||
| 76 | if (wol->wolopts & WAKE_MAGICSECURE) { | 77 | if (wol->wolopts & WAKE_MAGICSECURE) { |
| 77 | bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), | 78 | bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), |
| 78 | UMAC_MPD_PW_MS); | 79 | UMAC_MPD_PW_MS); |
| 79 | bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), | 80 | bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), |
| 80 | UMAC_MPD_PW_LS); | 81 | UMAC_MPD_PW_LS); |
| 81 | reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); | ||
| 82 | reg |= MPD_PW_EN; | 82 | reg |= MPD_PW_EN; |
| 83 | bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); | 83 | } else { |
| 84 | reg &= ~MPD_PW_EN; | ||
| 84 | } | 85 | } |
| 86 | bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); | ||
| 85 | 87 | ||
| 86 | /* Flag the device and relevant IRQ as wakeup capable */ | 88 | /* Flag the device and relevant IRQ as wakeup capable */ |
| 87 | if (wol->wolopts) { | 89 | if (wol->wolopts) { |
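Editor's note: the Wake-on-LAN fix above is a plain read-modify-write correction — UMAC_MPD_CTRL is read once, MPD_PW_EN is set or cleared depending on whether WAKE_MAGICSECURE was requested, and the result is written back unconditionally, so turning secure mode off now actually clears the bit. A tiny user-space sketch with a shadow register; the bit position used here is illustrative, not the hardware's real layout.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define MPD_PW_EN (1u << 27)            /* illustrative bit, not the real offset */

static uint32_t mpd_ctrl = MPD_PW_EN;   /* pretend the bit was left set earlier */

static void set_secure_magic(bool enable)
{
        uint32_t reg = mpd_ctrl;        /* read once */

        if (enable)
                reg |= MPD_PW_EN;
        else
                reg &= ~MPD_PW_EN;      /* the path the old code never took */

        mpd_ctrl = reg;                 /* single unconditional write-back */
}

int main(void)
{
        set_secure_magic(false);
        printf("MPD_CTRL=0x%08x (PW_EN %s)\n", (unsigned int)mpd_ctrl,
               (mpd_ctrl & MPD_PW_EN) ? "set" : "clear");
        return 0;
}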
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index ad76b8e35a00..81d41539fcba 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c | |||
| @@ -2113,17 +2113,17 @@ static const struct net_device_ops macb_netdev_ops = { | |||
| 2113 | }; | 2113 | }; |
| 2114 | 2114 | ||
| 2115 | #if defined(CONFIG_OF) | 2115 | #if defined(CONFIG_OF) |
| 2116 | static struct macb_config pc302gem_config = { | 2116 | static const struct macb_config pc302gem_config = { |
| 2117 | .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, | 2117 | .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, |
| 2118 | .dma_burst_length = 16, | 2118 | .dma_burst_length = 16, |
| 2119 | }; | 2119 | }; |
| 2120 | 2120 | ||
| 2121 | static struct macb_config sama5d3_config = { | 2121 | static const struct macb_config sama5d3_config = { |
| 2122 | .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, | 2122 | .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, |
| 2123 | .dma_burst_length = 16, | 2123 | .dma_burst_length = 16, |
| 2124 | }; | 2124 | }; |
| 2125 | 2125 | ||
| 2126 | static struct macb_config sama5d4_config = { | 2126 | static const struct macb_config sama5d4_config = { |
| 2127 | .caps = 0, | 2127 | .caps = 0, |
| 2128 | .dma_burst_length = 4, | 2128 | .dma_burst_length = 4, |
| 2129 | }; | 2129 | }; |
| @@ -2154,7 +2154,7 @@ static void macb_configure_caps(struct macb *bp) | |||
| 2154 | if (bp->pdev->dev.of_node) { | 2154 | if (bp->pdev->dev.of_node) { |
| 2155 | match = of_match_node(macb_dt_ids, bp->pdev->dev.of_node); | 2155 | match = of_match_node(macb_dt_ids, bp->pdev->dev.of_node); |
| 2156 | if (match && match->data) { | 2156 | if (match && match->data) { |
| 2157 | config = (const struct macb_config *)match->data; | 2157 | config = match->data; |
| 2158 | 2158 | ||
| 2159 | bp->caps = config->caps; | 2159 | bp->caps = config->caps; |
| 2160 | /* | 2160 | /* |
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 31dc080f2437..ff85619a9732 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h | |||
| @@ -351,7 +351,7 @@ | |||
| 351 | 351 | ||
| 352 | /* Bitfields in MID */ | 352 | /* Bitfields in MID */ |
| 353 | #define MACB_IDNUM_OFFSET 16 | 353 | #define MACB_IDNUM_OFFSET 16 |
| 354 | #define MACB_IDNUM_SIZE 16 | 354 | #define MACB_IDNUM_SIZE 12 |
| 355 | #define MACB_REV_OFFSET 0 | 355 | #define MACB_REV_OFFSET 0 |
| 356 | #define MACB_REV_SIZE 16 | 356 | #define MACB_REV_SIZE 16 |
| 357 | 357 | ||
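Editor's note: shrinking MACB_IDNUM_SIZE to 12 matters because the header's field macros derive their masks from the OFFSET/SIZE pairs; with SIZE 16 the extracted ID would also absorb the four bits sitting above the real ID field in the MID register. A quick user-space check of the mask arithmetic — the GETFIELD macro below mirrors the style of the driver's field helpers but is written here from scratch, and the register value is made up.

#include <stdio.h>
#include <stdint.h>

#define IDNUM_OFFSET 16
#define GETFIELD(val, offset, size) \
        (((val) >> (offset)) & ((1u << (size)) - 1u))

int main(void)
{
        uint32_t mid = 0xF020A1B2;      /* invented MID register value */

        printf("size 16 -> 0x%04x\n", GETFIELD(mid, IDNUM_OFFSET, 16));
        printf("size 12 -> 0x%03x\n", GETFIELD(mid, IDNUM_OFFSET, 12));
        return 0;
}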
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c index 9062a8434246..c308429dd9c7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c +++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c | |||
| @@ -35,10 +35,10 @@ static inline unsigned int ipv6_clip_hash(struct clip_tbl *d, const u32 *key) | |||
| 35 | } | 35 | } |
| 36 | 36 | ||
| 37 | static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr, | 37 | static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr, |
| 38 | int addr_len) | 38 | u8 v6) |
| 39 | { | 39 | { |
| 40 | return addr_len == 4 ? ipv4_clip_hash(ctbl, addr) : | 40 | return v6 ? ipv6_clip_hash(ctbl, addr) : |
| 41 | ipv6_clip_hash(ctbl, addr); | 41 | ipv4_clip_hash(ctbl, addr); |
| 42 | } | 42 | } |
| 43 | 43 | ||
| 44 | static int clip6_get_mbox(const struct net_device *dev, | 44 | static int clip6_get_mbox(const struct net_device *dev, |
| @@ -78,23 +78,22 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6) | |||
| 78 | struct clip_entry *ce, *cte; | 78 | struct clip_entry *ce, *cte; |
| 79 | u32 *addr = (u32 *)lip; | 79 | u32 *addr = (u32 *)lip; |
| 80 | int hash; | 80 | int hash; |
| 81 | int addr_len; | 81 | int ret = -1; |
| 82 | int ret = 0; | ||
| 83 | 82 | ||
| 84 | if (!ctbl) | 83 | if (!ctbl) |
| 85 | return 0; | 84 | return 0; |
| 86 | 85 | ||
| 87 | if (v6) | 86 | hash = clip_addr_hash(ctbl, addr, v6); |
| 88 | addr_len = 16; | ||
| 89 | else | ||
| 90 | addr_len = 4; | ||
| 91 | |||
| 92 | hash = clip_addr_hash(ctbl, addr, addr_len); | ||
| 93 | 87 | ||
| 94 | read_lock_bh(&ctbl->lock); | 88 | read_lock_bh(&ctbl->lock); |
| 95 | list_for_each_entry(cte, &ctbl->hash_list[hash], list) { | 89 | list_for_each_entry(cte, &ctbl->hash_list[hash], list) { |
| 96 | if (addr_len == cte->addr_len && | 90 | if (cte->addr6.sin6_family == AF_INET6 && v6) |
| 97 | memcmp(lip, cte->addr, cte->addr_len) == 0) { | 91 | ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr, |
| 92 | sizeof(struct in6_addr)); | ||
| 93 | else if (cte->addr.sin_family == AF_INET && !v6) | ||
| 94 | ret = memcmp(lip, (char *)(&cte->addr.sin_addr), | ||
| 95 | sizeof(struct in_addr)); | ||
| 96 | if (!ret) { | ||
| 98 | ce = cte; | 97 | ce = cte; |
| 99 | read_unlock_bh(&ctbl->lock); | 98 | read_unlock_bh(&ctbl->lock); |
| 100 | goto found; | 99 | goto found; |
| @@ -111,15 +110,20 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6) | |||
| 111 | spin_lock_init(&ce->lock); | 110 | spin_lock_init(&ce->lock); |
| 112 | atomic_set(&ce->refcnt, 0); | 111 | atomic_set(&ce->refcnt, 0); |
| 113 | atomic_dec(&ctbl->nfree); | 112 | atomic_dec(&ctbl->nfree); |
| 114 | ce->addr_len = addr_len; | ||
| 115 | memcpy(ce->addr, lip, addr_len); | ||
| 116 | list_add_tail(&ce->list, &ctbl->hash_list[hash]); | 113 | list_add_tail(&ce->list, &ctbl->hash_list[hash]); |
| 117 | if (v6) { | 114 | if (v6) { |
| 115 | ce->addr6.sin6_family = AF_INET6; | ||
| 116 | memcpy(ce->addr6.sin6_addr.s6_addr, | ||
| 117 | lip, sizeof(struct in6_addr)); | ||
| 118 | ret = clip6_get_mbox(dev, (const struct in6_addr *)lip); | 118 | ret = clip6_get_mbox(dev, (const struct in6_addr *)lip); |
| 119 | if (ret) { | 119 | if (ret) { |
| 120 | write_unlock_bh(&ctbl->lock); | 120 | write_unlock_bh(&ctbl->lock); |
| 121 | return ret; | 121 | return ret; |
| 122 | } | 122 | } |
| 123 | } else { | ||
| 124 | ce->addr.sin_family = AF_INET; | ||
| 125 | memcpy((char *)(&ce->addr.sin_addr), lip, | ||
| 126 | sizeof(struct in_addr)); | ||
| 123 | } | 127 | } |
| 124 | } else { | 128 | } else { |
| 125 | write_unlock_bh(&ctbl->lock); | 129 | write_unlock_bh(&ctbl->lock); |
| @@ -140,19 +144,19 @@ void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6) | |||
| 140 | struct clip_entry *ce, *cte; | 144 | struct clip_entry *ce, *cte; |
| 141 | u32 *addr = (u32 *)lip; | 145 | u32 *addr = (u32 *)lip; |
| 142 | int hash; | 146 | int hash; |
| 143 | int addr_len; | 147 | int ret = -1; |
| 144 | |||
| 145 | if (v6) | ||
| 146 | addr_len = 16; | ||
| 147 | else | ||
| 148 | addr_len = 4; | ||
| 149 | 148 | ||
| 150 | hash = clip_addr_hash(ctbl, addr, addr_len); | 149 | hash = clip_addr_hash(ctbl, addr, v6); |
| 151 | 150 | ||
| 152 | read_lock_bh(&ctbl->lock); | 151 | read_lock_bh(&ctbl->lock); |
| 153 | list_for_each_entry(cte, &ctbl->hash_list[hash], list) { | 152 | list_for_each_entry(cte, &ctbl->hash_list[hash], list) { |
| 154 | if (addr_len == cte->addr_len && | 153 | if (cte->addr6.sin6_family == AF_INET6 && v6) |
| 155 | memcmp(lip, cte->addr, cte->addr_len) == 0) { | 154 | ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr, |
| 155 | sizeof(struct in6_addr)); | ||
| 156 | else if (cte->addr.sin_family == AF_INET && !v6) | ||
| 157 | ret = memcmp(lip, (char *)(&cte->addr.sin_addr), | ||
| 158 | sizeof(struct in_addr)); | ||
| 159 | if (!ret) { | ||
| 156 | ce = cte; | 160 | ce = cte; |
| 157 | read_unlock_bh(&ctbl->lock); | 161 | read_unlock_bh(&ctbl->lock); |
| 158 | goto found; | 162 | goto found; |
| @@ -249,10 +253,7 @@ int clip_tbl_show(struct seq_file *seq, void *v) | |||
| 249 | for (i = 0 ; i < ctbl->clipt_size; ++i) { | 253 | for (i = 0 ; i < ctbl->clipt_size; ++i) { |
| 250 | list_for_each_entry(ce, &ctbl->hash_list[i], list) { | 254 | list_for_each_entry(ce, &ctbl->hash_list[i], list) { |
| 251 | ip[0] = '\0'; | 255 | ip[0] = '\0'; |
| 252 | if (ce->addr_len == 16) | 256 | sprintf(ip, "%pISc", &ce->addr); |
| 253 | sprintf(ip, "%pI6c", ce->addr); | ||
| 254 | else | ||
| 255 | sprintf(ip, "%pI4c", ce->addr); | ||
| 256 | seq_printf(seq, "%-25s %u\n", ip, | 257 | seq_printf(seq, "%-25s %u\n", ip, |
| 257 | atomic_read(&ce->refcnt)); | 258 | atomic_read(&ce->refcnt)); |
| 258 | } | 259 | } |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h index 2eaba0161cf8..35eb43c6bcbb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h | |||
| @@ -14,8 +14,10 @@ struct clip_entry { | |||
| 14 | spinlock_t lock; /* Hold while modifying clip reference */ | 14 | spinlock_t lock; /* Hold while modifying clip reference */ |
| 15 | atomic_t refcnt; | 15 | atomic_t refcnt; |
| 16 | struct list_head list; | 16 | struct list_head list; |
| 17 | u32 addr[4]; | 17 | union { |
| 18 | int addr_len; | 18 | struct sockaddr_in addr; |
| 19 | struct sockaddr_in6 addr6; | ||
| 20 | }; | ||
| 19 | }; | 21 | }; |
| 20 | 22 | ||
| 21 | struct clip_tbl { | 23 | struct clip_tbl { |
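Editor's note: the clip_tbl rework above replaces the raw address array and explicit length with a union of sockaddr_in/sockaddr_in6 keyed by address family, which is also what lets the debugfs dump switch to the generic %pISc format. A self-contained user-space sketch of the union-plus-family lookup; the entry type and helper are invented, while the memcmp() comparisons follow the shape used in the hunk.

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>

struct clip_like_entry {
        union {
                struct sockaddr_in  addr;       /* valid when family == AF_INET */
                struct sockaddr_in6 addr6;      /* valid when family == AF_INET6 */
        };
};

static int entry_matches(const struct clip_like_entry *e,
                         const void *lip, int v6)
{
        if (v6 && e->addr6.sin6_family == AF_INET6)
                return !memcmp(lip, e->addr6.sin6_addr.s6_addr,
                               sizeof(struct in6_addr));
        if (!v6 && e->addr.sin_family == AF_INET)
                return !memcmp(lip, &e->addr.sin_addr,
                               sizeof(struct in_addr));
        return 0;
}

int main(void)
{
        struct clip_like_entry e;
        struct in6_addr key;

        memset(&e, 0, sizeof(e));
        e.addr6.sin6_family = AF_INET6;
        inet_pton(AF_INET6, "2001:db8::1", &e.addr6.sin6_addr);
        inet_pton(AF_INET6, "2001:db8::1", &key);

        printf("IPv6 lookup matches: %d\n", entry_matches(&e, &key, 1));
        return 0;
}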
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index d6cda17efe6e..97842d03675b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | |||
| @@ -1103,7 +1103,7 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port); | |||
| 1103 | #define T4_MEMORY_WRITE 0 | 1103 | #define T4_MEMORY_WRITE 0 |
| 1104 | #define T4_MEMORY_READ 1 | 1104 | #define T4_MEMORY_READ 1 |
| 1105 | int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len, | 1105 | int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len, |
| 1106 | __be32 *buf, int dir); | 1106 | void *buf, int dir); |
| 1107 | static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr, | 1107 | static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr, |
| 1108 | u32 len, __be32 *buf) | 1108 | u32 len, __be32 *buf) |
| 1109 | { | 1109 | { |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 4d643b65265e..853c38997c82 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | |||
| @@ -449,7 +449,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) | |||
| 449 | * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC | 449 | * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC |
| 450 | * @addr: address within indicated memory type | 450 | * @addr: address within indicated memory type |
| 451 | * @len: amount of memory to transfer | 451 | * @len: amount of memory to transfer |
| 452 | * @buf: host memory buffer | 452 | * @hbuf: host memory buffer |
| 453 | * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0) | 453 | * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0) |
| 454 | * | 454 | * |
| 455 | * Reads/writes an [almost] arbitrary memory region in the firmware: the | 455 | * Reads/writes an [almost] arbitrary memory region in the firmware: the |
| @@ -460,15 +460,17 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) | |||
| 460 | * caller's responsibility to perform appropriate byte order conversions. | 460 | * caller's responsibility to perform appropriate byte order conversions. |
| 461 | */ | 461 | */ |
| 462 | int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, | 462 | int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, |
| 463 | u32 len, __be32 *buf, int dir) | 463 | u32 len, void *hbuf, int dir) |
| 464 | { | 464 | { |
| 465 | u32 pos, offset, resid, memoffset; | 465 | u32 pos, offset, resid, memoffset; |
| 466 | u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base; | 466 | u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base; |
| 467 | u32 *buf; | ||
| 467 | 468 | ||
| 468 | /* Argument sanity checks ... | 469 | /* Argument sanity checks ... |
| 469 | */ | 470 | */ |
| 470 | if (addr & 0x3) | 471 | if (addr & 0x3 || (uintptr_t)hbuf & 0x3) |
| 471 | return -EINVAL; | 472 | return -EINVAL; |
| 473 | buf = (u32 *)hbuf; | ||
| 472 | 474 | ||
| 473 | /* It's convenient to be able to handle lengths which aren't a | 475 | /* It's convenient to be able to handle lengths which aren't a |
| 474 | * multiple of 32-bits because we often end up transferring files to | 476 | * multiple of 32-bits because we often end up transferring files to |
| @@ -532,14 +534,45 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, | |||
| 532 | 534 | ||
| 533 | /* Transfer data to/from the adapter as long as there's an integral | 535 | /* Transfer data to/from the adapter as long as there's an integral |
| 534 | * number of 32-bit transfers to complete. | 536 | * number of 32-bit transfers to complete. |
| 537 | * | ||
| 538 | * A note on Endianness issues: | ||
| 539 | * | ||
| 540 | * The "register" reads and writes below from/to the PCI-E Memory | ||
| 541 | * Window invoke the standard adapter Big-Endian to PCI-E Link | ||
| 542 | * Little-Endian "swizzle." As a result, if we have the following | ||
| 543 | * data in adapter memory: | ||
| 544 | * | ||
| 545 | * Memory: ... | b0 | b1 | b2 | b3 | ... | ||
| 546 | * Address: i+0 i+1 i+2 i+3 | ||
| 547 | * | ||
| 548 | * Then a read of the adapter memory via the PCI-E Memory Window | ||
| 549 | * will yield: | ||
| 550 | * | ||
| 551 | * x = readl(i) | ||
| 552 | * 31 0 | ||
| 553 | * [ b3 | b2 | b1 | b0 ] | ||
| 554 | * | ||
| 555 | * If this value is stored into local memory on a Little-Endian system | ||
| 556 | * it will show up correctly in local memory as: | ||
| 557 | * | ||
| 558 | * ( ..., b0, b1, b2, b3, ... ) | ||
| 559 | * | ||
| 560 | * But on a Big-Endian system, the store will show up in memory | ||
| 561 | * incorrectly swizzled as: | ||
| 562 | * | ||
| 563 | * ( ..., b3, b2, b1, b0, ... ) | ||
| 564 | * | ||
| 565 | * So we need to account for this in the reads and writes to the | ||
| 566 | * PCI-E Memory Window below by undoing the register read/write | ||
| 567 | * swizzles. | ||
| 535 | */ | 568 | */ |
| 536 | while (len > 0) { | 569 | while (len > 0) { |
| 537 | if (dir == T4_MEMORY_READ) | 570 | if (dir == T4_MEMORY_READ) |
| 538 | *buf++ = (__force __be32) t4_read_reg(adap, | 571 | *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap, |
| 539 | mem_base + offset); | 572 | mem_base + offset)); |
| 540 | else | 573 | else |
| 541 | t4_write_reg(adap, mem_base + offset, | 574 | t4_write_reg(adap, mem_base + offset, |
| 542 | (__force u32) *buf++); | 575 | (__force u32)cpu_to_le32(*buf++)); |
| 543 | offset += sizeof(__be32); | 576 | offset += sizeof(__be32); |
| 544 | len -= sizeof(__be32); | 577 | len -= sizeof(__be32); |
| 545 | 578 | ||
| @@ -568,15 +601,16 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, | |||
| 568 | */ | 601 | */ |
| 569 | if (resid) { | 602 | if (resid) { |
| 570 | union { | 603 | union { |
| 571 | __be32 word; | 604 | u32 word; |
| 572 | char byte[4]; | 605 | char byte[4]; |
| 573 | } last; | 606 | } last; |
| 574 | unsigned char *bp; | 607 | unsigned char *bp; |
| 575 | int i; | 608 | int i; |
| 576 | 609 | ||
| 577 | if (dir == T4_MEMORY_READ) { | 610 | if (dir == T4_MEMORY_READ) { |
| 578 | last.word = (__force __be32) t4_read_reg(adap, | 611 | last.word = le32_to_cpu( |
| 579 | mem_base + offset); | 612 | (__force __le32)t4_read_reg(adap, |
| 613 | mem_base + offset)); | ||
| 580 | for (bp = (unsigned char *)buf, i = resid; i < 4; i++) | 614 | for (bp = (unsigned char *)buf, i = resid; i < 4; i++) |
| 581 | bp[i] = last.byte[i]; | 615 | bp[i] = last.byte[i]; |
| 582 | } else { | 616 | } else { |
| @@ -584,7 +618,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, | |||
| 584 | for (i = resid; i < 4; i++) | 618 | for (i = resid; i < 4; i++) |
| 585 | last.byte[i] = 0; | 619 | last.byte[i] = 0; |
| 586 | t4_write_reg(adap, mem_base + offset, | 620 | t4_write_reg(adap, mem_base + offset, |
| 587 | (__force u32) last.word); | 621 | (__force u32)cpu_to_le32(last.word)); |
| 588 | } | 622 | } |
| 589 | } | 623 | } |
| 590 | 624 | ||
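Editor's note: the t4_memory_rw() hunks above fix big-endian hosts by running each 32-bit PCI-E window read through le32_to_cpu() before it is stored (and the mirror cpu_to_le32() on writes), so bytes land in the host buffer in adapter-memory order on either endianness. Below is a user-space sketch of the same conversion using the glibc <endian.h> helpers; a plain variable stands in for the memory-window register, and on a little-endian host the two prints happen to match because le32toh() is a no-op there.

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <endian.h>

int main(void)
{
        /* Adapter memory holds bytes b0 b1 b2 b3; the little-endian window
         * read presents them with b0 in the least-significant byte.
         */
        uint32_t window_read = 0x04030201;      /* b3=04 b2=03 b1=02 b0=01 */
        unsigned char buf[4];
        uint32_t host_word;

        /* Unconverted store: wrong byte order on a big-endian CPU. */
        memcpy(buf, &window_read, sizeof(buf));
        printf("raw store : %02x %02x %02x %02x\n",
               buf[0], buf[1], buf[2], buf[3]);

        /* Converted store: b0..b3 in memory order on any CPU. */
        host_word = le32toh(window_read);
        memcpy(buf, &host_word, sizeof(buf));
        printf("le32toh   : %02x %02x %02x %02x\n",
               buf[0], buf[1], buf[2], buf[3]);
        return 0;
}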
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 9cbe038a388e..a5179bfcdc2c 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c | |||
| @@ -272,8 +272,8 @@ static irqreturn_t enic_isr_legacy(int irq, void *data) | |||
| 272 | } | 272 | } |
| 273 | 273 | ||
| 274 | if (ENIC_TEST_INTR(pba, notify_intr)) { | 274 | if (ENIC_TEST_INTR(pba, notify_intr)) { |
| 275 | vnic_intr_return_all_credits(&enic->intr[notify_intr]); | ||
| 276 | enic_notify_check(enic); | 275 | enic_notify_check(enic); |
| 276 | vnic_intr_return_all_credits(&enic->intr[notify_intr]); | ||
| 277 | } | 277 | } |
| 278 | 278 | ||
| 279 | if (ENIC_TEST_INTR(pba, err_intr)) { | 279 | if (ENIC_TEST_INTR(pba, err_intr)) { |
| @@ -346,8 +346,8 @@ static irqreturn_t enic_isr_msix_notify(int irq, void *data) | |||
| 346 | struct enic *enic = data; | 346 | struct enic *enic = data; |
| 347 | unsigned int intr = enic_msix_notify_intr(enic); | 347 | unsigned int intr = enic_msix_notify_intr(enic); |
| 348 | 348 | ||
| 349 | vnic_intr_return_all_credits(&enic->intr[intr]); | ||
| 350 | enic_notify_check(enic); | 349 | enic_notify_check(enic); |
| 350 | vnic_intr_return_all_credits(&enic->intr[intr]); | ||
| 351 | 351 | ||
| 352 | return IRQ_HANDLED; | 352 | return IRQ_HANDLED; |
| 353 | } | 353 | } |
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 9bb6220663b2..99492b7e3713 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
| @@ -1597,7 +1597,7 @@ fec_enet_interrupt(int irq, void *dev_id) | |||
| 1597 | writel(int_events, fep->hwp + FEC_IEVENT); | 1597 | writel(int_events, fep->hwp + FEC_IEVENT); |
| 1598 | fec_enet_collect_events(fep, int_events); | 1598 | fec_enet_collect_events(fep, int_events); |
| 1599 | 1599 | ||
| 1600 | if (fep->work_tx || fep->work_rx) { | 1600 | if ((fep->work_tx || fep->work_rx) && fep->link) { |
| 1601 | ret = IRQ_HANDLED; | 1601 | ret = IRQ_HANDLED; |
| 1602 | 1602 | ||
| 1603 | if (napi_schedule_prep(&fep->napi)) { | 1603 | if (napi_schedule_prep(&fep->napi)) { |
| @@ -3383,7 +3383,6 @@ fec_drv_remove(struct platform_device *pdev) | |||
| 3383 | regulator_disable(fep->reg_phy); | 3383 | regulator_disable(fep->reg_phy); |
| 3384 | if (fep->ptp_clock) | 3384 | if (fep->ptp_clock) |
| 3385 | ptp_clock_unregister(fep->ptp_clock); | 3385 | ptp_clock_unregister(fep->ptp_clock); |
| 3386 | fec_enet_clk_enable(ndev, false); | ||
| 3387 | of_node_put(fep->phy_node); | 3386 | of_node_put(fep->phy_node); |
| 3388 | free_netdev(ndev); | 3387 | free_netdev(ndev); |
| 3389 | 3388 | ||
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 43df78882e48..7bf3682cdf47 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c | |||
| @@ -747,6 +747,18 @@ static int gfar_parse_group(struct device_node *np, | |||
| 747 | return 0; | 747 | return 0; |
| 748 | } | 748 | } |
| 749 | 749 | ||
| 750 | static int gfar_of_group_count(struct device_node *np) | ||
| 751 | { | ||
| 752 | struct device_node *child; | ||
| 753 | int num = 0; | ||
| 754 | |||
| 755 | for_each_available_child_of_node(np, child) | ||
| 756 | if (!of_node_cmp(child->name, "queue-group")) | ||
| 757 | num++; | ||
| 758 | |||
| 759 | return num; | ||
| 760 | } | ||
| 761 | |||
| 750 | static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) | 762 | static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) |
| 751 | { | 763 | { |
| 752 | const char *model; | 764 | const char *model; |
| @@ -784,7 +796,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) | |||
| 784 | num_rx_qs = 1; | 796 | num_rx_qs = 1; |
| 785 | } else { /* MQ_MG_MODE */ | 797 | } else { /* MQ_MG_MODE */ |
| 786 | /* get the actual number of supported groups */ | 798 | /* get the actual number of supported groups */ |
| 787 | unsigned int num_grps = of_get_available_child_count(np); | 799 | unsigned int num_grps = gfar_of_group_count(np); |
| 788 | 800 | ||
| 789 | if (num_grps == 0 || num_grps > MAXGROUPS) { | 801 | if (num_grps == 0 || num_grps > MAXGROUPS) { |
| 790 | dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n", | 802 | dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n", |
| @@ -851,7 +863,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) | |||
| 851 | 863 | ||
| 852 | /* Parse and initialize group specific information */ | 864 | /* Parse and initialize group specific information */ |
| 853 | if (priv->mode == MQ_MG_MODE) { | 865 | if (priv->mode == MQ_MG_MODE) { |
| 854 | for_each_child_of_node(np, child) { | 866 | for_each_available_child_of_node(np, child) { |
| 867 | if (of_node_cmp(child->name, "queue-group")) | ||
| 868 | continue; | ||
| 869 | |||
| 855 | err = gfar_parse_group(child, priv, model); | 870 | err = gfar_parse_group(child, priv, model); |
| 856 | if (err) | 871 | if (err) |
| 857 | goto err_grp_init; | 872 | goto err_grp_init; |
| @@ -3162,8 +3177,8 @@ static void adjust_link(struct net_device *dev) | |||
| 3162 | struct phy_device *phydev = priv->phydev; | 3177 | struct phy_device *phydev = priv->phydev; |
| 3163 | 3178 | ||
| 3164 | if (unlikely(phydev->link != priv->oldlink || | 3179 | if (unlikely(phydev->link != priv->oldlink || |
| 3165 | phydev->duplex != priv->oldduplex || | 3180 | (phydev->link && (phydev->duplex != priv->oldduplex || |
| 3166 | phydev->speed != priv->oldspeed)) | 3181 | phydev->speed != priv->oldspeed)))) |
| 3167 | gfar_update_link_state(priv); | 3182 | gfar_update_link_state(priv); |
| 3168 | } | 3183 | } |
| 3169 | 3184 | ||
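
gfar_of_group_count() above counts only the available children literally named "queue-group", since an eTSEC node can carry other child nodes that of_get_available_child_count() would also include. A toy stand-alone illustration of the difference; the child names below are hypothetical:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* hypothetical children of an eTSEC device-tree node */
        static const char *children[] = { "queue-group", "queue-group", "mdio" };
        int all = 0, groups = 0;
        size_t i;

        for (i = 0; i < sizeof(children) / sizeof(children[0]); i++) {
            all++;
            if (!strcmp(children[i], "queue-group"))
                groups++;
        }
        /* counting every child over-reports; only the queue-groups matter */
        printf("all children: %d, queue-groups: %d\n", all, groups);  /* 3 vs 2 */
        return 0;
    }
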
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index e8a1adb7a962..c05e50759621 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c | |||
| @@ -3262,6 +3262,139 @@ static void ehea_remove_device_sysfs(struct platform_device *dev) | |||
| 3262 | device_remove_file(&dev->dev, &dev_attr_remove_port); | 3262 | device_remove_file(&dev->dev, &dev_attr_remove_port); |
| 3263 | } | 3263 | } |
| 3264 | 3264 | ||
| 3265 | static int ehea_reboot_notifier(struct notifier_block *nb, | ||
| 3266 | unsigned long action, void *unused) | ||
| 3267 | { | ||
| 3268 | if (action == SYS_RESTART) { | ||
| 3269 | pr_info("Reboot: freeing all eHEA resources\n"); | ||
| 3270 | ibmebus_unregister_driver(&ehea_driver); | ||
| 3271 | } | ||
| 3272 | return NOTIFY_DONE; | ||
| 3273 | } | ||
| 3274 | |||
| 3275 | static struct notifier_block ehea_reboot_nb = { | ||
| 3276 | .notifier_call = ehea_reboot_notifier, | ||
| 3277 | }; | ||
| 3278 | |||
| 3279 | static int ehea_mem_notifier(struct notifier_block *nb, | ||
| 3280 | unsigned long action, void *data) | ||
| 3281 | { | ||
| 3282 | int ret = NOTIFY_BAD; | ||
| 3283 | struct memory_notify *arg = data; | ||
| 3284 | |||
| 3285 | mutex_lock(&dlpar_mem_lock); | ||
| 3286 | |||
| 3287 | switch (action) { | ||
| 3288 | case MEM_CANCEL_OFFLINE: | ||
| 3289 | pr_info("memory offlining canceled"); | ||
| 3290 | /* Fall through: re-add canceled memory block */ | ||
| 3291 | |||
| 3292 | case MEM_ONLINE: | ||
| 3293 | pr_info("memory is going online"); | ||
| 3294 | set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); | ||
| 3295 | if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages)) | ||
| 3296 | goto out_unlock; | ||
| 3297 | ehea_rereg_mrs(); | ||
| 3298 | break; | ||
| 3299 | |||
| 3300 | case MEM_GOING_OFFLINE: | ||
| 3301 | pr_info("memory is going offline"); | ||
| 3302 | set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); | ||
| 3303 | if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages)) | ||
| 3304 | goto out_unlock; | ||
| 3305 | ehea_rereg_mrs(); | ||
| 3306 | break; | ||
| 3307 | |||
| 3308 | default: | ||
| 3309 | break; | ||
| 3310 | } | ||
| 3311 | |||
| 3312 | ehea_update_firmware_handles(); | ||
| 3313 | ret = NOTIFY_OK; | ||
| 3314 | |||
| 3315 | out_unlock: | ||
| 3316 | mutex_unlock(&dlpar_mem_lock); | ||
| 3317 | return ret; | ||
| 3318 | } | ||
| 3319 | |||
| 3320 | static struct notifier_block ehea_mem_nb = { | ||
| 3321 | .notifier_call = ehea_mem_notifier, | ||
| 3322 | }; | ||
| 3323 | |||
| 3324 | static void ehea_crash_handler(void) | ||
| 3325 | { | ||
| 3326 | int i; | ||
| 3327 | |||
| 3328 | if (ehea_fw_handles.arr) | ||
| 3329 | for (i = 0; i < ehea_fw_handles.num_entries; i++) | ||
| 3330 | ehea_h_free_resource(ehea_fw_handles.arr[i].adh, | ||
| 3331 | ehea_fw_handles.arr[i].fwh, | ||
| 3332 | FORCE_FREE); | ||
| 3333 | |||
| 3334 | if (ehea_bcmc_regs.arr) | ||
| 3335 | for (i = 0; i < ehea_bcmc_regs.num_entries; i++) | ||
| 3336 | ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh, | ||
| 3337 | ehea_bcmc_regs.arr[i].port_id, | ||
| 3338 | ehea_bcmc_regs.arr[i].reg_type, | ||
| 3339 | ehea_bcmc_regs.arr[i].macaddr, | ||
| 3340 | 0, H_DEREG_BCMC); | ||
| 3341 | } | ||
| 3342 | |||
| 3343 | static atomic_t ehea_memory_hooks_registered; | ||
| 3344 | |||
| 3345 | /* Register memory hooks on probe of first adapter */ | ||
| 3346 | static int ehea_register_memory_hooks(void) | ||
| 3347 | { | ||
| 3348 | int ret = 0; | ||
| 3349 | |||
| 3350 | if (atomic_inc_and_test(&ehea_memory_hooks_registered)) | ||
| 3351 | return 0; | ||
| 3352 | |||
| 3353 | ret = ehea_create_busmap(); | ||
| 3354 | if (ret) { | ||
| 3355 | pr_info("ehea_create_busmap failed\n"); | ||
| 3356 | goto out; | ||
| 3357 | } | ||
| 3358 | |||
| 3359 | ret = register_reboot_notifier(&ehea_reboot_nb); | ||
| 3360 | if (ret) { | ||
| 3361 | pr_info("register_reboot_notifier failed\n"); | ||
| 3362 | goto out; | ||
| 3363 | } | ||
| 3364 | |||
| 3365 | ret = register_memory_notifier(&ehea_mem_nb); | ||
| 3366 | if (ret) { | ||
| 3367 | pr_info("register_memory_notifier failed\n"); | ||
| 3368 | goto out2; | ||
| 3369 | } | ||
| 3370 | |||
| 3371 | ret = crash_shutdown_register(ehea_crash_handler); | ||
| 3372 | if (ret) { | ||
| 3373 | pr_info("crash_shutdown_register failed\n"); | ||
| 3374 | goto out3; | ||
| 3375 | } | ||
| 3376 | |||
| 3377 | return 0; | ||
| 3378 | |||
| 3379 | out3: | ||
| 3380 | unregister_memory_notifier(&ehea_mem_nb); | ||
| 3381 | out2: | ||
| 3382 | unregister_reboot_notifier(&ehea_reboot_nb); | ||
| 3383 | out: | ||
| 3384 | return ret; | ||
| 3385 | } | ||
| 3386 | |||
| 3387 | static void ehea_unregister_memory_hooks(void) | ||
| 3388 | { | ||
| 3389 | if (atomic_read(&ehea_memory_hooks_registered)) | ||
| 3390 | return; | ||
| 3391 | |||
| 3392 | unregister_reboot_notifier(&ehea_reboot_nb); | ||
| 3393 | if (crash_shutdown_unregister(ehea_crash_handler)) | ||
| 3394 | pr_info("failed unregistering crash handler\n"); | ||
| 3395 | unregister_memory_notifier(&ehea_mem_nb); | ||
| 3396 | } | ||
| 3397 | |||
| 3265 | static int ehea_probe_adapter(struct platform_device *dev) | 3398 | static int ehea_probe_adapter(struct platform_device *dev) |
| 3266 | { | 3399 | { |
| 3267 | struct ehea_adapter *adapter; | 3400 | struct ehea_adapter *adapter; |
| @@ -3269,6 +3402,10 @@ static int ehea_probe_adapter(struct platform_device *dev) | |||
| 3269 | int ret; | 3402 | int ret; |
| 3270 | int i; | 3403 | int i; |
| 3271 | 3404 | ||
| 3405 | ret = ehea_register_memory_hooks(); | ||
| 3406 | if (ret) | ||
| 3407 | return ret; | ||
| 3408 | |||
| 3272 | if (!dev || !dev->dev.of_node) { | 3409 | if (!dev || !dev->dev.of_node) { |
| 3273 | pr_err("Invalid ibmebus device probed\n"); | 3410 | pr_err("Invalid ibmebus device probed\n"); |
| 3274 | return -EINVAL; | 3411 | return -EINVAL; |
| @@ -3392,81 +3529,6 @@ static int ehea_remove(struct platform_device *dev) | |||
| 3392 | return 0; | 3529 | return 0; |
| 3393 | } | 3530 | } |
| 3394 | 3531 | ||
| 3395 | static void ehea_crash_handler(void) | ||
| 3396 | { | ||
| 3397 | int i; | ||
| 3398 | |||
| 3399 | if (ehea_fw_handles.arr) | ||
| 3400 | for (i = 0; i < ehea_fw_handles.num_entries; i++) | ||
| 3401 | ehea_h_free_resource(ehea_fw_handles.arr[i].adh, | ||
| 3402 | ehea_fw_handles.arr[i].fwh, | ||
| 3403 | FORCE_FREE); | ||
| 3404 | |||
| 3405 | if (ehea_bcmc_regs.arr) | ||
| 3406 | for (i = 0; i < ehea_bcmc_regs.num_entries; i++) | ||
| 3407 | ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh, | ||
| 3408 | ehea_bcmc_regs.arr[i].port_id, | ||
| 3409 | ehea_bcmc_regs.arr[i].reg_type, | ||
| 3410 | ehea_bcmc_regs.arr[i].macaddr, | ||
| 3411 | 0, H_DEREG_BCMC); | ||
| 3412 | } | ||
| 3413 | |||
| 3414 | static int ehea_mem_notifier(struct notifier_block *nb, | ||
| 3415 | unsigned long action, void *data) | ||
| 3416 | { | ||
| 3417 | int ret = NOTIFY_BAD; | ||
| 3418 | struct memory_notify *arg = data; | ||
| 3419 | |||
| 3420 | mutex_lock(&dlpar_mem_lock); | ||
| 3421 | |||
| 3422 | switch (action) { | ||
| 3423 | case MEM_CANCEL_OFFLINE: | ||
| 3424 | pr_info("memory offlining canceled"); | ||
| 3425 | /* Readd canceled memory block */ | ||
| 3426 | case MEM_ONLINE: | ||
| 3427 | pr_info("memory is going online"); | ||
| 3428 | set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); | ||
| 3429 | if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages)) | ||
| 3430 | goto out_unlock; | ||
| 3431 | ehea_rereg_mrs(); | ||
| 3432 | break; | ||
| 3433 | case MEM_GOING_OFFLINE: | ||
| 3434 | pr_info("memory is going offline"); | ||
| 3435 | set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); | ||
| 3436 | if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages)) | ||
| 3437 | goto out_unlock; | ||
| 3438 | ehea_rereg_mrs(); | ||
| 3439 | break; | ||
| 3440 | default: | ||
| 3441 | break; | ||
| 3442 | } | ||
| 3443 | |||
| 3444 | ehea_update_firmware_handles(); | ||
| 3445 | ret = NOTIFY_OK; | ||
| 3446 | |||
| 3447 | out_unlock: | ||
| 3448 | mutex_unlock(&dlpar_mem_lock); | ||
| 3449 | return ret; | ||
| 3450 | } | ||
| 3451 | |||
| 3452 | static struct notifier_block ehea_mem_nb = { | ||
| 3453 | .notifier_call = ehea_mem_notifier, | ||
| 3454 | }; | ||
| 3455 | |||
| 3456 | static int ehea_reboot_notifier(struct notifier_block *nb, | ||
| 3457 | unsigned long action, void *unused) | ||
| 3458 | { | ||
| 3459 | if (action == SYS_RESTART) { | ||
| 3460 | pr_info("Reboot: freeing all eHEA resources\n"); | ||
| 3461 | ibmebus_unregister_driver(&ehea_driver); | ||
| 3462 | } | ||
| 3463 | return NOTIFY_DONE; | ||
| 3464 | } | ||
| 3465 | |||
| 3466 | static struct notifier_block ehea_reboot_nb = { | ||
| 3467 | .notifier_call = ehea_reboot_notifier, | ||
| 3468 | }; | ||
| 3469 | |||
| 3470 | static int check_module_parm(void) | 3532 | static int check_module_parm(void) |
| 3471 | { | 3533 | { |
| 3472 | int ret = 0; | 3534 | int ret = 0; |
| @@ -3520,26 +3582,10 @@ static int __init ehea_module_init(void) | |||
| 3520 | if (ret) | 3582 | if (ret) |
| 3521 | goto out; | 3583 | goto out; |
| 3522 | 3584 | ||
| 3523 | ret = ehea_create_busmap(); | ||
| 3524 | if (ret) | ||
| 3525 | goto out; | ||
| 3526 | |||
| 3527 | ret = register_reboot_notifier(&ehea_reboot_nb); | ||
| 3528 | if (ret) | ||
| 3529 | pr_info("failed registering reboot notifier\n"); | ||
| 3530 | |||
| 3531 | ret = register_memory_notifier(&ehea_mem_nb); | ||
| 3532 | if (ret) | ||
| 3533 | pr_info("failed registering memory remove notifier\n"); | ||
| 3534 | |||
| 3535 | ret = crash_shutdown_register(ehea_crash_handler); | ||
| 3536 | if (ret) | ||
| 3537 | pr_info("failed registering crash handler\n"); | ||
| 3538 | |||
| 3539 | ret = ibmebus_register_driver(&ehea_driver); | 3585 | ret = ibmebus_register_driver(&ehea_driver); |
| 3540 | if (ret) { | 3586 | if (ret) { |
| 3541 | pr_err("failed registering eHEA device driver on ebus\n"); | 3587 | pr_err("failed registering eHEA device driver on ebus\n"); |
| 3542 | goto out2; | 3588 | goto out; |
| 3543 | } | 3589 | } |
| 3544 | 3590 | ||
| 3545 | ret = driver_create_file(&ehea_driver.driver, | 3591 | ret = driver_create_file(&ehea_driver.driver, |
| @@ -3547,32 +3593,22 @@ static int __init ehea_module_init(void) | |||
| 3547 | if (ret) { | 3593 | if (ret) { |
| 3548 | pr_err("failed to register capabilities attribute, ret=%d\n", | 3594 | pr_err("failed to register capabilities attribute, ret=%d\n", |
| 3549 | ret); | 3595 | ret); |
| 3550 | goto out3; | 3596 | goto out2; |
| 3551 | } | 3597 | } |
| 3552 | 3598 | ||
| 3553 | return ret; | 3599 | return ret; |
| 3554 | 3600 | ||
| 3555 | out3: | ||
| 3556 | ibmebus_unregister_driver(&ehea_driver); | ||
| 3557 | out2: | 3601 | out2: |
| 3558 | unregister_memory_notifier(&ehea_mem_nb); | 3602 | ibmebus_unregister_driver(&ehea_driver); |
| 3559 | unregister_reboot_notifier(&ehea_reboot_nb); | ||
| 3560 | crash_shutdown_unregister(ehea_crash_handler); | ||
| 3561 | out: | 3603 | out: |
| 3562 | return ret; | 3604 | return ret; |
| 3563 | } | 3605 | } |
| 3564 | 3606 | ||
| 3565 | static void __exit ehea_module_exit(void) | 3607 | static void __exit ehea_module_exit(void) |
| 3566 | { | 3608 | { |
| 3567 | int ret; | ||
| 3568 | |||
| 3569 | driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities); | 3609 | driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities); |
| 3570 | ibmebus_unregister_driver(&ehea_driver); | 3610 | ibmebus_unregister_driver(&ehea_driver); |
| 3571 | unregister_reboot_notifier(&ehea_reboot_nb); | 3611 | ehea_unregister_memory_hooks(); |
| 3572 | ret = crash_shutdown_unregister(ehea_crash_handler); | ||
| 3573 | if (ret) | ||
| 3574 | pr_info("failed unregistering crash handler\n"); | ||
| 3575 | unregister_memory_notifier(&ehea_mem_nb); | ||
| 3576 | kfree(ehea_fw_handles.arr); | 3612 | kfree(ehea_fw_handles.arr); |
| 3577 | kfree(ehea_bcmc_regs.arr); | 3613 | kfree(ehea_bcmc_regs.arr); |
| 3578 | ehea_destroy_busmap(); | 3614 | ehea_destroy_busmap(); |
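
The reshuffled ehea code above moves the reboot, memory and crash hooks from module init to the first adapter probe, guarded so later probes skip the work. A stand-alone sketch of that register-once pattern, using C11 atomics in place of the kernel's atomic_t helpers:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int hooks_registered;

    /* Only the caller that bumps the counter from 0 does the real work. */
    static int register_hooks_once(void)
    {
        if (atomic_fetch_add(&hooks_registered, 1) != 0)
            return 0;                /* someone already registered them */

        printf("registering reboot, memory and crash hooks\n");
        return 0;                    /* a real driver would propagate errors */
    }

    int main(void)
    {
        register_hooks_once();       /* first adapter probe: registers */
        register_hooks_once();       /* later probes: cheap no-op */
        return 0;
    }
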
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 21978cc019e7..072426a72745 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c | |||
| @@ -1327,6 +1327,28 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev) | |||
| 1327 | return ret; | 1327 | return ret; |
| 1328 | } | 1328 | } |
| 1329 | 1329 | ||
| 1330 | static int ibmveth_set_mac_addr(struct net_device *dev, void *p) | ||
| 1331 | { | ||
| 1332 | struct ibmveth_adapter *adapter = netdev_priv(dev); | ||
| 1333 | struct sockaddr *addr = p; | ||
| 1334 | u64 mac_address; | ||
| 1335 | int rc; | ||
| 1336 | |||
| 1337 | if (!is_valid_ether_addr(addr->sa_data)) | ||
| 1338 | return -EADDRNOTAVAIL; | ||
| 1339 | |||
| 1340 | mac_address = ibmveth_encode_mac_addr(addr->sa_data); | ||
| 1341 | rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address); | ||
| 1342 | if (rc) { | ||
| 1343 | netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc); | ||
| 1344 | return rc; | ||
| 1345 | } | ||
| 1346 | |||
| 1347 | ether_addr_copy(dev->dev_addr, addr->sa_data); | ||
| 1348 | |||
| 1349 | return 0; | ||
| 1350 | } | ||
| 1351 | |||
| 1330 | static const struct net_device_ops ibmveth_netdev_ops = { | 1352 | static const struct net_device_ops ibmveth_netdev_ops = { |
| 1331 | .ndo_open = ibmveth_open, | 1353 | .ndo_open = ibmveth_open, |
| 1332 | .ndo_stop = ibmveth_close, | 1354 | .ndo_stop = ibmveth_close, |
| @@ -1337,7 +1359,7 @@ static const struct net_device_ops ibmveth_netdev_ops = { | |||
| 1337 | .ndo_fix_features = ibmveth_fix_features, | 1359 | .ndo_fix_features = ibmveth_fix_features, |
| 1338 | .ndo_set_features = ibmveth_set_features, | 1360 | .ndo_set_features = ibmveth_set_features, |
| 1339 | .ndo_validate_addr = eth_validate_addr, | 1361 | .ndo_validate_addr = eth_validate_addr, |
| 1340 | .ndo_set_mac_address = eth_mac_addr, | 1362 | .ndo_set_mac_address = ibmveth_set_mac_addr, |
| 1341 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1363 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| 1342 | .ndo_poll_controller = ibmveth_poll_controller, | 1364 | .ndo_poll_controller = ibmveth_poll_controller, |
| 1343 | #endif | 1365 | #endif |
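
ibmveth_set_mac_addr() above follows the usual ndo_set_mac_address contract: validate the requested address, program the backend (here a hypervisor call), and only then update dev->dev_addr. A simplified stand-alone skeleton of that ordering; the device structure and backend call are invented for illustration:

    #include <stdio.h>
    #include <string.h>

    #define EADDRNOTAVAIL 99         /* stand-in errno value */

    struct toy_netdev {
        unsigned char dev_addr[6];
    };

    /* stand-in for the firmware/hypervisor call that programs the MAC */
    static int toy_program_mac(const unsigned char *mac)
    {
        printf("programming %02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
    }

    static int toy_set_mac(struct toy_netdev *dev, const unsigned char *mac)
    {
        /* real code uses is_valid_ether_addr(): also rejects the all-zero address */
        if (mac[0] & 1)
            return -EADDRNOTAVAIL;   /* multicast bit set: not a valid unicast MAC */

        if (toy_program_mac(mac))    /* program the hardware first ... */
            return -1;

        memcpy(dev->dev_addr, mac, 6);  /* ... update the cached copy only on success */
        return 0;
    }

    int main(void)
    {
        struct toy_netdev dev = { { 0 } };
        const unsigned char mac[6] = { 0x02, 0x00, 0x00, 0x12, 0x34, 0x56 };

        return toy_set_mac(&dev, mac) ? 1 : 0;
    }
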
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 11a9ffebf8d8..6aea65dae5ed 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c | |||
| @@ -868,8 +868,9 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw) | |||
| 868 | * The grst delay value is in 100ms units, and we'll wait a | 868 | * The grst delay value is in 100ms units, and we'll wait a |
| 869 | * couple counts longer to be sure we don't just miss the end. | 869 | * couple counts longer to be sure we don't just miss the end. |
| 870 | */ | 870 | */ |
| 871 | grst_del = rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK | 871 | grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) & |
| 872 | >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; | 872 | I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >> |
| 873 | I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; | ||
| 873 | for (cnt = 0; cnt < grst_del + 2; cnt++) { | 874 | for (cnt = 0; cnt < grst_del + 2; cnt++) { |
| 874 | reg = rd32(hw, I40E_GLGEN_RSTAT); | 875 | reg = rd32(hw, I40E_GLGEN_RSTAT); |
| 875 | if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) | 876 | if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) |
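
The grst_del rework above is an operator-precedence fix: shifts bind tighter than & in C, so the unparenthesized expression masked the raw register with an already-shifted constant and read the wrong bits. A small stand-alone demonstration; the field position below is illustrative, not the real register layout:

    #include <stdio.h>
    #include <stdint.h>

    #define GRSTDEL_SHIFT 29U                    /* illustrative field position only */
    #define GRSTDEL_MASK  (0x7U << GRSTDEL_SHIFT)

    int main(void)
    {
        uint32_t reg = 3U << GRSTDEL_SHIFT;      /* field value 3 */

        uint32_t wrong = reg & GRSTDEL_MASK >> GRSTDEL_SHIFT;  /* parsed as reg & (MASK >> SHIFT) */
        uint32_t right = (reg & GRSTDEL_MASK) >> GRSTDEL_SHIFT;

        printf("wrong = %u, right = %u\n", wrong, right);      /* wrong = 0, right = 3 */
        return 0;
    }
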
| @@ -2846,7 +2847,7 @@ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, | |||
| 2846 | 2847 | ||
| 2847 | status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); | 2848 | status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); |
| 2848 | 2849 | ||
| 2849 | if (!status) | 2850 | if (!status && filter_index) |
| 2850 | *filter_index = resp->index; | 2851 | *filter_index = resp->index; |
| 2851 | 2852 | ||
| 2852 | return status; | 2853 | return status; |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c index 183dcb63ce98..a11c70ca5a28 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c | |||
| @@ -40,7 +40,7 @@ static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay) | |||
| 40 | u32 val; | 40 | u32 val; |
| 41 | 41 | ||
| 42 | val = rd32(hw, I40E_PRTDCB_GENC); | 42 | val = rd32(hw, I40E_PRTDCB_GENC); |
| 43 | *delay = (u16)(val & I40E_PRTDCB_GENC_PFCLDA_MASK >> | 43 | *delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >> |
| 44 | I40E_PRTDCB_GENC_PFCLDA_SHIFT); | 44 | I40E_PRTDCB_GENC_PFCLDA_SHIFT); |
| 45 | } | 45 | } |
| 46 | 46 | ||
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 61236f983971..c17ee77100d3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c | |||
| @@ -989,8 +989,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp, | |||
| 989 | if (!cmd_buf) | 989 | if (!cmd_buf) |
| 990 | return count; | 990 | return count; |
| 991 | bytes_not_copied = copy_from_user(cmd_buf, buffer, count); | 991 | bytes_not_copied = copy_from_user(cmd_buf, buffer, count); |
| 992 | if (bytes_not_copied < 0) | 992 | if (bytes_not_copied < 0) { |
| 993 | kfree(cmd_buf); | ||
| 993 | return bytes_not_copied; | 994 | return bytes_not_copied; |
| 995 | } | ||
| 994 | if (bytes_not_copied > 0) | 996 | if (bytes_not_copied > 0) |
| 995 | count -= bytes_not_copied; | 997 | count -= bytes_not_copied; |
| 996 | cmd_buf[count] = '\0'; | 998 | cmd_buf[count] = '\0'; |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index cbe281be1c9f..dadda3c5d658 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
| @@ -1512,7 +1512,12 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, | |||
| 1512 | vsi->tc_config.numtc = numtc; | 1512 | vsi->tc_config.numtc = numtc; |
| 1513 | vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; | 1513 | vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; |
| 1514 | /* Number of queues per enabled TC */ | 1514 | /* Number of queues per enabled TC */ |
| 1515 | num_tc_qps = vsi->alloc_queue_pairs/numtc; | 1515 | /* In MFP case we can have a much lower count of MSIx |
| 1516 | * vectors available and so we need to lower the used | ||
| 1517 | * q count. | ||
| 1518 | */ | ||
| 1519 | qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix); | ||
| 1520 | num_tc_qps = qcount / numtc; | ||
| 1516 | num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC); | 1521 | num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC); |
| 1517 | 1522 | ||
| 1518 | /* Setup queue offset/count for all TCs for given VSI */ | 1523 | /* Setup queue offset/count for all TCs for given VSI */ |
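
The queue-map change above caps the per-VSI queue count at the number of MSI-X vectors actually granted (the MFP case) before dividing among traffic classes. A worked example with made-up numbers:

    #include <stdio.h>

    static int min_int(int a, int b) { return a < b ? a : b; }

    int main(void)
    {
        int alloc_queue_pairs = 64;   /* queue pairs the VSI could use */
        int num_lan_msix = 16;        /* MSI-X vectors the PF actually got (MFP case) */
        int numtc = 4;                /* enabled traffic classes */

        int uncapped = alloc_queue_pairs / numtc;                        /* 16 queues per TC */
        int capped = min_int(alloc_queue_pairs, num_lan_msix) / numtc;   /*  4 queues per TC */

        /* 4 TCs of 16 queues would outrun the 16 vectors; capping keeps them in line */
        printf("per-TC queues: uncapped %d, capped %d\n", uncapped, capped);
        return 0;
    }
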
| @@ -2684,8 +2689,15 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) | |||
| 2684 | u16 qoffset, qcount; | 2689 | u16 qoffset, qcount; |
| 2685 | int i, n; | 2690 | int i, n; |
| 2686 | 2691 | ||
| 2687 | if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) | 2692 | if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { |
| 2688 | return; | 2693 | /* Reset the TC information */ |
| 2694 | for (i = 0; i < vsi->num_queue_pairs; i++) { | ||
| 2695 | rx_ring = vsi->rx_rings[i]; | ||
| 2696 | tx_ring = vsi->tx_rings[i]; | ||
| 2697 | rx_ring->dcb_tc = 0; | ||
| 2698 | tx_ring->dcb_tc = 0; | ||
| 2699 | } | ||
| 2700 | } | ||
| 2689 | 2701 | ||
| 2690 | for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { | 2702 | for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { |
| 2691 | if (!(vsi->tc_config.enabled_tc & (1 << n))) | 2703 | if (!(vsi->tc_config.enabled_tc & (1 << n))) |
| @@ -3830,6 +3842,12 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) | |||
| 3830 | { | 3842 | { |
| 3831 | int i; | 3843 | int i; |
| 3832 | 3844 | ||
| 3845 | i40e_stop_misc_vector(pf); | ||
| 3846 | if (pf->flags & I40E_FLAG_MSIX_ENABLED) { | ||
| 3847 | synchronize_irq(pf->msix_entries[0].vector); | ||
| 3848 | free_irq(pf->msix_entries[0].vector, pf); | ||
| 3849 | } | ||
| 3850 | |||
| 3833 | i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); | 3851 | i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); |
| 3834 | for (i = 0; i < pf->num_alloc_vsi; i++) | 3852 | for (i = 0; i < pf->num_alloc_vsi; i++) |
| 3835 | if (pf->vsi[i]) | 3853 | if (pf->vsi[i]) |
| @@ -5254,8 +5272,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, | |||
| 5254 | 5272 | ||
| 5255 | /* Wait for the PF's Tx queues to be disabled */ | 5273 | /* Wait for the PF's Tx queues to be disabled */ |
| 5256 | ret = i40e_pf_wait_txq_disabled(pf); | 5274 | ret = i40e_pf_wait_txq_disabled(pf); |
| 5257 | if (!ret) | 5275 | if (ret) { |
| 5276 | /* Schedule PF reset to recover */ | ||
| 5277 | set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); | ||
| 5278 | i40e_service_event_schedule(pf); | ||
| 5279 | } else { | ||
| 5258 | i40e_pf_unquiesce_all_vsi(pf); | 5280 | i40e_pf_unquiesce_all_vsi(pf); |
| 5281 | } | ||
| 5282 | |||
| 5259 | exit: | 5283 | exit: |
| 5260 | return ret; | 5284 | return ret; |
| 5261 | } | 5285 | } |
| @@ -5587,7 +5611,8 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf) | |||
| 5587 | int i, v; | 5611 | int i, v; |
| 5588 | 5612 | ||
| 5589 | /* If we're down or resetting, just bail */ | 5613 | /* If we're down or resetting, just bail */ |
| 5590 | if (test_bit(__I40E_CONFIG_BUSY, &pf->state)) | 5614 | if (test_bit(__I40E_DOWN, &pf->state) || |
| 5615 | test_bit(__I40E_CONFIG_BUSY, &pf->state)) | ||
| 5591 | return; | 5616 | return; |
| 5592 | 5617 | ||
| 5593 | /* for each VSI/netdev | 5618 | /* for each VSI/netdev |
| @@ -9533,6 +9558,7 @@ static void i40e_remove(struct pci_dev *pdev) | |||
| 9533 | set_bit(__I40E_DOWN, &pf->state); | 9558 | set_bit(__I40E_DOWN, &pf->state); |
| 9534 | del_timer_sync(&pf->service_timer); | 9559 | del_timer_sync(&pf->service_timer); |
| 9535 | cancel_work_sync(&pf->service_task); | 9560 | cancel_work_sync(&pf->service_task); |
| 9561 | i40e_fdir_teardown(pf); | ||
| 9536 | 9562 | ||
| 9537 | if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { | 9563 | if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { |
| 9538 | i40e_free_vfs(pf); | 9564 | i40e_free_vfs(pf); |
| @@ -9559,12 +9585,6 @@ static void i40e_remove(struct pci_dev *pdev) | |||
| 9559 | if (pf->vsi[pf->lan_vsi]) | 9585 | if (pf->vsi[pf->lan_vsi]) |
| 9560 | i40e_vsi_release(pf->vsi[pf->lan_vsi]); | 9586 | i40e_vsi_release(pf->vsi[pf->lan_vsi]); |
| 9561 | 9587 | ||
| 9562 | i40e_stop_misc_vector(pf); | ||
| 9563 | if (pf->flags & I40E_FLAG_MSIX_ENABLED) { | ||
| 9564 | synchronize_irq(pf->msix_entries[0].vector); | ||
| 9565 | free_irq(pf->msix_entries[0].vector, pf); | ||
| 9566 | } | ||
| 9567 | |||
| 9568 | /* shutdown and destroy the HMC */ | 9588 | /* shutdown and destroy the HMC */ |
| 9569 | if (pf->hw.hmc.hmc_obj) { | 9589 | if (pf->hw.hmc.hmc_obj) { |
| 9570 | ret_code = i40e_shutdown_lan_hmc(&pf->hw); | 9590 | ret_code = i40e_shutdown_lan_hmc(&pf->hw); |
| @@ -9718,6 +9738,8 @@ static void i40e_shutdown(struct pci_dev *pdev) | |||
| 9718 | wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); | 9738 | wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); |
| 9719 | wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); | 9739 | wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); |
| 9720 | 9740 | ||
| 9741 | i40e_clear_interrupt_scheme(pf); | ||
| 9742 | |||
| 9721 | if (system_state == SYSTEM_POWER_OFF) { | 9743 | if (system_state == SYSTEM_POWER_OFF) { |
| 9722 | pci_wake_from_d3(pdev, pf->wol_en); | 9744 | pci_wake_from_d3(pdev, pf->wol_en); |
| 9723 | pci_set_power_state(pdev, PCI_D3hot); | 9745 | pci_set_power_state(pdev, PCI_D3hot); |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 3e70f2e45a47..5defe0d63514 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c | |||
| @@ -679,9 +679,11 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw, | |||
| 679 | { | 679 | { |
| 680 | i40e_status status; | 680 | i40e_status status; |
| 681 | enum i40e_nvmupd_cmd upd_cmd; | 681 | enum i40e_nvmupd_cmd upd_cmd; |
| 682 | bool retry_attempt = false; | ||
| 682 | 683 | ||
| 683 | upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno); | 684 | upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno); |
| 684 | 685 | ||
| 686 | retry: | ||
| 685 | switch (upd_cmd) { | 687 | switch (upd_cmd) { |
| 686 | case I40E_NVMUPD_WRITE_CON: | 688 | case I40E_NVMUPD_WRITE_CON: |
| 687 | status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); | 689 | status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); |
| @@ -725,6 +727,39 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw, | |||
| 725 | *errno = -ESRCH; | 727 | *errno = -ESRCH; |
| 726 | break; | 728 | break; |
| 727 | } | 729 | } |
| 730 | |||
| 731 | /* In some circumstances, a multi-write transaction takes longer | ||
| 732 | * than the default 3 minute timeout on the write semaphore. If | ||
| 733 | * the write failed with an EBUSY status, this is likely the problem, | ||
| 734 | * so here we try to reacquire the semaphore then retry the write. | ||
| 735 | * We only do one retry, then give up. | ||
| 736 | */ | ||
| 737 | if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) && | ||
| 738 | !retry_attempt) { | ||
| 739 | i40e_status old_status = status; | ||
| 740 | u32 old_asq_status = hw->aq.asq_last_status; | ||
| 741 | u32 gtime; | ||
| 742 | |||
| 743 | gtime = rd32(hw, I40E_GLVFGEN_TIMER); | ||
| 744 | if (gtime >= hw->nvm.hw_semaphore_timeout) { | ||
| 745 | i40e_debug(hw, I40E_DEBUG_ALL, | ||
| 746 | "NVMUPD: write semaphore expired (%d >= %lld), retrying\n", | ||
| 747 | gtime, hw->nvm.hw_semaphore_timeout); | ||
| 748 | i40e_release_nvm(hw); | ||
| 749 | status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); | ||
| 750 | if (status) { | ||
| 751 | i40e_debug(hw, I40E_DEBUG_ALL, | ||
| 752 | "NVMUPD: write semaphore reacquire failed aq_err = %d\n", | ||
| 753 | hw->aq.asq_last_status); | ||
| 754 | status = old_status; | ||
| 755 | hw->aq.asq_last_status = old_asq_status; | ||
| 756 | } else { | ||
| 757 | retry_attempt = true; | ||
| 758 | goto retry; | ||
| 759 | } | ||
| 760 | } | ||
| 761 | } | ||
| 762 | |||
| 728 | return status; | 763 | return status; |
| 729 | } | 764 | } |
| 730 | 765 | ||
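
The comment block above spells out the retry policy: if a write fails with EBUSY after the write semaphore has expired, release and reacquire the semaphore, retry exactly once, and restore the original error if the reacquire fails. A compact stand-alone model of that single-retry flow; the status codes and helpers are invented:

    #include <stdio.h>
    #include <stdbool.h>

    #define RC_OK    0
    #define RC_EBUSY 1

    static int attempts;

    static int do_write(void)        /* pretend the first attempt hits the stale semaphore */
    {
        return attempts++ == 0 ? RC_EBUSY : RC_OK;
    }

    static int reacquire_semaphore(void)
    {
        return 0;                    /* assume the reacquire succeeds for the example */
    }

    int main(void)
    {
        bool retried = false;
        int status;

    retry:
        status = do_write();
        if (status == RC_EBUSY && !retried) {
            if (reacquire_semaphore() == 0) {
                retried = true;      /* one retry only */
                goto retry;
            }
            /* keep the original failure if we could not reacquire */
        }
        printf("final status: %d after %d attempt(s)\n", status, attempts);
        return status;
    }
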
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 2206d2d36f0f..bbf1b1247ac4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c | |||
| @@ -586,6 +586,20 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring) | |||
| 586 | } | 586 | } |
| 587 | 587 | ||
| 588 | /** | 588 | /** |
| 589 | * i40e_get_head - Retrieve head from head writeback | ||
| 590 | * @tx_ring: tx ring to fetch head of | ||
| 591 | * | ||
| 592 | * Returns value of Tx ring head based on value stored | ||
| 593 | * in head write-back location | ||
| 594 | **/ | ||
| 595 | static inline u32 i40e_get_head(struct i40e_ring *tx_ring) | ||
| 596 | { | ||
| 597 | void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; | ||
| 598 | |||
| 599 | return le32_to_cpu(*(volatile __le32 *)head); | ||
| 600 | } | ||
| 601 | |||
| 602 | /** | ||
| 589 | * i40e_get_tx_pending - how many tx descriptors not processed | 603 | * i40e_get_tx_pending - how many tx descriptors not processed |
| 590 | * @tx_ring: the ring of descriptors | 604 | * @tx_ring: the ring of descriptors |
| 591 | * | 605 | * |
| @@ -594,10 +608,16 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring) | |||
| 594 | **/ | 608 | **/ |
| 595 | static u32 i40e_get_tx_pending(struct i40e_ring *ring) | 609 | static u32 i40e_get_tx_pending(struct i40e_ring *ring) |
| 596 | { | 610 | { |
| 597 | u32 ntu = ((ring->next_to_clean <= ring->next_to_use) | 611 | u32 head, tail; |
| 598 | ? ring->next_to_use | 612 | |
| 599 | : ring->next_to_use + ring->count); | 613 | head = i40e_get_head(ring); |
| 600 | return ntu - ring->next_to_clean; | 614 | tail = readl(ring->tail); |
| 615 | |||
| 616 | if (head != tail) | ||
| 617 | return (head < tail) ? | ||
| 618 | tail - head : (tail + ring->count - head); | ||
| 619 | |||
| 620 | return 0; | ||
| 601 | } | 621 | } |
| 602 | 622 | ||
| 603 | /** | 623 | /** |
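
i40e_get_tx_pending() above now derives the backlog from the hardware head write-back value and the tail register rather than the software next_to_clean/next_to_use pair, wrapping modulo the ring size. A stand-alone model of the arithmetic with made-up ring positions:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t tx_pending(uint32_t head, uint32_t tail, uint32_t count)
    {
        if (head == tail)
            return 0;                             /* nothing outstanding */
        return head < tail ? tail - head          /* no wrap */
                           : tail + count - head; /* tail wrapped past the ring end */
    }

    int main(void)
    {
        uint32_t count = 512;                     /* descriptors in the ring */

        printf("%u\n", tx_pending(100, 130, count));   /* 30 */
        printf("%u\n", tx_pending(500, 20, count));    /* 32: wrapped case */
        printf("%u\n", tx_pending(7, 7, count));       /* 0  */
        return 0;
    }
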
| @@ -606,6 +626,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring) | |||
| 606 | **/ | 626 | **/ |
| 607 | static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) | 627 | static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) |
| 608 | { | 628 | { |
| 629 | u32 tx_done = tx_ring->stats.packets; | ||
| 630 | u32 tx_done_old = tx_ring->tx_stats.tx_done_old; | ||
| 609 | u32 tx_pending = i40e_get_tx_pending(tx_ring); | 631 | u32 tx_pending = i40e_get_tx_pending(tx_ring); |
| 610 | struct i40e_pf *pf = tx_ring->vsi->back; | 632 | struct i40e_pf *pf = tx_ring->vsi->back; |
| 611 | bool ret = false; | 633 | bool ret = false; |
| @@ -623,41 +645,25 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) | |||
| 623 | * run the check_tx_hang logic with a transmit completion | 645 | * run the check_tx_hang logic with a transmit completion |
| 624 | * pending but without time to complete it yet. | 646 | * pending but without time to complete it yet. |
| 625 | */ | 647 | */ |
| 626 | if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && | 648 | if ((tx_done_old == tx_done) && tx_pending) { |
| 627 | (tx_pending >= I40E_MIN_DESC_PENDING)) { | ||
| 628 | /* make sure it is true for two checks in a row */ | 649 | /* make sure it is true for two checks in a row */ |
| 629 | ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, | 650 | ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, |
| 630 | &tx_ring->state); | 651 | &tx_ring->state); |
| 631 | } else if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && | 652 | } else if (tx_done_old == tx_done && |
| 632 | (tx_pending < I40E_MIN_DESC_PENDING) && | 653 | (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) { |
| 633 | (tx_pending > 0)) { | ||
| 634 | if (I40E_DEBUG_FLOW & pf->hw.debug_mask) | 654 | if (I40E_DEBUG_FLOW & pf->hw.debug_mask) |
| 635 | dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d", | 655 | dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d", |
| 636 | tx_pending, tx_ring->queue_index); | 656 | tx_pending, tx_ring->queue_index); |
| 637 | pf->tx_sluggish_count++; | 657 | pf->tx_sluggish_count++; |
| 638 | } else { | 658 | } else { |
| 639 | /* update completed stats and disarm the hang check */ | 659 | /* update completed stats and disarm the hang check */ |
| 640 | tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets; | 660 | tx_ring->tx_stats.tx_done_old = tx_done; |
| 641 | clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); | 661 | clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); |
| 642 | } | 662 | } |
| 643 | 663 | ||
| 644 | return ret; | 664 | return ret; |
| 645 | } | 665 | } |
| 646 | 666 | ||
| 647 | /** | ||
| 648 | * i40e_get_head - Retrieve head from head writeback | ||
| 649 | * @tx_ring: tx ring to fetch head of | ||
| 650 | * | ||
| 651 | * Returns value of Tx ring head based on value stored | ||
| 652 | * in head write-back location | ||
| 653 | **/ | ||
| 654 | static inline u32 i40e_get_head(struct i40e_ring *tx_ring) | ||
| 655 | { | ||
| 656 | void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; | ||
| 657 | |||
| 658 | return le32_to_cpu(*(volatile __le32 *)head); | ||
| 659 | } | ||
| 660 | |||
| 661 | #define WB_STRIDE 0x3 | 667 | #define WB_STRIDE 0x3 |
| 662 | 668 | ||
| 663 | /** | 669 | /** |
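
The reworked hang check above is a two-strike detector: a queue that shows pending descriptors but no new completions is only reported as hung if the armed bit was already set on the previous pass, so a single slow interval never trips it. A small stand-alone model of that logic; the sampled counters are invented:

    #include <stdio.h>
    #include <stdbool.h>

    static bool armed;               /* __I40E_HANG_CHECK_ARMED stand-in */

    /* Returns true only when the queue looked stalled on two consecutive checks. */
    static bool check_hang(unsigned int done_old, unsigned int done, unsigned int pending)
    {
        if (done_old == done && pending) {
            bool was_armed = armed;  /* test_and_set_bit() equivalent */
            armed = true;
            return was_armed;
        }
        armed = false;               /* progress was made: disarm */
        return false;
    }

    int main(void)
    {
        printf("%d\n", check_hang(10, 10, 4));   /* 0: first strike, arm only */
        printf("%d\n", check_hang(10, 10, 4));   /* 1: second strike, report hang */
        printf("%d\n", check_hang(10, 12, 4));   /* 0: progress, disarm */
        return 0;
    }
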
| @@ -2140,6 +2146,67 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) | |||
| 2140 | } | 2146 | } |
| 2141 | 2147 | ||
| 2142 | /** | 2148 | /** |
| 2149 | * i40e_chk_linearize - Check if there are more than 8 fragments per packet | ||
| 2150 | * @skb: send buffer | ||
| 2151 | * @tx_flags: collected send information | ||
| 2152 | * @hdr_len: size of the packet header | ||
| 2153 | * | ||
| 2154 | * Note: Our HW can't scatter-gather more than 8 fragments to build | ||
| 2155 | * a packet on the wire and so we need to figure out the cases where we | ||
| 2156 | * need to linearize the skb. | ||
| 2157 | **/ | ||
| 2158 | static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, | ||
| 2159 | const u8 hdr_len) | ||
| 2160 | { | ||
| 2161 | struct skb_frag_struct *frag; | ||
| 2162 | bool linearize = false; | ||
| 2163 | unsigned int size = 0; | ||
| 2164 | u16 num_frags; | ||
| 2165 | u16 gso_segs; | ||
| 2166 | |||
| 2167 | num_frags = skb_shinfo(skb)->nr_frags; | ||
| 2168 | gso_segs = skb_shinfo(skb)->gso_segs; | ||
| 2169 | |||
| 2170 | if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { | ||
| 2171 | u16 j = 1; | ||
| 2172 | |||
| 2173 | if (num_frags < (I40E_MAX_BUFFER_TXD)) | ||
| 2174 | goto linearize_chk_done; | ||
| 2175 | /* try the simple math, if we have too many frags per segment */ | ||
| 2176 | if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) > | ||
| 2177 | I40E_MAX_BUFFER_TXD) { | ||
| 2178 | linearize = true; | ||
| 2179 | goto linearize_chk_done; | ||
| 2180 | } | ||
| 2181 | frag = &skb_shinfo(skb)->frags[0]; | ||
| 2182 | size = hdr_len; | ||
| 2183 | /* we might still have more fragments per segment */ | ||
| 2184 | do { | ||
| 2185 | size += skb_frag_size(frag); | ||
| 2186 | frag++; j++; | ||
| 2187 | if (j == I40E_MAX_BUFFER_TXD) { | ||
| 2188 | if (size < skb_shinfo(skb)->gso_size) { | ||
| 2189 | linearize = true; | ||
| 2190 | break; | ||
| 2191 | } | ||
| 2192 | j = 1; | ||
| 2193 | size -= skb_shinfo(skb)->gso_size; | ||
| 2194 | if (size) | ||
| 2195 | j++; | ||
| 2196 | size += hdr_len; | ||
| 2197 | } | ||
| 2198 | num_frags--; | ||
| 2199 | } while (num_frags); | ||
| 2200 | } else { | ||
| 2201 | if (num_frags >= I40E_MAX_BUFFER_TXD) | ||
| 2202 | linearize = true; | ||
| 2203 | } | ||
| 2204 | |||
| 2205 | linearize_chk_done: | ||
| 2206 | return linearize; | ||
| 2207 | } | ||
| 2208 | |||
| 2209 | /** | ||
| 2143 | * i40e_tx_map - Build the Tx descriptor | 2210 | * i40e_tx_map - Build the Tx descriptor |
| 2144 | * @tx_ring: ring to send buffer on | 2211 | * @tx_ring: ring to send buffer on |
| 2145 | * @skb: send buffer | 2212 | * @skb: send buffer |
| @@ -2396,6 +2463,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, | |||
| 2396 | if (tsyn) | 2463 | if (tsyn) |
| 2397 | tx_flags |= I40E_TX_FLAGS_TSYN; | 2464 | tx_flags |= I40E_TX_FLAGS_TSYN; |
| 2398 | 2465 | ||
| 2466 | if (i40e_chk_linearize(skb, tx_flags, hdr_len)) | ||
| 2467 | if (skb_linearize(skb)) | ||
| 2468 | goto out_drop; | ||
| 2469 | |||
| 2399 | skb_tx_timestamp(skb); | 2470 | skb_tx_timestamp(skb); |
| 2400 | 2471 | ||
| 2401 | /* always enable CRC insertion offload */ | 2472 | /* always enable CRC insertion offload */ |
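
As the i40e_chk_linearize() comment above notes, the hardware cannot scatter-gather more than 8 fragments per packet, so overly fragmented skbs are flattened with skb_linearize() before mapping. For the non-TSO case the test reduces to a fragment count against I40E_MAX_BUFFER_TXD; a toy stand-alone illustration with made-up fragment counts:

    #include <stdio.h>
    #include <stdbool.h>

    #define MAX_BUFFER_TXD 8         /* descriptors the hardware can chain per packet */

    /* Non-TSO rule from the hunk above: too many fragments means linearize. */
    static bool needs_linearize(unsigned int nr_frags)
    {
        return nr_frags >= MAX_BUFFER_TXD;
    }

    int main(void)
    {
        printf("%d\n", needs_linearize(3));   /* 0: fits in the descriptor chain */
        printf("%d\n", needs_linearize(9));   /* 1: must be copied into one buffer */
        return 0;
    }
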
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 18b00231d2f1..dff0baeb1ecc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h | |||
| @@ -112,6 +112,7 @@ enum i40e_dyn_idx_t { | |||
| 112 | 112 | ||
| 113 | #define i40e_rx_desc i40e_32byte_rx_desc | 113 | #define i40e_rx_desc i40e_32byte_rx_desc |
| 114 | 114 | ||
| 115 | #define I40E_MAX_BUFFER_TXD 8 | ||
| 115 | #define I40E_MIN_TX_LEN 17 | 116 | #define I40E_MIN_TX_LEN 17 |
| 116 | #define I40E_MAX_DATA_PER_TXD 8192 | 117 | #define I40E_MAX_DATA_PER_TXD 8192 |
| 117 | 118 | ||
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 29004382f462..708891571dae 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c | |||
| @@ -126,6 +126,20 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring) | |||
| 126 | } | 126 | } |
| 127 | 127 | ||
| 128 | /** | 128 | /** |
| 129 | * i40e_get_head - Retrieve head from head writeback | ||
| 130 | * @tx_ring: tx ring to fetch head of | ||
| 131 | * | ||
| 132 | * Returns value of Tx ring head based on value stored | ||
| 133 | * in head write-back location | ||
| 134 | **/ | ||
| 135 | static inline u32 i40e_get_head(struct i40e_ring *tx_ring) | ||
| 136 | { | ||
| 137 | void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; | ||
| 138 | |||
| 139 | return le32_to_cpu(*(volatile __le32 *)head); | ||
| 140 | } | ||
| 141 | |||
| 142 | /** | ||
| 129 | * i40e_get_tx_pending - how many tx descriptors not processed | 143 | * i40e_get_tx_pending - how many tx descriptors not processed |
| 130 | * @tx_ring: the ring of descriptors | 144 | * @tx_ring: the ring of descriptors |
| 131 | * | 145 | * |
| @@ -134,10 +148,16 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring) | |||
| 134 | **/ | 148 | **/ |
| 135 | static u32 i40e_get_tx_pending(struct i40e_ring *ring) | 149 | static u32 i40e_get_tx_pending(struct i40e_ring *ring) |
| 136 | { | 150 | { |
| 137 | u32 ntu = ((ring->next_to_clean <= ring->next_to_use) | 151 | u32 head, tail; |
| 138 | ? ring->next_to_use | 152 | |
| 139 | : ring->next_to_use + ring->count); | 153 | head = i40e_get_head(ring); |
| 140 | return ntu - ring->next_to_clean; | 154 | tail = readl(ring->tail); |
| 155 | |||
| 156 | if (head != tail) | ||
| 157 | return (head < tail) ? | ||
| 158 | tail - head : (tail + ring->count - head); | ||
| 159 | |||
| 160 | return 0; | ||
| 141 | } | 161 | } |
| 142 | 162 | ||
| 143 | /** | 163 | /** |
| @@ -146,6 +166,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring) | |||
| 146 | **/ | 166 | **/ |
| 147 | static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) | 167 | static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) |
| 148 | { | 168 | { |
| 169 | u32 tx_done = tx_ring->stats.packets; | ||
| 170 | u32 tx_done_old = tx_ring->tx_stats.tx_done_old; | ||
| 149 | u32 tx_pending = i40e_get_tx_pending(tx_ring); | 171 | u32 tx_pending = i40e_get_tx_pending(tx_ring); |
| 150 | bool ret = false; | 172 | bool ret = false; |
| 151 | 173 | ||
| @@ -162,36 +184,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) | |||
| 162 | * run the check_tx_hang logic with a transmit completion | 184 | * run the check_tx_hang logic with a transmit completion |
| 163 | * pending but without time to complete it yet. | 185 | * pending but without time to complete it yet. |
| 164 | */ | 186 | */ |
| 165 | if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && | 187 | if ((tx_done_old == tx_done) && tx_pending) { |
| 166 | (tx_pending >= I40E_MIN_DESC_PENDING)) { | ||
| 167 | /* make sure it is true for two checks in a row */ | 188 | /* make sure it is true for two checks in a row */ |
| 168 | ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, | 189 | ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, |
| 169 | &tx_ring->state); | 190 | &tx_ring->state); |
| 170 | } else if (!(tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) || | 191 | } else if (tx_done_old == tx_done && |
| 171 | !(tx_pending < I40E_MIN_DESC_PENDING) || | 192 | (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) { |
| 172 | !(tx_pending > 0)) { | ||
| 173 | /* update completed stats and disarm the hang check */ | 193 | /* update completed stats and disarm the hang check */ |
| 174 | tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets; | 194 | tx_ring->tx_stats.tx_done_old = tx_done; |
| 175 | clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); | 195 | clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); |
| 176 | } | 196 | } |
| 177 | 197 | ||
| 178 | return ret; | 198 | return ret; |
| 179 | } | 199 | } |
| 180 | 200 | ||
| 181 | /** | ||
| 182 | * i40e_get_head - Retrieve head from head writeback | ||
| 183 | * @tx_ring: tx ring to fetch head of | ||
| 184 | * | ||
| 185 | * Returns value of Tx ring head based on value stored | ||
| 186 | * in head write-back location | ||
| 187 | **/ | ||
| 188 | static inline u32 i40e_get_head(struct i40e_ring *tx_ring) | ||
| 189 | { | ||
| 190 | void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; | ||
| 191 | |||
| 192 | return le32_to_cpu(*(volatile __le32 *)head); | ||
| 193 | } | ||
| 194 | |||
| 195 | #define WB_STRIDE 0x3 | 201 | #define WB_STRIDE 0x3 |
| 196 | 202 | ||
| 197 | /** | 203 | /** |
| @@ -1206,17 +1212,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, | |||
| 1206 | if (err < 0) | 1212 | if (err < 0) |
| 1207 | return err; | 1213 | return err; |
| 1208 | 1214 | ||
| 1209 | if (protocol == htons(ETH_P_IP)) { | 1215 | iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); |
| 1210 | iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); | 1216 | ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); |
| 1217 | |||
| 1218 | if (iph->version == 4) { | ||
| 1211 | tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); | 1219 | tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); |
| 1212 | iph->tot_len = 0; | 1220 | iph->tot_len = 0; |
| 1213 | iph->check = 0; | 1221 | iph->check = 0; |
| 1214 | tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, | 1222 | tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, |
| 1215 | 0, IPPROTO_TCP, 0); | 1223 | 0, IPPROTO_TCP, 0); |
| 1216 | } else if (skb_is_gso_v6(skb)) { | 1224 | } else if (ipv6h->version == 6) { |
| 1217 | |||
| 1218 | ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) | ||
| 1219 | : ipv6_hdr(skb); | ||
| 1220 | tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); | 1225 | tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); |
| 1221 | ipv6h->payload_len = 0; | 1226 | ipv6h->payload_len = 0; |
| 1222 | tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, | 1227 | tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, |
| @@ -1274,13 +1279,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, | |||
| 1274 | I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; | 1279 | I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; |
| 1275 | } | 1280 | } |
| 1276 | } else if (tx_flags & I40E_TX_FLAGS_IPV6) { | 1281 | } else if (tx_flags & I40E_TX_FLAGS_IPV6) { |
| 1277 | if (tx_flags & I40E_TX_FLAGS_TSO) { | 1282 | *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; |
| 1278 | *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; | 1283 | if (tx_flags & I40E_TX_FLAGS_TSO) |
| 1279 | ip_hdr(skb)->check = 0; | 1284 | ip_hdr(skb)->check = 0; |
| 1280 | } else { | ||
| 1281 | *cd_tunneling |= | ||
| 1282 | I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; | ||
| 1283 | } | ||
| 1284 | } | 1285 | } |
| 1285 | 1286 | ||
| 1286 | /* Now set the ctx descriptor fields */ | 1287 | /* Now set the ctx descriptor fields */ |
| @@ -1290,6 +1291,11 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, | |||
| 1290 | ((skb_inner_network_offset(skb) - | 1291 | ((skb_inner_network_offset(skb) - |
| 1291 | skb_transport_offset(skb)) >> 1) << | 1292 | skb_transport_offset(skb)) >> 1) << |
| 1292 | I40E_TXD_CTX_QW0_NATLEN_SHIFT; | 1293 | I40E_TXD_CTX_QW0_NATLEN_SHIFT; |
| 1294 | if (this_ip_hdr->version == 6) { | ||
| 1295 | tx_flags &= ~I40E_TX_FLAGS_IPV4; | ||
| 1296 | tx_flags |= I40E_TX_FLAGS_IPV6; | ||
| 1297 | } | ||
| 1298 | |||
| 1293 | 1299 | ||
| 1294 | } else { | 1300 | } else { |
| 1295 | network_hdr_len = skb_network_header_len(skb); | 1301 | network_hdr_len = skb_network_header_len(skb); |
| @@ -1380,6 +1386,67 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, | |||
| 1380 | context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); | 1386 | context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); |
| 1381 | } | 1387 | } |
| 1382 | 1388 | ||
| 1389 | /** | ||
| 1390 | * i40e_chk_linearize - Check if there are more than 8 fragments per packet | ||
| 1391 | * @skb: send buffer | ||
| 1392 | * @tx_flags: collected send information | ||
| 1393 | * @hdr_len: size of the packet header | ||
| 1394 | * | ||
| 1395 | * Note: Our HW can't scatter-gather more than 8 fragments to build | ||
| 1396 | * a packet on the wire and so we need to figure out the cases where we | ||
| 1397 | * need to linearize the skb. | ||
| 1398 | **/ | ||
| 1399 | static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, | ||
| 1400 | const u8 hdr_len) | ||
| 1401 | { | ||
| 1402 | struct skb_frag_struct *frag; | ||
| 1403 | bool linearize = false; | ||
| 1404 | unsigned int size = 0; | ||
| 1405 | u16 num_frags; | ||
| 1406 | u16 gso_segs; | ||
| 1407 | |||
| 1408 | num_frags = skb_shinfo(skb)->nr_frags; | ||
| 1409 | gso_segs = skb_shinfo(skb)->gso_segs; | ||
| 1410 | |||
| 1411 | if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { | ||
| 1412 | u16 j = 1; | ||
| 1413 | |||
| 1414 | if (num_frags < (I40E_MAX_BUFFER_TXD)) | ||
| 1415 | goto linearize_chk_done; | ||
| 1416 | /* try the simple math, if we have too many frags per segment */ | ||
| 1417 | if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) > | ||
| 1418 | I40E_MAX_BUFFER_TXD) { | ||
| 1419 | linearize = true; | ||
| 1420 | goto linearize_chk_done; | ||
| 1421 | } | ||
| 1422 | frag = &skb_shinfo(skb)->frags[0]; | ||
| 1423 | size = hdr_len; | ||
| 1424 | /* we might still have more fragments per segment */ | ||
| 1425 | do { | ||
| 1426 | size += skb_frag_size(frag); | ||
| 1427 | frag++; j++; | ||
| 1428 | if (j == I40E_MAX_BUFFER_TXD) { | ||
| 1429 | if (size < skb_shinfo(skb)->gso_size) { | ||
| 1430 | linearize = true; | ||
| 1431 | break; | ||
| 1432 | } | ||
| 1433 | j = 1; | ||
| 1434 | size -= skb_shinfo(skb)->gso_size; | ||
| 1435 | if (size) | ||
| 1436 | j++; | ||
| 1437 | size += hdr_len; | ||
| 1438 | } | ||
| 1439 | num_frags--; | ||
| 1440 | } while (num_frags); | ||
| 1441 | } else { | ||
| 1442 | if (num_frags >= I40E_MAX_BUFFER_TXD) | ||
| 1443 | linearize = true; | ||
| 1444 | } | ||
| 1445 | |||
| 1446 | linearize_chk_done: | ||
| 1447 | return linearize; | ||
| 1448 | } | ||
| 1449 | |||
| 1383 | /** | 1450 | /** |
| 1384 | * i40e_tx_map - Build the Tx descriptor | 1451 | * i40e_tx_map - Build the Tx descriptor |
| 1385 | * @tx_ring: ring to send buffer on | 1452 | * @tx_ring: ring to send buffer on |
| @@ -1654,6 +1721,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, | |||
| 1654 | else if (tso) | 1721 | else if (tso) |
| 1655 | tx_flags |= I40E_TX_FLAGS_TSO; | 1722 | tx_flags |= I40E_TX_FLAGS_TSO; |
| 1656 | 1723 | ||
| 1724 | if (i40e_chk_linearize(skb, tx_flags, hdr_len)) | ||
| 1725 | if (skb_linearize(skb)) | ||
| 1726 | goto out_drop; | ||
| 1727 | |||
| 1657 | skb_tx_timestamp(skb); | 1728 | skb_tx_timestamp(skb); |
| 1658 | 1729 | ||
| 1659 | /* always enable CRC insertion offload */ | 1730 | /* always enable CRC insertion offload */ |
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index 4e15903b2b6d..c950a038237c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h | |||
| @@ -112,6 +112,7 @@ enum i40e_dyn_idx_t { | |||
| 112 | 112 | ||
| 113 | #define i40e_rx_desc i40e_32byte_rx_desc | 113 | #define i40e_rx_desc i40e_32byte_rx_desc |
| 114 | 114 | ||
| 115 | #define I40E_MAX_BUFFER_TXD 8 | ||
| 115 | #define I40E_MIN_TX_LEN 17 | 116 | #define I40E_MIN_TX_LEN 17 |
| 116 | #define I40E_MAX_DATA_PER_TXD 8192 | 117 | #define I40E_MAX_DATA_PER_TXD 8192 |
| 117 | 118 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c index 2d8ee66138e8..a61009f4b2df 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c | |||
| @@ -81,12 +81,14 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv) | |||
| 81 | { | 81 | { |
| 82 | u32 loopback_ok = 0; | 82 | u32 loopback_ok = 0; |
| 83 | int i; | 83 | int i; |
| 84 | 84 | bool gro_enabled; | |
| 85 | 85 | ||
| 86 | priv->loopback_ok = 0; | 86 | priv->loopback_ok = 0; |
| 87 | priv->validate_loopback = 1; | 87 | priv->validate_loopback = 1; |
| 88 | gro_enabled = priv->dev->features & NETIF_F_GRO; | ||
| 88 | 89 | ||
| 89 | mlx4_en_update_loopback_state(priv->dev, priv->dev->features); | 90 | mlx4_en_update_loopback_state(priv->dev, priv->dev->features); |
| 91 | priv->dev->features &= ~NETIF_F_GRO; | ||
| 90 | 92 | ||
| 91 | /* xmit */ | 93 | /* xmit */ |
| 92 | if (mlx4_en_test_loopback_xmit(priv)) { | 94 | if (mlx4_en_test_loopback_xmit(priv)) { |
| @@ -108,6 +110,10 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv) | |||
| 108 | mlx4_en_test_loopback_exit: | 110 | mlx4_en_test_loopback_exit: |
| 109 | 111 | ||
| 110 | priv->validate_loopback = 0; | 112 | priv->validate_loopback = 0; |
| 113 | |||
| 114 | if (gro_enabled) | ||
| 115 | priv->dev->features |= NETIF_F_GRO; | ||
| 116 | |||
| 111 | mlx4_en_update_loopback_state(priv->dev, priv->dev->features); | 117 | mlx4_en_update_loopback_state(priv->dev, priv->dev->features); |
| 112 | return !loopback_ok; | 118 | return !loopback_ok; |
| 113 | } | 119 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 2bb8553bd905..eda29dbbfcd2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c | |||
| @@ -412,7 +412,6 @@ err_icm: | |||
| 412 | 412 | ||
| 413 | EXPORT_SYMBOL_GPL(mlx4_qp_alloc); | 413 | EXPORT_SYMBOL_GPL(mlx4_qp_alloc); |
| 414 | 414 | ||
| 415 | #define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC | ||
| 416 | int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, | 415 | int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, |
| 417 | enum mlx4_update_qp_attr attr, | 416 | enum mlx4_update_qp_attr attr, |
| 418 | struct mlx4_update_qp_params *params) | 417 | struct mlx4_update_qp_params *params) |
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 486e3d26cd4a..d97ca88c55b5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | |||
| @@ -713,7 +713,7 @@ static int update_vport_qp_param(struct mlx4_dev *dev, | |||
| 713 | struct mlx4_vport_oper_state *vp_oper; | 713 | struct mlx4_vport_oper_state *vp_oper; |
| 714 | struct mlx4_priv *priv; | 714 | struct mlx4_priv *priv; |
| 715 | u32 qp_type; | 715 | u32 qp_type; |
| 716 | int port; | 716 | int port, err = 0; |
| 717 | 717 | ||
| 718 | port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1; | 718 | port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1; |
| 719 | priv = mlx4_priv(dev); | 719 | priv = mlx4_priv(dev); |
| @@ -738,7 +738,9 @@ static int update_vport_qp_param(struct mlx4_dev *dev, | |||
| 738 | } else { | 738 | } else { |
| 739 | struct mlx4_update_qp_params params = {.flags = 0}; | 739 | struct mlx4_update_qp_params params = {.flags = 0}; |
| 740 | 740 | ||
| 741 | mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, ¶ms); | 741 | err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, ¶ms); |
| 742 | if (err) | ||
| 743 | goto out; | ||
| 742 | } | 744 | } |
| 743 | } | 745 | } |
| 744 | 746 | ||
| @@ -773,7 +775,8 @@ static int update_vport_qp_param(struct mlx4_dev *dev, | |||
| 773 | qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC; | 775 | qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC; |
| 774 | qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx; | 776 | qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx; |
| 775 | } | 777 | } |
| 776 | return 0; | 778 | out: |
| 779 | return err; | ||
| 777 | } | 780 | } |
| 778 | 781 | ||
| 779 | static int mpt_mask(struct mlx4_dev *dev) | 782 | static int mpt_mask(struct mlx4_dev *dev) |
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index 44e8d7d25547..57a6e6cd74fc 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c | |||
| @@ -1239,11 +1239,9 @@ static int pasemi_mac_open(struct net_device *dev) | |||
| 1239 | if (mac->phydev) | 1239 | if (mac->phydev) |
| 1240 | phy_start(mac->phydev); | 1240 | phy_start(mac->phydev); |
| 1241 | 1241 | ||
| 1242 | init_timer(&mac->tx->clean_timer); | 1242 | setup_timer(&mac->tx->clean_timer, pasemi_mac_tx_timer, |
| 1243 | mac->tx->clean_timer.function = pasemi_mac_tx_timer; | 1243 | (unsigned long)mac->tx); |
| 1244 | mac->tx->clean_timer.data = (unsigned long)mac->tx; | 1244 | mod_timer(&mac->tx->clean_timer, jiffies + HZ); |
| 1245 | mac->tx->clean_timer.expires = jiffies+HZ; | ||
| 1246 | add_timer(&mac->tx->clean_timer); | ||
| 1247 | 1245 | ||
| 1248 | return 0; | 1246 | return 0; |
| 1249 | 1247 | ||
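
The pasemi hunk above is an API cleanup: setup_timer() collapses the open-coded init_timer() plus function/data assignments, and mod_timer() both sets the expiry and arms an inactive timer, replacing the manual expires assignment and add_timer(). A hedged kernel-style sketch of the same conversion for a made-up module, using the pre-4.15 timer API where callbacks take an unsigned long:

    #include <linux/module.h>
    #include <linux/timer.h>
    #include <linux/jiffies.h>

    static struct timer_list demo_timer;

    static void demo_timer_fn(unsigned long data)
    {
        pr_info("demo timer fired, data=%lu\n", data);
    }

    static int __init demo_init(void)
    {
        /* old style:
         *   init_timer(&demo_timer);
         *   demo_timer.function = demo_timer_fn;
         *   demo_timer.data = 42;
         *   demo_timer.expires = jiffies + HZ;
         *   add_timer(&demo_timer);
         */
        setup_timer(&demo_timer, demo_timer_fn, 42);
        mod_timer(&demo_timer, jiffies + HZ);   /* arms the timer one second out */
        return 0;
    }

    static void __exit demo_exit(void)
    {
        del_timer_sync(&demo_timer);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
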
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h index 6e426ae94692..0a5e204a0179 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h | |||
| @@ -354,7 +354,7 @@ struct cmd_desc_type0 { | |||
| 354 | 354 | ||
| 355 | } __attribute__ ((aligned(64))); | 355 | } __attribute__ ((aligned(64))); |
| 356 | 356 | ||
| 357 | /* Note: sizeof(rcv_desc) should always be a mutliple of 2 */ | 357 | /* Note: sizeof(rcv_desc) should always be a multiple of 2 */ |
| 358 | struct rcv_desc { | 358 | struct rcv_desc { |
| 359 | __le16 reference_handle; | 359 | __le16 reference_handle; |
| 360 | __le16 reserved; | 360 | __le16 reserved; |
| @@ -499,7 +499,7 @@ struct uni_data_desc{ | |||
| 499 | #define NETXEN_IMAGE_START 0x43000 /* compressed image */ | 499 | #define NETXEN_IMAGE_START 0x43000 /* compressed image */ |
| 500 | #define NETXEN_SECONDARY_START 0x200000 /* backup images */ | 500 | #define NETXEN_SECONDARY_START 0x200000 /* backup images */ |
| 501 | #define NETXEN_PXE_START 0x3E0000 /* PXE boot rom */ | 501 | #define NETXEN_PXE_START 0x3E0000 /* PXE boot rom */ |
| 502 | #define NETXEN_USER_START 0x3E8000 /* Firmare info */ | 502 | #define NETXEN_USER_START 0x3E8000 /* Firmware info */ |
| 503 | #define NETXEN_FIXED_START 0x3F0000 /* backup of crbinit */ | 503 | #define NETXEN_FIXED_START 0x3F0000 /* backup of crbinit */ |
| 504 | #define NETXEN_USER_START_OLD NETXEN_PXE_START /* very old flash */ | 504 | #define NETXEN_USER_START_OLD NETXEN_PXE_START /* very old flash */ |
| 505 | 505 | ||
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index fa4317611fd6..f221126a5c4e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | |||
| @@ -314,7 +314,7 @@ struct qlcnic_fdt { | |||
| 314 | #define QLCNIC_BRDCFG_START 0x4000 /* board config */ | 314 | #define QLCNIC_BRDCFG_START 0x4000 /* board config */ |
| 315 | #define QLCNIC_BOOTLD_START 0x10000 /* bootld */ | 315 | #define QLCNIC_BOOTLD_START 0x10000 /* bootld */ |
| 316 | #define QLCNIC_IMAGE_START 0x43000 /* compressed image */ | 316 | #define QLCNIC_IMAGE_START 0x43000 /* compressed image */ |
| 317 | #define QLCNIC_USER_START 0x3E8000 /* Firmare info */ | 317 | #define QLCNIC_USER_START 0x3E8000 /* Firmware info */ |
| 318 | 318 | ||
| 319 | #define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408) | 319 | #define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408) |
| 320 | #define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c) | 320 | #define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c) |
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index ad0020af2193..c70ab40d8698 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
| @@ -2561,7 +2561,7 @@ static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) | |||
| 2561 | int rc = -EINVAL; | 2561 | int rc = -EINVAL; |
| 2562 | 2562 | ||
| 2563 | if (!rtl_fw_format_ok(tp, rtl_fw)) { | 2563 | if (!rtl_fw_format_ok(tp, rtl_fw)) { |
| 2564 | netif_err(tp, ifup, dev, "invalid firwmare\n"); | 2564 | netif_err(tp, ifup, dev, "invalid firmware\n"); |
| 2565 | goto out; | 2565 | goto out; |
| 2566 | } | 2566 | } |
| 2567 | 2567 | ||
| @@ -5067,8 +5067,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp) | |||
| 5067 | RTL_W8(ChipCmd, CmdReset); | 5067 | RTL_W8(ChipCmd, CmdReset); |
| 5068 | 5068 | ||
| 5069 | rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100); | 5069 | rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100); |
| 5070 | |||
| 5071 | netdev_reset_queue(tp->dev); | ||
| 5072 | } | 5070 | } |
| 5073 | 5071 | ||
| 5074 | static void rtl_request_uncached_firmware(struct rtl8169_private *tp) | 5072 | static void rtl_request_uncached_firmware(struct rtl8169_private *tp) |
| @@ -7049,7 +7047,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, | |||
| 7049 | u32 status, len; | 7047 | u32 status, len; |
| 7050 | u32 opts[2]; | 7048 | u32 opts[2]; |
| 7051 | int frags; | 7049 | int frags; |
| 7052 | bool stop_queue; | ||
| 7053 | 7050 | ||
| 7054 | if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) { | 7051 | if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) { |
| 7055 | netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n"); | 7052 | netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n"); |
| @@ -7090,8 +7087,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, | |||
| 7090 | 7087 | ||
| 7091 | txd->opts2 = cpu_to_le32(opts[1]); | 7088 | txd->opts2 = cpu_to_le32(opts[1]); |
| 7092 | 7089 | ||
| 7093 | netdev_sent_queue(dev, skb->len); | ||
| 7094 | |||
| 7095 | skb_tx_timestamp(skb); | 7090 | skb_tx_timestamp(skb); |
| 7096 | 7091 | ||
| 7097 | /* Force memory writes to complete before releasing descriptor */ | 7092 | /* Force memory writes to complete before releasing descriptor */ |
| @@ -7106,16 +7101,11 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, | |||
| 7106 | 7101 | ||
| 7107 | tp->cur_tx += frags + 1; | 7102 | tp->cur_tx += frags + 1; |
| 7108 | 7103 | ||
| 7109 | stop_queue = !TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS); | 7104 | RTL_W8(TxPoll, NPQ); |
| 7110 | 7105 | ||
| 7111 | if (!skb->xmit_more || stop_queue || | 7106 | mmiowb(); |
| 7112 | netif_xmit_stopped(netdev_get_tx_queue(dev, 0))) { | ||
| 7113 | RTL_W8(TxPoll, NPQ); | ||
| 7114 | |||
| 7115 | mmiowb(); | ||
| 7116 | } | ||
| 7117 | 7107 | ||
| 7118 | if (stop_queue) { | 7108 | if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) { |
| 7119 | /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must | 7109 | /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must |
| 7120 | * not miss a ring update when it notices a stopped queue. | 7110 | * not miss a ring update when it notices a stopped queue. |
| 7121 | */ | 7111 | */ |
| @@ -7198,7 +7188,6 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev) | |||
| 7198 | static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) | 7188 | static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) |
| 7199 | { | 7189 | { |
| 7200 | unsigned int dirty_tx, tx_left; | 7190 | unsigned int dirty_tx, tx_left; |
| 7201 | unsigned int bytes_compl = 0, pkts_compl = 0; | ||
| 7202 | 7191 | ||
| 7203 | dirty_tx = tp->dirty_tx; | 7192 | dirty_tx = tp->dirty_tx; |
| 7204 | smp_rmb(); | 7193 | smp_rmb(); |
| @@ -7222,8 +7211,10 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) | |||
| 7222 | rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb, | 7211 | rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb, |
| 7223 | tp->TxDescArray + entry); | 7212 | tp->TxDescArray + entry); |
| 7224 | if (status & LastFrag) { | 7213 | if (status & LastFrag) { |
| 7225 | pkts_compl++; | 7214 | u64_stats_update_begin(&tp->tx_stats.syncp); |
| 7226 | bytes_compl += tx_skb->skb->len; | 7215 | tp->tx_stats.packets++; |
| 7216 | tp->tx_stats.bytes += tx_skb->skb->len; | ||
| 7217 | u64_stats_update_end(&tp->tx_stats.syncp); | ||
| 7227 | dev_kfree_skb_any(tx_skb->skb); | 7218 | dev_kfree_skb_any(tx_skb->skb); |
| 7228 | tx_skb->skb = NULL; | 7219 | tx_skb->skb = NULL; |
| 7229 | } | 7220 | } |
| @@ -7232,13 +7223,6 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) | |||
| 7232 | } | 7223 | } |
| 7233 | 7224 | ||
| 7234 | if (tp->dirty_tx != dirty_tx) { | 7225 | if (tp->dirty_tx != dirty_tx) { |
| 7235 | netdev_completed_queue(tp->dev, pkts_compl, bytes_compl); | ||
| 7236 | |||
| 7237 | u64_stats_update_begin(&tp->tx_stats.syncp); | ||
| 7238 | tp->tx_stats.packets += pkts_compl; | ||
| 7239 | tp->tx_stats.bytes += bytes_compl; | ||
| 7240 | u64_stats_update_end(&tp->tx_stats.syncp); | ||
| 7241 | |||
| 7242 | tp->dirty_tx = dirty_tx; | 7226 | tp->dirty_tx = dirty_tx; |
| 7243 | /* Sync with rtl8169_start_xmit: | 7227 | /* Sync with rtl8169_start_xmit: |
| 7244 | * - publish dirty_tx ring index (write barrier) | 7228 | * - publish dirty_tx ring index (write barrier) |
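With the byte-queue-limits bookkeeping dropped above, r8169 now bumps its 64-bit counters per completed packet inside the u64_stats sequence. A rough sketch of that writer-side pattern; the struct layout here is assumed, not copied from the driver:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct example_tx_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

/* Bracket the 64-bit updates so 32-bit readers never see torn values. */
static void example_account_tx(struct example_tx_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	s->packets++;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
}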
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 4da8bd263997..736d5d1624a1 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c | |||
| @@ -508,7 +508,6 @@ static struct sh_eth_cpu_data r8a779x_data = { | |||
| 508 | .tpauser = 1, | 508 | .tpauser = 1, |
| 509 | .hw_swap = 1, | 509 | .hw_swap = 1, |
| 510 | .rmiimode = 1, | 510 | .rmiimode = 1, |
| 511 | .shift_rd0 = 1, | ||
| 512 | }; | 511 | }; |
| 513 | 512 | ||
| 514 | static void sh_eth_set_rate_sh7724(struct net_device *ndev) | 513 | static void sh_eth_set_rate_sh7724(struct net_device *ndev) |
| @@ -1392,6 +1391,9 @@ static void sh_eth_dev_exit(struct net_device *ndev) | |||
| 1392 | msleep(2); /* max frame time at 10 Mbps < 1250 us */ | 1391 | msleep(2); /* max frame time at 10 Mbps < 1250 us */ |
| 1393 | sh_eth_get_stats(ndev); | 1392 | sh_eth_get_stats(ndev); |
| 1394 | sh_eth_reset(ndev); | 1393 | sh_eth_reset(ndev); |
| 1394 | |||
| 1395 | /* Set MAC address again */ | ||
| 1396 | update_mac_address(ndev); | ||
| 1395 | } | 1397 | } |
| 1396 | 1398 | ||
| 1397 | /* free Tx skb function */ | 1399 | /* free Tx skb function */ |
| @@ -1407,6 +1409,8 @@ static int sh_eth_txfree(struct net_device *ndev) | |||
| 1407 | txdesc = &mdp->tx_ring[entry]; | 1409 | txdesc = &mdp->tx_ring[entry]; |
| 1408 | if (txdesc->status & cpu_to_edmac(mdp, TD_TACT)) | 1410 | if (txdesc->status & cpu_to_edmac(mdp, TD_TACT)) |
| 1409 | break; | 1411 | break; |
| 1412 | /* TACT bit must be checked before all the following reads */ | ||
| 1413 | rmb(); | ||
| 1410 | /* Free the original skb. */ | 1414 | /* Free the original skb. */ |
| 1411 | if (mdp->tx_skbuff[entry]) { | 1415 | if (mdp->tx_skbuff[entry]) { |
| 1412 | dma_unmap_single(&ndev->dev, txdesc->addr, | 1416 | dma_unmap_single(&ndev->dev, txdesc->addr, |
| @@ -1444,6 +1448,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) | |||
| 1444 | limit = boguscnt; | 1448 | limit = boguscnt; |
| 1445 | rxdesc = &mdp->rx_ring[entry]; | 1449 | rxdesc = &mdp->rx_ring[entry]; |
| 1446 | while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { | 1450 | while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { |
| 1451 | /* RACT bit must be checked before all the following reads */ | ||
| 1452 | rmb(); | ||
| 1447 | desc_status = edmac_to_cpu(mdp, rxdesc->status); | 1453 | desc_status = edmac_to_cpu(mdp, rxdesc->status); |
| 1448 | pkt_len = rxdesc->frame_length; | 1454 | pkt_len = rxdesc->frame_length; |
| 1449 | 1455 | ||
| @@ -1455,8 +1461,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) | |||
| 1455 | 1461 | ||
| 1456 | /* In case of almost all GETHER/ETHERs, the Receive Frame State | 1462 | /* In case of almost all GETHER/ETHERs, the Receive Frame State |
| 1457 | * (RFS) bits in the Receive Descriptor 0 are from bit 9 to | 1463 | * (RFS) bits in the Receive Descriptor 0 are from bit 9 to |
| 1458 | * bit 0. However, in case of the R8A7740, R8A779x, and | 1464 | * bit 0. However, in case of the R8A7740 and R7S72100 |
| 1459 | * R7S72100 the RFS bits are from bit 25 to bit 16. So, the | 1465 | * the RFS bits are from bit 25 to bit 16. So, the |
| 1460 | * driver needs right shifting by 16. | 1466 | * driver needs right shifting by 16. |
| 1461 | */ | 1467 | */ |
| 1462 | if (mdp->cd->shift_rd0) | 1468 | if (mdp->cd->shift_rd0) |
| @@ -1523,6 +1529,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) | |||
| 1523 | skb_checksum_none_assert(skb); | 1529 | skb_checksum_none_assert(skb); |
| 1524 | rxdesc->addr = dma_addr; | 1530 | rxdesc->addr = dma_addr; |
| 1525 | } | 1531 | } |
| 1532 | wmb(); /* RACT bit must be set after all the above writes */ | ||
| 1526 | if (entry >= mdp->num_rx_ring - 1) | 1533 | if (entry >= mdp->num_rx_ring - 1) |
| 1527 | rxdesc->status |= | 1534 | rxdesc->status |= |
| 1528 | cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL); | 1535 | cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL); |
| @@ -1535,7 +1542,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) | |||
| 1535 | /* If we don't need to check status, don't. -KDU */ | 1542 | /* If we don't need to check status, don't. -KDU */ |
| 1536 | if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) { | 1543 | if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) { |
| 1537 | /* fix the values for the next receiving if RDE is set */ | 1544 | /* fix the values for the next receiving if RDE is set */ |
| 1538 | if (intr_status & EESR_RDE) { | 1545 | if (intr_status & EESR_RDE && mdp->reg_offset[RDFAR] != 0) { |
| 1539 | u32 count = (sh_eth_read(ndev, RDFAR) - | 1546 | u32 count = (sh_eth_read(ndev, RDFAR) - |
| 1540 | sh_eth_read(ndev, RDLAR)) >> 4; | 1547 | sh_eth_read(ndev, RDLAR)) >> 4; |
| 1541 | 1548 | ||
| @@ -2174,7 +2181,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
| 2174 | } | 2181 | } |
| 2175 | spin_unlock_irqrestore(&mdp->lock, flags); | 2182 | spin_unlock_irqrestore(&mdp->lock, flags); |
| 2176 | 2183 | ||
| 2177 | if (skb_padto(skb, ETH_ZLEN)) | 2184 | if (skb_put_padto(skb, ETH_ZLEN)) |
| 2178 | return NETDEV_TX_OK; | 2185 | return NETDEV_TX_OK; |
| 2179 | 2186 | ||
| 2180 | entry = mdp->cur_tx % mdp->num_tx_ring; | 2187 | entry = mdp->cur_tx % mdp->num_tx_ring; |
| @@ -2192,6 +2199,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
| 2192 | } | 2199 | } |
| 2193 | txdesc->buffer_length = skb->len; | 2200 | txdesc->buffer_length = skb->len; |
| 2194 | 2201 | ||
| 2202 | wmb(); /* TACT bit must be set after all the above writes */ | ||
| 2195 | if (entry >= mdp->num_tx_ring - 1) | 2203 | if (entry >= mdp->num_tx_ring - 1) |
| 2196 | txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); | 2204 | txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); |
| 2197 | else | 2205 | else |
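The sh_eth changes add rmb()/wmb() around the descriptor ownership bits (RACT/TACT): the CPU must not read descriptor fields before it has observed ownership being released, and must not hand a descriptor back to the DMA engine before its fields are written. A simplified sketch of that ordering; the descriptor layout and OWN bit below are illustrative, not sh_eth's:

#include <linux/types.h>
#include <linux/bitops.h>
#include <asm/barrier.h>

struct example_desc {
	u32 status;			/* OWN set => hardware owns the descriptor */
	u32 len;
	u32 addr;
};
#define EXAMPLE_DESC_OWN	BIT(31)

/* Consume a completed descriptor: check ownership, then read the rest. */
static bool example_desc_complete(struct example_desc *d, u32 *len)
{
	if (d->status & EXAMPLE_DESC_OWN)
		return false;		/* still owned by the hardware */
	rmb();				/* ownership check before the data reads */
	*len = d->len;
	return true;
}

/* Hand a refilled descriptor back: write the fields, then the ownership. */
static void example_desc_publish(struct example_desc *d, u32 addr, u32 len)
{
	d->addr = addr;
	d->len = len;
	wmb();				/* all writes visible before OWN is set */
	d->status |= EXAMPLE_DESC_OWN;
}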
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index 34389b6aa67c..9fb6948e14c6 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c | |||
| @@ -1257,9 +1257,9 @@ static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable) | |||
| 1257 | u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE); | 1257 | u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE); |
| 1258 | 1258 | ||
| 1259 | if (enable) | 1259 | if (enable) |
| 1260 | val |= 1 << rocker_port->lport; | 1260 | val |= 1ULL << rocker_port->lport; |
| 1261 | else | 1261 | else |
| 1262 | val &= ~(1 << rocker_port->lport); | 1262 | val &= ~(1ULL << rocker_port->lport); |
| 1263 | rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val); | 1263 | rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val); |
| 1264 | } | 1264 | } |
| 1265 | 1265 | ||
| @@ -4201,6 +4201,8 @@ static int rocker_probe_ports(struct rocker *rocker) | |||
| 4201 | 4201 | ||
| 4202 | alloc_size = sizeof(struct rocker_port *) * rocker->port_count; | 4202 | alloc_size = sizeof(struct rocker_port *) * rocker->port_count; |
| 4203 | rocker->ports = kmalloc(alloc_size, GFP_KERNEL); | 4203 | rocker->ports = kmalloc(alloc_size, GFP_KERNEL); |
| 4204 | if (!rocker->ports) | ||
| 4205 | return -ENOMEM; | ||
| 4204 | for (i = 0; i < rocker->port_count; i++) { | 4206 | for (i = 0; i < rocker->port_count; i++) { |
| 4205 | err = rocker_probe_port(rocker, i); | 4207 | err = rocker_probe_port(rocker, i); |
| 4206 | if (err) | 4208 | if (err) |
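The rocker fix widens the shifted constant: `1 << lport` is evaluated as a 32-bit int, so ports at bit 31 and above miss or corrupt the intended bit in the 64-bit enable mask, while `1ULL << lport` shifts in 64 bits. A tiny sketch of the difference with invented names:

#include <linux/types.h>

static u64 example_port_mask(u64 val, unsigned int port, bool enable)
{
	/* Broken for port >= 31: the shift is done in 32-bit int.
	 *	val |= 1 << port;
	 */
	if (enable)
		val |= 1ULL << port;	/* 64-bit shift */
	else
		val &= ~(1ULL << port);
	return val;
}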
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c index 6b33127ab352..3449893aea8d 100644 --- a/drivers/net/ethernet/smsc/smc91c92_cs.c +++ b/drivers/net/ethernet/smsc/smc91c92_cs.c | |||
| @@ -1070,11 +1070,8 @@ static int smc_open(struct net_device *dev) | |||
| 1070 | smc->packets_waiting = 0; | 1070 | smc->packets_waiting = 0; |
| 1071 | 1071 | ||
| 1072 | smc_reset(dev); | 1072 | smc_reset(dev); |
| 1073 | init_timer(&smc->media); | 1073 | setup_timer(&smc->media, media_check, (u_long)dev); |
| 1074 | smc->media.function = media_check; | 1074 | mod_timer(&smc->media, jiffies + HZ); |
| 1075 | smc->media.data = (u_long) dev; | ||
| 1076 | smc->media.expires = jiffies + HZ; | ||
| 1077 | add_timer(&smc->media); | ||
| 1078 | 1075 | ||
| 1079 | return 0; | 1076 | return 0; |
| 1080 | } /* smc_open */ | 1077 | } /* smc_open */ |
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 88a55f95fe09..5d093dc0f5f5 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c | |||
| @@ -91,6 +91,11 @@ static const char version[] = | |||
| 91 | 91 | ||
| 92 | #include "smc91x.h" | 92 | #include "smc91x.h" |
| 93 | 93 | ||
| 94 | #if defined(CONFIG_ASSABET_NEPONSET) | ||
| 95 | #include <mach/assabet.h> | ||
| 96 | #include <mach/neponset.h> | ||
| 97 | #endif | ||
| 98 | |||
| 94 | #ifndef SMC_NOWAIT | 99 | #ifndef SMC_NOWAIT |
| 95 | # define SMC_NOWAIT 0 | 100 | # define SMC_NOWAIT 0 |
| 96 | #endif | 101 | #endif |
| @@ -2355,8 +2360,9 @@ static int smc_drv_probe(struct platform_device *pdev) | |||
| 2355 | ret = smc_request_attrib(pdev, ndev); | 2360 | ret = smc_request_attrib(pdev, ndev); |
| 2356 | if (ret) | 2361 | if (ret) |
| 2357 | goto out_release_io; | 2362 | goto out_release_io; |
| 2358 | #if defined(CONFIG_SA1100_ASSABET) | 2363 | #if defined(CONFIG_ASSABET_NEPONSET) |
| 2359 | neponset_ncr_set(NCR_ENET_OSC_EN); | 2364 | if (machine_is_assabet() && machine_has_neponset()) |
| 2365 | neponset_ncr_set(NCR_ENET_OSC_EN); | ||
| 2360 | #endif | 2366 | #endif |
| 2361 | platform_set_drvdata(pdev, ndev); | 2367 | platform_set_drvdata(pdev, ndev); |
| 2362 | ret = smc_enable_device(pdev); | 2368 | ret = smc_enable_device(pdev); |
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h index be67baf5f677..3a18501d1068 100644 --- a/drivers/net/ethernet/smsc/smc91x.h +++ b/drivers/net/ethernet/smsc/smc91x.h | |||
| @@ -39,14 +39,7 @@ | |||
| 39 | * Define your architecture specific bus configuration parameters here. | 39 | * Define your architecture specific bus configuration parameters here. |
| 40 | */ | 40 | */ |
| 41 | 41 | ||
| 42 | #if defined(CONFIG_ARCH_LUBBOCK) ||\ | 42 | #if defined(CONFIG_ARM) |
| 43 | defined(CONFIG_MACH_MAINSTONE) ||\ | ||
| 44 | defined(CONFIG_MACH_ZYLONITE) ||\ | ||
| 45 | defined(CONFIG_MACH_LITTLETON) ||\ | ||
| 46 | defined(CONFIG_MACH_ZYLONITE2) ||\ | ||
| 47 | defined(CONFIG_ARCH_VIPER) ||\ | ||
| 48 | defined(CONFIG_MACH_STARGATE2) ||\ | ||
| 49 | defined(CONFIG_ARCH_VERSATILE) | ||
| 50 | 43 | ||
| 51 | #include <asm/mach-types.h> | 44 | #include <asm/mach-types.h> |
| 52 | 45 | ||
| @@ -74,95 +67,8 @@ | |||
| 74 | /* We actually can't write halfwords properly if not word aligned */ | 67 | /* We actually can't write halfwords properly if not word aligned */ |
| 75 | static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg) | 68 | static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg) |
| 76 | { | 69 | { |
| 77 | if ((machine_is_mainstone() || machine_is_stargate2()) && reg & 2) { | 70 | if ((machine_is_mainstone() || machine_is_stargate2() || |
| 78 | unsigned int v = val << 16; | 71 | machine_is_pxa_idp()) && reg & 2) { |
| 79 | v |= readl(ioaddr + (reg & ~2)) & 0xffff; | ||
| 80 | writel(v, ioaddr + (reg & ~2)); | ||
| 81 | } else { | ||
| 82 | writew(val, ioaddr + reg); | ||
| 83 | } | ||
| 84 | } | ||
| 85 | |||
| 86 | #elif defined(CONFIG_SA1100_PLEB) | ||
| 87 | /* We can only do 16-bit reads and writes in the static memory space. */ | ||
| 88 | #define SMC_CAN_USE_8BIT 1 | ||
| 89 | #define SMC_CAN_USE_16BIT 1 | ||
| 90 | #define SMC_CAN_USE_32BIT 0 | ||
| 91 | #define SMC_IO_SHIFT 0 | ||
| 92 | #define SMC_NOWAIT 1 | ||
| 93 | |||
| 94 | #define SMC_inb(a, r) readb((a) + (r)) | ||
| 95 | #define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l)) | ||
| 96 | #define SMC_inw(a, r) readw((a) + (r)) | ||
| 97 | #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) | ||
| 98 | #define SMC_outb(v, a, r) writeb(v, (a) + (r)) | ||
| 99 | #define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l)) | ||
| 100 | #define SMC_outw(v, a, r) writew(v, (a) + (r)) | ||
| 101 | #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) | ||
| 102 | |||
| 103 | #define SMC_IRQ_FLAGS (-1) | ||
| 104 | |||
| 105 | #elif defined(CONFIG_SA1100_ASSABET) | ||
| 106 | |||
| 107 | #include <mach/neponset.h> | ||
| 108 | |||
| 109 | /* We can only do 8-bit reads and writes in the static memory space. */ | ||
| 110 | #define SMC_CAN_USE_8BIT 1 | ||
| 111 | #define SMC_CAN_USE_16BIT 0 | ||
| 112 | #define SMC_CAN_USE_32BIT 0 | ||
| 113 | #define SMC_NOWAIT 1 | ||
| 114 | |||
| 115 | /* The first two address lines aren't connected... */ | ||
| 116 | #define SMC_IO_SHIFT 2 | ||
| 117 | |||
| 118 | #define SMC_inb(a, r) readb((a) + (r)) | ||
| 119 | #define SMC_outb(v, a, r) writeb(v, (a) + (r)) | ||
| 120 | #define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l)) | ||
| 121 | #define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l)) | ||
| 122 | #define SMC_IRQ_FLAGS (-1) /* from resource */ | ||
| 123 | |||
| 124 | #elif defined(CONFIG_MACH_LOGICPD_PXA270) || \ | ||
| 125 | defined(CONFIG_MACH_NOMADIK_8815NHK) | ||
| 126 | |||
| 127 | #define SMC_CAN_USE_8BIT 0 | ||
| 128 | #define SMC_CAN_USE_16BIT 1 | ||
| 129 | #define SMC_CAN_USE_32BIT 0 | ||
| 130 | #define SMC_IO_SHIFT 0 | ||
| 131 | #define SMC_NOWAIT 1 | ||
| 132 | |||
| 133 | #define SMC_inw(a, r) readw((a) + (r)) | ||
| 134 | #define SMC_outw(v, a, r) writew(v, (a) + (r)) | ||
| 135 | #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) | ||
| 136 | #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) | ||
| 137 | |||
| 138 | #elif defined(CONFIG_ARCH_INNOKOM) || \ | ||
| 139 | defined(CONFIG_ARCH_PXA_IDP) || \ | ||
| 140 | defined(CONFIG_ARCH_RAMSES) || \ | ||
| 141 | defined(CONFIG_ARCH_PCM027) | ||
| 142 | |||
| 143 | #define SMC_CAN_USE_8BIT 1 | ||
| 144 | #define SMC_CAN_USE_16BIT 1 | ||
| 145 | #define SMC_CAN_USE_32BIT 1 | ||
| 146 | #define SMC_IO_SHIFT 0 | ||
| 147 | #define SMC_NOWAIT 1 | ||
| 148 | #define SMC_USE_PXA_DMA 1 | ||
| 149 | |||
| 150 | #define SMC_inb(a, r) readb((a) + (r)) | ||
| 151 | #define SMC_inw(a, r) readw((a) + (r)) | ||
| 152 | #define SMC_inl(a, r) readl((a) + (r)) | ||
| 153 | #define SMC_outb(v, a, r) writeb(v, (a) + (r)) | ||
| 154 | #define SMC_outl(v, a, r) writel(v, (a) + (r)) | ||
| 155 | #define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) | ||
| 156 | #define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) | ||
| 157 | #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) | ||
| 158 | #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) | ||
| 159 | #define SMC_IRQ_FLAGS (-1) /* from resource */ | ||
| 160 | |||
| 161 | /* We actually can't write halfwords properly if not word aligned */ | ||
| 162 | static inline void | ||
| 163 | SMC_outw(u16 val, void __iomem *ioaddr, int reg) | ||
| 164 | { | ||
| 165 | if (reg & 2) { | ||
| 166 | unsigned int v = val << 16; | 72 | unsigned int v = val << 16; |
| 167 | v |= readl(ioaddr + (reg & ~2)) & 0xffff; | 73 | v |= readl(ioaddr + (reg & ~2)) & 0xffff; |
| 168 | writel(v, ioaddr + (reg & ~2)); | 74 | writel(v, ioaddr + (reg & ~2)); |
| @@ -237,20 +143,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg) | |||
| 237 | #define RPC_LSA_DEFAULT RPC_LED_100_10 | 143 | #define RPC_LSA_DEFAULT RPC_LED_100_10 |
| 238 | #define RPC_LSB_DEFAULT RPC_LED_TX_RX | 144 | #define RPC_LSB_DEFAULT RPC_LED_TX_RX |
| 239 | 145 | ||
| 240 | #elif defined(CONFIG_ARCH_MSM) | ||
| 241 | |||
| 242 | #define SMC_CAN_USE_8BIT 0 | ||
| 243 | #define SMC_CAN_USE_16BIT 1 | ||
| 244 | #define SMC_CAN_USE_32BIT 0 | ||
| 245 | #define SMC_NOWAIT 1 | ||
| 246 | |||
| 247 | #define SMC_inw(a, r) readw((a) + (r)) | ||
| 248 | #define SMC_outw(v, a, r) writew(v, (a) + (r)) | ||
| 249 | #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) | ||
| 250 | #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) | ||
| 251 | |||
| 252 | #define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH | ||
| 253 | |||
| 254 | #elif defined(CONFIG_COLDFIRE) | 146 | #elif defined(CONFIG_COLDFIRE) |
| 255 | 147 | ||
| 256 | #define SMC_CAN_USE_8BIT 0 | 148 | #define SMC_CAN_USE_8BIT 0 |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 55e89b3838f1..a0ea84fe6519 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
| @@ -310,11 +310,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv) | |||
| 310 | spin_lock_irqsave(&priv->lock, flags); | 310 | spin_lock_irqsave(&priv->lock, flags); |
| 311 | if (!priv->eee_active) { | 311 | if (!priv->eee_active) { |
| 312 | priv->eee_active = 1; | 312 | priv->eee_active = 1; |
| 313 | init_timer(&priv->eee_ctrl_timer); | 313 | setup_timer(&priv->eee_ctrl_timer, |
| 314 | priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer; | 314 | stmmac_eee_ctrl_timer, |
| 315 | priv->eee_ctrl_timer.data = (unsigned long)priv; | 315 | (unsigned long)priv); |
| 316 | priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer); | 316 | mod_timer(&priv->eee_ctrl_timer, |
| 317 | add_timer(&priv->eee_ctrl_timer); | 317 | STMMAC_LPI_T(eee_timer)); |
| 318 | 318 | ||
| 319 | priv->hw->mac->set_eee_timer(priv->hw, | 319 | priv->hw->mac->set_eee_timer(priv->hw, |
| 320 | STMMAC_DEFAULT_LIT_LS, | 320 | STMMAC_DEFAULT_LIT_LS, |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index fb846ebba1d9..f9b42f11950f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | |||
| @@ -272,6 +272,37 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) | |||
| 272 | struct stmmac_priv *priv = NULL; | 272 | struct stmmac_priv *priv = NULL; |
| 273 | struct plat_stmmacenet_data *plat_dat = NULL; | 273 | struct plat_stmmacenet_data *plat_dat = NULL; |
| 274 | const char *mac = NULL; | 274 | const char *mac = NULL; |
| 275 | int irq, wol_irq, lpi_irq; | ||
| 276 | |||
| 277 | /* Get IRQ information early to have an ability to ask for deferred | ||
| 278 | * probe if needed before we went too far with resource allocation. | ||
| 279 | */ | ||
| 280 | irq = platform_get_irq_byname(pdev, "macirq"); | ||
| 281 | if (irq < 0) { | ||
| 282 | if (irq != -EPROBE_DEFER) { | ||
| 283 | dev_err(dev, | ||
| 284 | "MAC IRQ configuration information not found\n"); | ||
| 285 | } | ||
| 286 | return irq; | ||
| 287 | } | ||
| 288 | |||
| 289 | /* On some platforms e.g. SPEAr the wake up irq differs from the mac irq | ||
| 290 | * The external wake up irq can be passed through the platform code | ||
| 291 | * named as "eth_wake_irq" | ||
| 292 | * | ||
| 293 | * In case the wake up interrupt is not passed from the platform | ||
| 294 | * so the driver will continue to use the mac irq (ndev->irq) | ||
| 295 | */ | ||
| 296 | wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); | ||
| 297 | if (wol_irq < 0) { | ||
| 298 | if (wol_irq == -EPROBE_DEFER) | ||
| 299 | return -EPROBE_DEFER; | ||
| 300 | wol_irq = irq; | ||
| 301 | } | ||
| 302 | |||
| 303 | lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); | ||
| 304 | if (lpi_irq == -EPROBE_DEFER) | ||
| 305 | return -EPROBE_DEFER; | ||
| 275 | 306 | ||
| 276 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 307 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 277 | addr = devm_ioremap_resource(dev, res); | 308 | addr = devm_ioremap_resource(dev, res); |
| @@ -323,39 +354,15 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) | |||
| 323 | return PTR_ERR(priv); | 354 | return PTR_ERR(priv); |
| 324 | } | 355 | } |
| 325 | 356 | ||
| 357 | /* Copy IRQ values to priv structure which is now available */ | ||
| 358 | priv->dev->irq = irq; | ||
| 359 | priv->wol_irq = wol_irq; | ||
| 360 | priv->lpi_irq = lpi_irq; | ||
| 361 | |||
| 326 | /* Get MAC address if available (DT) */ | 362 | /* Get MAC address if available (DT) */ |
| 327 | if (mac) | 363 | if (mac) |
| 328 | memcpy(priv->dev->dev_addr, mac, ETH_ALEN); | 364 | memcpy(priv->dev->dev_addr, mac, ETH_ALEN); |
| 329 | 365 | ||
| 330 | /* Get the MAC information */ | ||
| 331 | priv->dev->irq = platform_get_irq_byname(pdev, "macirq"); | ||
| 332 | if (priv->dev->irq < 0) { | ||
| 333 | if (priv->dev->irq != -EPROBE_DEFER) { | ||
| 334 | netdev_err(priv->dev, | ||
| 335 | "MAC IRQ configuration information not found\n"); | ||
| 336 | } | ||
| 337 | return priv->dev->irq; | ||
| 338 | } | ||
| 339 | |||
| 340 | /* | ||
| 341 | * On some platforms e.g. SPEAr the wake up irq differs from the mac irq | ||
| 342 | * The external wake up irq can be passed through the platform code | ||
| 343 | * named as "eth_wake_irq" | ||
| 344 | * | ||
| 345 | * In case the wake up interrupt is not passed from the platform | ||
| 346 | * so the driver will continue to use the mac irq (ndev->irq) | ||
| 347 | */ | ||
| 348 | priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); | ||
| 349 | if (priv->wol_irq < 0) { | ||
| 350 | if (priv->wol_irq == -EPROBE_DEFER) | ||
| 351 | return -EPROBE_DEFER; | ||
| 352 | priv->wol_irq = priv->dev->irq; | ||
| 353 | } | ||
| 354 | |||
| 355 | priv->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); | ||
| 356 | if (priv->lpi_irq == -EPROBE_DEFER) | ||
| 357 | return -EPROBE_DEFER; | ||
| 358 | |||
| 359 | platform_set_drvdata(pdev, priv->dev); | 366 | platform_set_drvdata(pdev, priv->dev); |
| 360 | 367 | ||
| 361 | pr_debug("STMMAC platform driver registration completed"); | 368 | pr_debug("STMMAC platform driver registration completed"); |
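The stmmac platform rework fetches every IRQ before any resource allocation so that -EPROBE_DEFER can be returned cleanly and early. A hedged sketch of that probe ordering, showing only the IRQ part with the rest of the probe elided:

#include <linux/errno.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	int irq, wol_irq;

	/* Ask for IRQs first: if the interrupt controller is not ready
	 * yet, bail out with -EPROBE_DEFER before allocating anything.
	 */
	irq = platform_get_irq_byname(pdev, "macirq");
	if (irq < 0)
		return irq;		/* includes -EPROBE_DEFER */

	wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
	if (wol_irq == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	if (wol_irq < 0)
		wol_irq = irq;		/* optional IRQ: fall back to the MAC IRQ */

	/* ... ioremap, netdev allocation and the rest follow here ... */
	return 0;
}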
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 4b51f903fb73..0c5842aeb807 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c | |||
| @@ -6989,10 +6989,10 @@ static int niu_class_to_ethflow(u64 class, int *flow_type) | |||
| 6989 | *flow_type = IP_USER_FLOW; | 6989 | *flow_type = IP_USER_FLOW; |
| 6990 | break; | 6990 | break; |
| 6991 | default: | 6991 | default: |
| 6992 | return 0; | 6992 | return -EINVAL; |
| 6993 | } | 6993 | } |
| 6994 | 6994 | ||
| 6995 | return 1; | 6995 | return 0; |
| 6996 | } | 6996 | } |
| 6997 | 6997 | ||
| 6998 | static int niu_ethflow_to_class(int flow_type, u64 *class) | 6998 | static int niu_ethflow_to_class(int flow_type, u64 *class) |
| @@ -7198,11 +7198,9 @@ static int niu_get_ethtool_tcam_entry(struct niu *np, | |||
| 7198 | class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >> | 7198 | class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >> |
| 7199 | TCAM_V4KEY0_CLASS_CODE_SHIFT; | 7199 | TCAM_V4KEY0_CLASS_CODE_SHIFT; |
| 7200 | ret = niu_class_to_ethflow(class, &fsp->flow_type); | 7200 | ret = niu_class_to_ethflow(class, &fsp->flow_type); |
| 7201 | |||
| 7202 | if (ret < 0) { | 7201 | if (ret < 0) { |
| 7203 | netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n", | 7202 | netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n", |
| 7204 | parent->index); | 7203 | parent->index); |
| 7205 | ret = -EINVAL; | ||
| 7206 | goto out; | 7204 | goto out; |
| 7207 | } | 7205 | } |
| 7208 | 7206 | ||
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 7d8dd0d2182e..a1bbaf6352ba 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
| @@ -1103,7 +1103,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries( | |||
| 1103 | cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, | 1103 | cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, |
| 1104 | port_mask, ALE_VLAN, slave->port_vlan, 0); | 1104 | port_mask, ALE_VLAN, slave->port_vlan, 0); |
| 1105 | cpsw_ale_add_ucast(priv->ale, priv->mac_addr, | 1105 | cpsw_ale_add_ucast(priv->ale, priv->mac_addr, |
| 1106 | priv->host_port, ALE_VLAN, slave->port_vlan); | 1106 | priv->host_port, ALE_VLAN | ALE_SECURE, slave->port_vlan); |
| 1107 | } | 1107 | } |
| 1108 | 1108 | ||
| 1109 | static void soft_reset_slave(struct cpsw_slave *slave) | 1109 | static void soft_reset_slave(struct cpsw_slave *slave) |
| @@ -2466,6 +2466,7 @@ static int cpsw_remove(struct platform_device *pdev) | |||
| 2466 | return 0; | 2466 | return 0; |
| 2467 | } | 2467 | } |
| 2468 | 2468 | ||
| 2469 | #ifdef CONFIG_PM_SLEEP | ||
| 2469 | static int cpsw_suspend(struct device *dev) | 2470 | static int cpsw_suspend(struct device *dev) |
| 2470 | { | 2471 | { |
| 2471 | struct platform_device *pdev = to_platform_device(dev); | 2472 | struct platform_device *pdev = to_platform_device(dev); |
| @@ -2518,11 +2519,9 @@ static int cpsw_resume(struct device *dev) | |||
| 2518 | } | 2519 | } |
| 2519 | return 0; | 2520 | return 0; |
| 2520 | } | 2521 | } |
| 2522 | #endif | ||
| 2521 | 2523 | ||
| 2522 | static const struct dev_pm_ops cpsw_pm_ops = { | 2524 | static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume); |
| 2523 | .suspend = cpsw_suspend, | ||
| 2524 | .resume = cpsw_resume, | ||
| 2525 | }; | ||
| 2526 | 2525 | ||
| 2527 | static const struct of_device_id cpsw_of_mtable[] = { | 2526 | static const struct of_device_id cpsw_of_mtable[] = { |
| 2528 | { .compatible = "ti,cpsw", }, | 2527 | { .compatible = "ti,cpsw", }, |
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 98655b44b97e..c00084d689f3 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c | |||
| @@ -423,6 +423,7 @@ static int davinci_mdio_remove(struct platform_device *pdev) | |||
| 423 | return 0; | 423 | return 0; |
| 424 | } | 424 | } |
| 425 | 425 | ||
| 426 | #ifdef CONFIG_PM_SLEEP | ||
| 426 | static int davinci_mdio_suspend(struct device *dev) | 427 | static int davinci_mdio_suspend(struct device *dev) |
| 427 | { | 428 | { |
| 428 | struct davinci_mdio_data *data = dev_get_drvdata(dev); | 429 | struct davinci_mdio_data *data = dev_get_drvdata(dev); |
| @@ -464,10 +465,10 @@ static int davinci_mdio_resume(struct device *dev) | |||
| 464 | 465 | ||
| 465 | return 0; | 466 | return 0; |
| 466 | } | 467 | } |
| 468 | #endif | ||
| 467 | 469 | ||
| 468 | static const struct dev_pm_ops davinci_mdio_pm_ops = { | 470 | static const struct dev_pm_ops davinci_mdio_pm_ops = { |
| 469 | .suspend_late = davinci_mdio_suspend, | 471 | SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume) |
| 470 | .resume_early = davinci_mdio_resume, | ||
| 471 | }; | 472 | }; |
| 472 | 473 | ||
| 473 | #if IS_ENABLED(CONFIG_OF) | 474 | #if IS_ENABLED(CONFIG_OF) |
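Both TI hunks (cpsw and davinci_mdio) wrap the suspend/resume callbacks in CONFIG_PM_SLEEP and let the dev_pm_ops helper macros drop the references when sleep support is compiled out, avoiding "defined but not used" warnings. A minimal sketch of the SIMPLE_DEV_PM_OPS variant with placeholder callbacks:

#include <linux/device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int example_suspend(struct device *dev)
{
	/* quiesce the hardware here */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* bring the hardware back up here */
	return 0;
}
#endif

/* Expands to empty sleep ops when CONFIG_PM_SLEEP is not set, so the
 * callbacks above can be compiled out without unused-function warnings.
 */
static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);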
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index f7e0f0f7c2e2..9e16a2819d48 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c | |||
| @@ -938,7 +938,7 @@ static void eth_set_mcast_list(struct net_device *dev) | |||
| 938 | int i; | 938 | int i; |
| 939 | static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; | 939 | static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; |
| 940 | 940 | ||
| 941 | if (dev->flags & IFF_ALLMULTI) { | 941 | if ((dev->flags & IFF_ALLMULTI) && !(dev->flags & IFF_PROMISC)) { |
| 942 | for (i = 0; i < ETH_ALEN; i++) { | 942 | for (i = 0; i < ETH_ALEN; i++) { |
| 943 | __raw_writel(allmulti[i], &port->regs->mcast_addr[i]); | 943 | __raw_writel(allmulti[i], &port->regs->mcast_addr[i]); |
| 944 | __raw_writel(allmulti[i], &port->regs->mcast_mask[i]); | 944 | __raw_writel(allmulti[i], &port->regs->mcast_mask[i]); |
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index e40fdfccc9c1..27ecc5c4fa26 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c | |||
| @@ -654,11 +654,14 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q, | |||
| 654 | } /* else everything is zero */ | 654 | } /* else everything is zero */ |
| 655 | } | 655 | } |
| 656 | 656 | ||
| 657 | /* Neighbour code has some assumptions on HH_DATA_MOD alignment */ | ||
| 658 | #define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN) | ||
| 659 | |||
| 657 | /* Get packet from user space buffer */ | 660 | /* Get packet from user space buffer */ |
| 658 | static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, | 661 | static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, |
| 659 | struct iov_iter *from, int noblock) | 662 | struct iov_iter *from, int noblock) |
| 660 | { | 663 | { |
| 661 | int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN); | 664 | int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE); |
| 662 | struct sk_buff *skb; | 665 | struct sk_buff *skb; |
| 663 | struct macvlan_dev *vlan; | 666 | struct macvlan_dev *vlan; |
| 664 | unsigned long total_len = iov_iter_count(from); | 667 | unsigned long total_len = iov_iter_count(from); |
| @@ -722,7 +725,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, | |||
| 722 | linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len); | 725 | linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len); |
| 723 | } | 726 | } |
| 724 | 727 | ||
| 725 | skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen, | 728 | skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen, |
| 726 | linear, noblock, &err); | 729 | linear, noblock, &err); |
| 727 | if (!skb) | 730 | if (!skb) |
| 728 | goto err; | 731 | goto err; |
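The macvtap change reserves HH_DATA_OFF(ETH_HLEN) bytes of headroom instead of NET_IP_ALIGN so the buffers it builds meet the HH_DATA_MOD alignment the neighbour/hard-header code assumes. A reduced sketch of reserving that headroom on a freshly allocated skb; macvtap itself goes through its own sock-based allocation helper:

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *example_alloc_aligned_skb(unsigned int len)
{
	unsigned int reserve = HH_DATA_OFF(ETH_HLEN);
	struct sk_buff *skb = alloc_skb(len + reserve, GFP_KERNEL);

	if (skb)
		skb_reserve(skb, reserve);	/* headroom before the payload */
	return skb;
}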
diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c index 9e3af54c9010..32efbd48f326 100644 --- a/drivers/net/phy/amd-xgbe-phy.c +++ b/drivers/net/phy/amd-xgbe-phy.c | |||
| @@ -92,6 +92,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver"); | |||
| 92 | #define XGBE_PHY_CDR_RATE_PROPERTY "amd,serdes-cdr-rate" | 92 | #define XGBE_PHY_CDR_RATE_PROPERTY "amd,serdes-cdr-rate" |
| 93 | #define XGBE_PHY_PQ_SKEW_PROPERTY "amd,serdes-pq-skew" | 93 | #define XGBE_PHY_PQ_SKEW_PROPERTY "amd,serdes-pq-skew" |
| 94 | #define XGBE_PHY_TX_AMP_PROPERTY "amd,serdes-tx-amp" | 94 | #define XGBE_PHY_TX_AMP_PROPERTY "amd,serdes-tx-amp" |
| 95 | #define XGBE_PHY_DFE_CFG_PROPERTY "amd,serdes-dfe-tap-config" | ||
| 96 | #define XGBE_PHY_DFE_ENA_PROPERTY "amd,serdes-dfe-tap-enable" | ||
| 95 | 97 | ||
| 96 | #define XGBE_PHY_SPEEDS 3 | 98 | #define XGBE_PHY_SPEEDS 3 |
| 97 | #define XGBE_PHY_SPEED_1000 0 | 99 | #define XGBE_PHY_SPEED_1000 0 |
| @@ -177,10 +179,12 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver"); | |||
| 177 | #define SPEED_10000_BLWC 0 | 179 | #define SPEED_10000_BLWC 0 |
| 178 | #define SPEED_10000_CDR 0x7 | 180 | #define SPEED_10000_CDR 0x7 |
| 179 | #define SPEED_10000_PLL 0x1 | 181 | #define SPEED_10000_PLL 0x1 |
| 180 | #define SPEED_10000_PQ 0x1e | 182 | #define SPEED_10000_PQ 0x12 |
| 181 | #define SPEED_10000_RATE 0x0 | 183 | #define SPEED_10000_RATE 0x0 |
| 182 | #define SPEED_10000_TXAMP 0xa | 184 | #define SPEED_10000_TXAMP 0xa |
| 183 | #define SPEED_10000_WORD 0x7 | 185 | #define SPEED_10000_WORD 0x7 |
| 186 | #define SPEED_10000_DFE_TAP_CONFIG 0x1 | ||
| 187 | #define SPEED_10000_DFE_TAP_ENABLE 0x7f | ||
| 184 | 188 | ||
| 185 | #define SPEED_2500_BLWC 1 | 189 | #define SPEED_2500_BLWC 1 |
| 186 | #define SPEED_2500_CDR 0x2 | 190 | #define SPEED_2500_CDR 0x2 |
| @@ -189,6 +193,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver"); | |||
| 189 | #define SPEED_2500_RATE 0x1 | 193 | #define SPEED_2500_RATE 0x1 |
| 190 | #define SPEED_2500_TXAMP 0xf | 194 | #define SPEED_2500_TXAMP 0xf |
| 191 | #define SPEED_2500_WORD 0x1 | 195 | #define SPEED_2500_WORD 0x1 |
| 196 | #define SPEED_2500_DFE_TAP_CONFIG 0x3 | ||
| 197 | #define SPEED_2500_DFE_TAP_ENABLE 0x0 | ||
| 192 | 198 | ||
| 193 | #define SPEED_1000_BLWC 1 | 199 | #define SPEED_1000_BLWC 1 |
| 194 | #define SPEED_1000_CDR 0x2 | 200 | #define SPEED_1000_CDR 0x2 |
| @@ -197,16 +203,25 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver"); | |||
| 197 | #define SPEED_1000_RATE 0x3 | 203 | #define SPEED_1000_RATE 0x3 |
| 198 | #define SPEED_1000_TXAMP 0xf | 204 | #define SPEED_1000_TXAMP 0xf |
| 199 | #define SPEED_1000_WORD 0x1 | 205 | #define SPEED_1000_WORD 0x1 |
| 206 | #define SPEED_1000_DFE_TAP_CONFIG 0x3 | ||
| 207 | #define SPEED_1000_DFE_TAP_ENABLE 0x0 | ||
| 200 | 208 | ||
| 201 | /* SerDes RxTx register offsets */ | 209 | /* SerDes RxTx register offsets */ |
| 210 | #define RXTX_REG6 0x0018 | ||
| 202 | #define RXTX_REG20 0x0050 | 211 | #define RXTX_REG20 0x0050 |
| 212 | #define RXTX_REG22 0x0058 | ||
| 203 | #define RXTX_REG114 0x01c8 | 213 | #define RXTX_REG114 0x01c8 |
| 214 | #define RXTX_REG129 0x0204 | ||
| 204 | 215 | ||
| 205 | /* SerDes RxTx register entry bit positions and sizes */ | 216 | /* SerDes RxTx register entry bit positions and sizes */ |
| 217 | #define RXTX_REG6_RESETB_RXD_INDEX 8 | ||
| 218 | #define RXTX_REG6_RESETB_RXD_WIDTH 1 | ||
| 206 | #define RXTX_REG20_BLWC_ENA_INDEX 2 | 219 | #define RXTX_REG20_BLWC_ENA_INDEX 2 |
| 207 | #define RXTX_REG20_BLWC_ENA_WIDTH 1 | 220 | #define RXTX_REG20_BLWC_ENA_WIDTH 1 |
| 208 | #define RXTX_REG114_PQ_REG_INDEX 9 | 221 | #define RXTX_REG114_PQ_REG_INDEX 9 |
| 209 | #define RXTX_REG114_PQ_REG_WIDTH 7 | 222 | #define RXTX_REG114_PQ_REG_WIDTH 7 |
| 223 | #define RXTX_REG129_RXDFE_CONFIG_INDEX 14 | ||
| 224 | #define RXTX_REG129_RXDFE_CONFIG_WIDTH 2 | ||
| 210 | 225 | ||
| 211 | /* Bit setting and getting macros | 226 | /* Bit setting and getting macros |
| 212 | * The get macro will extract the current bit field value from within | 227 | * The get macro will extract the current bit field value from within |
| @@ -333,6 +348,18 @@ static const u32 amd_xgbe_phy_serdes_tx_amp[] = { | |||
| 333 | SPEED_10000_TXAMP, | 348 | SPEED_10000_TXAMP, |
| 334 | }; | 349 | }; |
| 335 | 350 | ||
| 351 | static const u32 amd_xgbe_phy_serdes_dfe_tap_cfg[] = { | ||
| 352 | SPEED_1000_DFE_TAP_CONFIG, | ||
| 353 | SPEED_2500_DFE_TAP_CONFIG, | ||
| 354 | SPEED_10000_DFE_TAP_CONFIG, | ||
| 355 | }; | ||
| 356 | |||
| 357 | static const u32 amd_xgbe_phy_serdes_dfe_tap_ena[] = { | ||
| 358 | SPEED_1000_DFE_TAP_ENABLE, | ||
| 359 | SPEED_2500_DFE_TAP_ENABLE, | ||
| 360 | SPEED_10000_DFE_TAP_ENABLE, | ||
| 361 | }; | ||
| 362 | |||
| 336 | enum amd_xgbe_phy_an { | 363 | enum amd_xgbe_phy_an { |
| 337 | AMD_XGBE_AN_READY = 0, | 364 | AMD_XGBE_AN_READY = 0, |
| 338 | AMD_XGBE_AN_PAGE_RECEIVED, | 365 | AMD_XGBE_AN_PAGE_RECEIVED, |
| @@ -393,6 +420,8 @@ struct amd_xgbe_phy_priv { | |||
| 393 | u32 serdes_cdr_rate[XGBE_PHY_SPEEDS]; | 420 | u32 serdes_cdr_rate[XGBE_PHY_SPEEDS]; |
| 394 | u32 serdes_pq_skew[XGBE_PHY_SPEEDS]; | 421 | u32 serdes_pq_skew[XGBE_PHY_SPEEDS]; |
| 395 | u32 serdes_tx_amp[XGBE_PHY_SPEEDS]; | 422 | u32 serdes_tx_amp[XGBE_PHY_SPEEDS]; |
| 423 | u32 serdes_dfe_tap_cfg[XGBE_PHY_SPEEDS]; | ||
| 424 | u32 serdes_dfe_tap_ena[XGBE_PHY_SPEEDS]; | ||
| 396 | 425 | ||
| 397 | /* Auto-negotiation state machine support */ | 426 | /* Auto-negotiation state machine support */ |
| 398 | struct mutex an_mutex; | 427 | struct mutex an_mutex; |
| @@ -481,11 +510,16 @@ static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev) | |||
| 481 | status = XSIR0_IOREAD(priv, SIR0_STATUS); | 510 | status = XSIR0_IOREAD(priv, SIR0_STATUS); |
| 482 | if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) && | 511 | if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) && |
| 483 | XSIR_GET_BITS(status, SIR0_STATUS, TX_READY)) | 512 | XSIR_GET_BITS(status, SIR0_STATUS, TX_READY)) |
| 484 | return; | 513 | goto rx_reset; |
| 485 | } | 514 | } |
| 486 | 515 | ||
| 487 | netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n", | 516 | netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n", |
| 488 | status); | 517 | status); |
| 518 | |||
| 519 | rx_reset: | ||
| 520 | /* Perform Rx reset for the DFE changes */ | ||
| 521 | XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 0); | ||
| 522 | XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 1); | ||
| 489 | } | 523 | } |
| 490 | 524 | ||
| 491 | static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev) | 525 | static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev) |
| @@ -534,6 +568,10 @@ static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev) | |||
| 534 | priv->serdes_blwc[XGBE_PHY_SPEED_10000]); | 568 | priv->serdes_blwc[XGBE_PHY_SPEED_10000]); |
| 535 | XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, | 569 | XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, |
| 536 | priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]); | 570 | priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]); |
| 571 | XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG, | ||
| 572 | priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_10000]); | ||
| 573 | XRXTX_IOWRITE(priv, RXTX_REG22, | ||
| 574 | priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_10000]); | ||
| 537 | 575 | ||
| 538 | amd_xgbe_phy_serdes_complete_ratechange(phydev); | 576 | amd_xgbe_phy_serdes_complete_ratechange(phydev); |
| 539 | 577 | ||
| @@ -586,6 +624,10 @@ static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev) | |||
| 586 | priv->serdes_blwc[XGBE_PHY_SPEED_2500]); | 624 | priv->serdes_blwc[XGBE_PHY_SPEED_2500]); |
| 587 | XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, | 625 | XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, |
| 588 | priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]); | 626 | priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]); |
| 627 | XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG, | ||
| 628 | priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_2500]); | ||
| 629 | XRXTX_IOWRITE(priv, RXTX_REG22, | ||
| 630 | priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_2500]); | ||
| 589 | 631 | ||
| 590 | amd_xgbe_phy_serdes_complete_ratechange(phydev); | 632 | amd_xgbe_phy_serdes_complete_ratechange(phydev); |
| 591 | 633 | ||
| @@ -638,6 +680,10 @@ static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev) | |||
| 638 | priv->serdes_blwc[XGBE_PHY_SPEED_1000]); | 680 | priv->serdes_blwc[XGBE_PHY_SPEED_1000]); |
| 639 | XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, | 681 | XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, |
| 640 | priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]); | 682 | priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]); |
| 683 | XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG, | ||
| 684 | priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_1000]); | ||
| 685 | XRXTX_IOWRITE(priv, RXTX_REG22, | ||
| 686 | priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_1000]); | ||
| 641 | 687 | ||
| 642 | amd_xgbe_phy_serdes_complete_ratechange(phydev); | 688 | amd_xgbe_phy_serdes_complete_ratechange(phydev); |
| 643 | 689 | ||
| @@ -1668,6 +1714,38 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev) | |||
| 1668 | sizeof(priv->serdes_tx_amp)); | 1714 | sizeof(priv->serdes_tx_amp)); |
| 1669 | } | 1715 | } |
| 1670 | 1716 | ||
| 1717 | if (device_property_present(phy_dev, XGBE_PHY_DFE_CFG_PROPERTY)) { | ||
| 1718 | ret = device_property_read_u32_array(phy_dev, | ||
| 1719 | XGBE_PHY_DFE_CFG_PROPERTY, | ||
| 1720 | priv->serdes_dfe_tap_cfg, | ||
| 1721 | XGBE_PHY_SPEEDS); | ||
| 1722 | if (ret) { | ||
| 1723 | dev_err(dev, "invalid %s property\n", | ||
| 1724 | XGBE_PHY_DFE_CFG_PROPERTY); | ||
| 1725 | goto err_sir1; | ||
| 1726 | } | ||
| 1727 | } else { | ||
| 1728 | memcpy(priv->serdes_dfe_tap_cfg, | ||
| 1729 | amd_xgbe_phy_serdes_dfe_tap_cfg, | ||
| 1730 | sizeof(priv->serdes_dfe_tap_cfg)); | ||
| 1731 | } | ||
| 1732 | |||
| 1733 | if (device_property_present(phy_dev, XGBE_PHY_DFE_ENA_PROPERTY)) { | ||
| 1734 | ret = device_property_read_u32_array(phy_dev, | ||
| 1735 | XGBE_PHY_DFE_ENA_PROPERTY, | ||
| 1736 | priv->serdes_dfe_tap_ena, | ||
| 1737 | XGBE_PHY_SPEEDS); | ||
| 1738 | if (ret) { | ||
| 1739 | dev_err(dev, "invalid %s property\n", | ||
| 1740 | XGBE_PHY_DFE_ENA_PROPERTY); | ||
| 1741 | goto err_sir1; | ||
| 1742 | } | ||
| 1743 | } else { | ||
| 1744 | memcpy(priv->serdes_dfe_tap_ena, | ||
| 1745 | amd_xgbe_phy_serdes_dfe_tap_ena, | ||
| 1746 | sizeof(priv->serdes_dfe_tap_ena)); | ||
| 1747 | } | ||
| 1748 | |||
| 1671 | phydev->priv = priv; | 1749 | phydev->priv = priv; |
| 1672 | 1750 | ||
| 1673 | if (!priv->adev || acpi_disabled) | 1751 | if (!priv->adev || acpi_disabled) |
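The amd-xgbe-phy probe additions follow a "firmware property or built-in default" pattern: an optional per-speed u32 array is read with device_property_read_u32_array(), falling back to a static table when the property is absent. A reduced sketch with an invented property name and default values:

#include <linux/device.h>
#include <linux/property.h>
#include <linux/string.h>

#define EXAMPLE_SPEEDS	3
static const u32 example_defaults[EXAMPLE_SPEEDS] = { 0x3, 0x3, 0x1 };

/* Fill 'out' from an optional device property, else from the defaults. */
static int example_read_u32s(struct device *dev, const char *prop, u32 *out)
{
	if (device_property_present(dev, prop))
		return device_property_read_u32_array(dev, prop, out,
						      EXAMPLE_SPEEDS);

	memcpy(out, example_defaults, sizeof(example_defaults));
	return 0;
}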
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index cdcac6aa4260..52cd8db2c57d 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
| @@ -236,6 +236,25 @@ static inline unsigned int phy_find_valid(unsigned int idx, u32 features) | |||
| 236 | } | 236 | } |
| 237 | 237 | ||
| 238 | /** | 238 | /** |
| 239 | * phy_check_valid - check if there is a valid PHY setting which matches | ||
| 240 | * speed, duplex, and feature mask | ||
| 241 | * @speed: speed to match | ||
| 242 | * @duplex: duplex to match | ||
| 243 | * @features: A mask of the valid settings | ||
| 244 | * | ||
| 245 | * Description: Returns true if there is a valid setting, false otherwise. | ||
| 246 | */ | ||
| 247 | static inline bool phy_check_valid(int speed, int duplex, u32 features) | ||
| 248 | { | ||
| 249 | unsigned int idx; | ||
| 250 | |||
| 251 | idx = phy_find_valid(phy_find_setting(speed, duplex), features); | ||
| 252 | |||
| 253 | return settings[idx].speed == speed && settings[idx].duplex == duplex && | ||
| 254 | (settings[idx].setting & features); | ||
| 255 | } | ||
| 256 | |||
| 257 | /** | ||
| 239 | * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex | 258 | * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex |
| 240 | * @phydev: the target phy_device struct | 259 | * @phydev: the target phy_device struct |
| 241 | * | 260 | * |
| @@ -1045,7 +1064,6 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) | |||
| 1045 | int eee_lp, eee_cap, eee_adv; | 1064 | int eee_lp, eee_cap, eee_adv; |
| 1046 | u32 lp, cap, adv; | 1065 | u32 lp, cap, adv; |
| 1047 | int status; | 1066 | int status; |
| 1048 | unsigned int idx; | ||
| 1049 | 1067 | ||
| 1050 | /* Read phy status to properly get the right settings */ | 1068 | /* Read phy status to properly get the right settings */ |
| 1051 | status = phy_read_status(phydev); | 1069 | status = phy_read_status(phydev); |
| @@ -1077,8 +1095,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) | |||
| 1077 | 1095 | ||
| 1078 | adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv); | 1096 | adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv); |
| 1079 | lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp); | 1097 | lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp); |
| 1080 | idx = phy_find_setting(phydev->speed, phydev->duplex); | 1098 | if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv)) |
| 1081 | if (!(lp & adv & settings[idx].setting)) | ||
| 1082 | goto eee_exit_err; | 1099 | goto eee_exit_err; |
| 1083 | 1100 | ||
| 1084 | if (clk_stop_enable) { | 1101 | if (clk_stop_enable) { |
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 0e62274e884a..7d394846afc2 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c | |||
| @@ -43,9 +43,7 @@ | |||
| 43 | 43 | ||
| 44 | static struct team_port *team_port_get_rcu(const struct net_device *dev) | 44 | static struct team_port *team_port_get_rcu(const struct net_device *dev) |
| 45 | { | 45 | { |
| 46 | struct team_port *port = rcu_dereference(dev->rx_handler_data); | 46 | return rcu_dereference(dev->rx_handler_data); |
| 47 | |||
| 48 | return team_port_exists(dev) ? port : NULL; | ||
| 49 | } | 47 | } |
| 50 | 48 | ||
| 51 | static struct team_port *team_port_get_rtnl(const struct net_device *dev) | 49 | static struct team_port *team_port_get_rtnl(const struct net_device *dev) |
| @@ -1732,11 +1730,11 @@ static int team_set_mac_address(struct net_device *dev, void *p) | |||
| 1732 | if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data)) | 1730 | if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data)) |
| 1733 | return -EADDRNOTAVAIL; | 1731 | return -EADDRNOTAVAIL; |
| 1734 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | 1732 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); |
| 1735 | rcu_read_lock(); | 1733 | mutex_lock(&team->lock); |
| 1736 | list_for_each_entry_rcu(port, &team->port_list, list) | 1734 | list_for_each_entry(port, &team->port_list, list) |
| 1737 | if (team->ops.port_change_dev_addr) | 1735 | if (team->ops.port_change_dev_addr) |
| 1738 | team->ops.port_change_dev_addr(team, port); | 1736 | team->ops.port_change_dev_addr(team, port); |
| 1739 | rcu_read_unlock(); | 1737 | mutex_unlock(&team->lock); |
| 1740 | return 0; | 1738 | return 0; |
| 1741 | } | 1739 | } |
| 1742 | 1740 | ||
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index 3bd9678315ad..7ba8d0885f12 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig | |||
| @@ -161,6 +161,7 @@ config USB_NET_AX8817X | |||
| 161 | * Linksys USB200M | 161 | * Linksys USB200M |
| 162 | * Netgear FA120 | 162 | * Netgear FA120 |
| 163 | * Sitecom LN-029 | 163 | * Sitecom LN-029 |
| 164 | * Sitecom LN-028 | ||
| 164 | * Intellinet USB 2.0 Ethernet | 165 | * Intellinet USB 2.0 Ethernet |
| 165 | * ST Lab USB 2.0 Ethernet | 166 | * ST Lab USB 2.0 Ethernet |
| 166 | * TrendNet TU2-ET100 | 167 | * TrendNet TU2-ET100 |
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index bf49792062a2..1173a24feda3 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c | |||
| @@ -979,6 +979,10 @@ static const struct usb_device_id products [] = { | |||
| 979 | USB_DEVICE (0x0df6, 0x0056), | 979 | USB_DEVICE (0x0df6, 0x0056), |
| 980 | .driver_info = (unsigned long) &ax88178_info, | 980 | .driver_info = (unsigned long) &ax88178_info, |
| 981 | }, { | 981 | }, { |
| 982 | // Sitecom LN-028 "USB 2.0 10/100/1000 Ethernet adapter" | ||
| 983 | USB_DEVICE (0x0df6, 0x061c), | ||
| 984 | .driver_info = (unsigned long) &ax88178_info, | ||
| 985 | }, { | ||
| 982 | // corega FEther USB2-TX | 986 | // corega FEther USB2-TX |
| 983 | USB_DEVICE (0x07aa, 0x0017), | 987 | USB_DEVICE (0x07aa, 0x0017), |
| 984 | .driver_info = (unsigned long) &ax8817x_info, | 988 | .driver_info = (unsigned long) &ax8817x_info, |
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 9cdfb3fe9c15..778e91531fac 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c | |||
| @@ -1594,7 +1594,7 @@ hso_wait_modem_status(struct hso_serial *serial, unsigned long arg) | |||
| 1594 | } | 1594 | } |
| 1595 | cprev = cnow; | 1595 | cprev = cnow; |
| 1596 | } | 1596 | } |
| 1597 | current->state = TASK_RUNNING; | 1597 | __set_current_state(TASK_RUNNING); |
| 1598 | remove_wait_queue(&tiocmget->waitq, &wait); | 1598 | remove_wait_queue(&tiocmget->waitq, &wait); |
| 1599 | 1599 | ||
| 1600 | return ret; | 1600 | return ret; |
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c index 3d18bb0eee85..1bfe0fcaccf5 100644 --- a/drivers/net/usb/plusb.c +++ b/drivers/net/usb/plusb.c | |||
| @@ -134,6 +134,11 @@ static const struct usb_device_id products [] = { | |||
| 134 | }, { | 134 | }, { |
| 135 | USB_DEVICE(0x050d, 0x258a), /* Belkin F5U258/F5U279 (PL-25A1) */ | 135 | USB_DEVICE(0x050d, 0x258a), /* Belkin F5U258/F5U279 (PL-25A1) */ |
| 136 | .driver_info = (unsigned long) &prolific_info, | 136 | .driver_info = (unsigned long) &prolific_info, |
| 137 | }, { | ||
| 138 | USB_DEVICE(0x3923, 0x7825), /* National Instruments USB | ||
| 139 | * Host-to-Host Cable | ||
| 140 | */ | ||
| 141 | .driver_info = (unsigned long) &prolific_info, | ||
| 137 | }, | 142 | }, |
| 138 | 143 | ||
| 139 | { }, // END | 144 | { }, // END |
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index 83c39e2858bf..88d121d43c08 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c | |||
| @@ -806,21 +806,21 @@ static ssize_t cosa_read(struct file *file, | |||
| 806 | spin_lock_irqsave(&cosa->lock, flags); | 806 | spin_lock_irqsave(&cosa->lock, flags); |
| 807 | add_wait_queue(&chan->rxwaitq, &wait); | 807 | add_wait_queue(&chan->rxwaitq, &wait); |
| 808 | while (!chan->rx_status) { | 808 | while (!chan->rx_status) { |
| 809 | current->state = TASK_INTERRUPTIBLE; | 809 | set_current_state(TASK_INTERRUPTIBLE); |
| 810 | spin_unlock_irqrestore(&cosa->lock, flags); | 810 | spin_unlock_irqrestore(&cosa->lock, flags); |
| 811 | schedule(); | 811 | schedule(); |
| 812 | spin_lock_irqsave(&cosa->lock, flags); | 812 | spin_lock_irqsave(&cosa->lock, flags); |
| 813 | if (signal_pending(current) && chan->rx_status == 0) { | 813 | if (signal_pending(current) && chan->rx_status == 0) { |
| 814 | chan->rx_status = 1; | 814 | chan->rx_status = 1; |
| 815 | remove_wait_queue(&chan->rxwaitq, &wait); | 815 | remove_wait_queue(&chan->rxwaitq, &wait); |
| 816 | current->state = TASK_RUNNING; | 816 | __set_current_state(TASK_RUNNING); |
| 817 | spin_unlock_irqrestore(&cosa->lock, flags); | 817 | spin_unlock_irqrestore(&cosa->lock, flags); |
| 818 | mutex_unlock(&chan->rlock); | 818 | mutex_unlock(&chan->rlock); |
| 819 | return -ERESTARTSYS; | 819 | return -ERESTARTSYS; |
| 820 | } | 820 | } |
| 821 | } | 821 | } |
| 822 | remove_wait_queue(&chan->rxwaitq, &wait); | 822 | remove_wait_queue(&chan->rxwaitq, &wait); |
| 823 | current->state = TASK_RUNNING; | 823 | __set_current_state(TASK_RUNNING); |
| 824 | kbuf = chan->rxdata; | 824 | kbuf = chan->rxdata; |
| 825 | count = chan->rxsize; | 825 | count = chan->rxsize; |
| 826 | spin_unlock_irqrestore(&cosa->lock, flags); | 826 | spin_unlock_irqrestore(&cosa->lock, flags); |
| @@ -890,14 +890,14 @@ static ssize_t cosa_write(struct file *file, | |||
| 890 | spin_lock_irqsave(&cosa->lock, flags); | 890 | spin_lock_irqsave(&cosa->lock, flags); |
| 891 | add_wait_queue(&chan->txwaitq, &wait); | 891 | add_wait_queue(&chan->txwaitq, &wait); |
| 892 | while (!chan->tx_status) { | 892 | while (!chan->tx_status) { |
| 893 | current->state = TASK_INTERRUPTIBLE; | 893 | set_current_state(TASK_INTERRUPTIBLE); |
| 894 | spin_unlock_irqrestore(&cosa->lock, flags); | 894 | spin_unlock_irqrestore(&cosa->lock, flags); |
| 895 | schedule(); | 895 | schedule(); |
| 896 | spin_lock_irqsave(&cosa->lock, flags); | 896 | spin_lock_irqsave(&cosa->lock, flags); |
| 897 | if (signal_pending(current) && chan->tx_status == 0) { | 897 | if (signal_pending(current) && chan->tx_status == 0) { |
| 898 | chan->tx_status = 1; | 898 | chan->tx_status = 1; |
| 899 | remove_wait_queue(&chan->txwaitq, &wait); | 899 | remove_wait_queue(&chan->txwaitq, &wait); |
| 900 | current->state = TASK_RUNNING; | 900 | __set_current_state(TASK_RUNNING); |
| 901 | chan->tx_status = 1; | 901 | chan->tx_status = 1; |
| 902 | spin_unlock_irqrestore(&cosa->lock, flags); | 902 | spin_unlock_irqrestore(&cosa->lock, flags); |
| 903 | up(&chan->wsem); | 903 | up(&chan->wsem); |
| @@ -905,7 +905,7 @@ static ssize_t cosa_write(struct file *file, | |||
| 905 | } | 905 | } |
| 906 | } | 906 | } |
| 907 | remove_wait_queue(&chan->txwaitq, &wait); | 907 | remove_wait_queue(&chan->txwaitq, &wait); |
| 908 | current->state = TASK_RUNNING; | 908 | __set_current_state(TASK_RUNNING); |
| 909 | up(&chan->wsem); | 909 | up(&chan->wsem); |
| 910 | spin_unlock_irqrestore(&cosa->lock, flags); | 910 | spin_unlock_irqrestore(&cosa->lock, flags); |
| 911 | kfree(kbuf); | 911 | kfree(kbuf); |
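The cosa (and hso) hunks above replace open-coded current->state assignments with the scheduler helpers; set_current_state() pairs the store with the memory barrier needed before re-checking the wait condition, while __set_current_state() is the plain store for paths that no longer need ordering. A minimal sketch of the idiom, with an illustrative wait queue and flag (not taken from the patch):

	#include <linux/errno.h>
	#include <linux/sched.h>
	#include <linux/wait.h>

	/* Sketch only: sleep until *flag becomes non-zero or a signal arrives. */
	static int demo_wait_for_flag(wait_queue_head_t *wq, int *flag)
	{
		DECLARE_WAITQUEUE(wait, current);
		int ret = 0;

		add_wait_queue(wq, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);	/* store + barrier */
			if (*flag)
				break;
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
			schedule();
		}
		__set_current_state(TASK_RUNNING);	/* no barrier needed on exit */
		remove_wait_queue(wq, &wait);

		return ret;
	}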
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 4a4c6586a8d2..8908be6dbc48 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
| @@ -946,7 +946,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, | |||
| 946 | goto nla_put_failure; | 946 | goto nla_put_failure; |
| 947 | 947 | ||
| 948 | genlmsg_end(skb, msg_head); | 948 | genlmsg_end(skb, msg_head); |
| 949 | genlmsg_unicast(&init_net, skb, dst_portid); | 949 | if (genlmsg_unicast(&init_net, skb, dst_portid)) |
| 950 | goto err_free_txskb; | ||
| 950 | 951 | ||
| 951 | /* Enqueue the packet */ | 952 | /* Enqueue the packet */ |
| 952 | skb_queue_tail(&data->pending, my_skb); | 953 | skb_queue_tail(&data->pending, my_skb); |
| @@ -955,6 +956,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, | |||
| 955 | return; | 956 | return; |
| 956 | 957 | ||
| 957 | nla_put_failure: | 958 | nla_put_failure: |
| 959 | nlmsg_free(skb); | ||
| 960 | err_free_txskb: | ||
| 958 | printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__); | 961 | printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__); |
| 959 | ieee80211_free_txskb(hw, my_skb); | 962 | ieee80211_free_txskb(hw, my_skb); |
| 960 | data->tx_failed++; | 963 | data->tx_failed++; |
diff --git a/drivers/net/wireless/ti/wilink_platform_data.c b/drivers/net/wireless/ti/wilink_platform_data.c index a92bd3e89796..ea0e359bdb43 100644 --- a/drivers/net/wireless/ti/wilink_platform_data.c +++ b/drivers/net/wireless/ti/wilink_platform_data.c | |||
| @@ -23,31 +23,6 @@ | |||
| 23 | #include <linux/err.h> | 23 | #include <linux/err.h> |
| 24 | #include <linux/wl12xx.h> | 24 | #include <linux/wl12xx.h> |
| 25 | 25 | ||
| 26 | static struct wl12xx_platform_data *wl12xx_platform_data; | ||
| 27 | |||
| 28 | int __init wl12xx_set_platform_data(const struct wl12xx_platform_data *data) | ||
| 29 | { | ||
| 30 | if (wl12xx_platform_data) | ||
| 31 | return -EBUSY; | ||
| 32 | if (!data) | ||
| 33 | return -EINVAL; | ||
| 34 | |||
| 35 | wl12xx_platform_data = kmemdup(data, sizeof(*data), GFP_KERNEL); | ||
| 36 | if (!wl12xx_platform_data) | ||
| 37 | return -ENOMEM; | ||
| 38 | |||
| 39 | return 0; | ||
| 40 | } | ||
| 41 | |||
| 42 | struct wl12xx_platform_data *wl12xx_get_platform_data(void) | ||
| 43 | { | ||
| 44 | if (!wl12xx_platform_data) | ||
| 45 | return ERR_PTR(-ENODEV); | ||
| 46 | |||
| 47 | return wl12xx_platform_data; | ||
| 48 | } | ||
| 49 | EXPORT_SYMBOL(wl12xx_get_platform_data); | ||
| 50 | |||
| 51 | static struct wl1251_platform_data *wl1251_platform_data; | 26 | static struct wl1251_platform_data *wl1251_platform_data; |
| 52 | 27 | ||
| 53 | int __init wl1251_set_platform_data(const struct wl1251_platform_data *data) | 28 | int __init wl1251_set_platform_data(const struct wl1251_platform_data *data) |
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c index 144d1f8ba473..af0fe2e17151 100644 --- a/drivers/net/wireless/ti/wl12xx/main.c +++ b/drivers/net/wireless/ti/wl12xx/main.c | |||
| @@ -24,8 +24,6 @@ | |||
| 24 | 24 | ||
| 25 | #include <linux/err.h> | 25 | #include <linux/err.h> |
| 26 | 26 | ||
| 27 | #include <linux/wl12xx.h> | ||
| 28 | |||
| 29 | #include "../wlcore/wlcore.h" | 27 | #include "../wlcore/wlcore.h" |
| 30 | #include "../wlcore/debug.h" | 28 | #include "../wlcore/debug.h" |
| 31 | #include "../wlcore/io.h" | 29 | #include "../wlcore/io.h" |
| @@ -1770,11 +1768,44 @@ wl12xx_iface_combinations[] = { | |||
| 1770 | }, | 1768 | }, |
| 1771 | }; | 1769 | }; |
| 1772 | 1770 | ||
| 1771 | static const struct wl12xx_clock wl12xx_refclock_table[] = { | ||
| 1772 | { 19200000, false, WL12XX_REFCLOCK_19 }, | ||
| 1773 | { 26000000, false, WL12XX_REFCLOCK_26 }, | ||
| 1774 | { 26000000, true, WL12XX_REFCLOCK_26_XTAL }, | ||
| 1775 | { 38400000, false, WL12XX_REFCLOCK_38 }, | ||
| 1776 | { 38400000, true, WL12XX_REFCLOCK_38_XTAL }, | ||
| 1777 | { 52000000, false, WL12XX_REFCLOCK_52 }, | ||
| 1778 | { 0, false, 0 } | ||
| 1779 | }; | ||
| 1780 | |||
| 1781 | static const struct wl12xx_clock wl12xx_tcxoclock_table[] = { | ||
| 1782 | { 16368000, true, WL12XX_TCXOCLOCK_16_368 }, | ||
| 1783 | { 16800000, true, WL12XX_TCXOCLOCK_16_8 }, | ||
| 1784 | { 19200000, true, WL12XX_TCXOCLOCK_19_2 }, | ||
| 1785 | { 26000000, true, WL12XX_TCXOCLOCK_26 }, | ||
| 1786 | { 32736000, true, WL12XX_TCXOCLOCK_32_736 }, | ||
| 1787 | { 33600000, true, WL12XX_TCXOCLOCK_33_6 }, | ||
| 1788 | { 38400000, true, WL12XX_TCXOCLOCK_38_4 }, | ||
| 1789 | { 52000000, true, WL12XX_TCXOCLOCK_52 }, | ||
| 1790 | { 0, false, 0 } | ||
| 1791 | }; | ||
| 1792 | |||
| 1793 | static int wl12xx_get_clock_idx(const struct wl12xx_clock *table, | ||
| 1794 | u32 freq, bool xtal) | ||
| 1795 | { | ||
| 1796 | int i; | ||
| 1797 | |||
| 1798 | for (i = 0; table[i].freq != 0; i++) | ||
| 1799 | if ((table[i].freq == freq) && (table[i].xtal == xtal)) | ||
| 1800 | return table[i].hw_idx; | ||
| 1801 | |||
| 1802 | return -EINVAL; | ||
| 1803 | } | ||
| 1804 | |||
| 1773 | static int wl12xx_setup(struct wl1271 *wl) | 1805 | static int wl12xx_setup(struct wl1271 *wl) |
| 1774 | { | 1806 | { |
| 1775 | struct wl12xx_priv *priv = wl->priv; | 1807 | struct wl12xx_priv *priv = wl->priv; |
| 1776 | struct wlcore_platdev_data *pdev_data = dev_get_platdata(&wl->pdev->dev); | 1808 | struct wlcore_platdev_data *pdev_data = dev_get_platdata(&wl->pdev->dev); |
| 1777 | struct wl12xx_platform_data *pdata = pdev_data->pdata; | ||
| 1778 | 1809 | ||
| 1779 | BUILD_BUG_ON(WL12XX_MAX_LINKS > WLCORE_MAX_LINKS); | 1810 | BUILD_BUG_ON(WL12XX_MAX_LINKS > WLCORE_MAX_LINKS); |
| 1780 | BUILD_BUG_ON(WL12XX_MAX_AP_STATIONS > WL12XX_MAX_LINKS); | 1811 | BUILD_BUG_ON(WL12XX_MAX_AP_STATIONS > WL12XX_MAX_LINKS); |
| @@ -1799,7 +1830,17 @@ static int wl12xx_setup(struct wl1271 *wl) | |||
| 1799 | wl12xx_conf_init(wl); | 1830 | wl12xx_conf_init(wl); |
| 1800 | 1831 | ||
| 1801 | if (!fref_param) { | 1832 | if (!fref_param) { |
| 1802 | priv->ref_clock = pdata->board_ref_clock; | 1833 | priv->ref_clock = wl12xx_get_clock_idx(wl12xx_refclock_table, |
| 1834 | pdev_data->ref_clock_freq, | ||
| 1835 | pdev_data->ref_clock_xtal); | ||
| 1836 | if (priv->ref_clock < 0) { | ||
| 1837 | wl1271_error("Invalid ref_clock frequency (%d Hz, %s)", | ||
| 1838 | pdev_data->ref_clock_freq, | ||
| 1839 | pdev_data->ref_clock_xtal ? | ||
| 1840 | "XTAL" : "not XTAL"); | ||
| 1841 | |||
| 1842 | return priv->ref_clock; | ||
| 1843 | } | ||
| 1803 | } else { | 1844 | } else { |
| 1804 | if (!strcmp(fref_param, "19.2")) | 1845 | if (!strcmp(fref_param, "19.2")) |
| 1805 | priv->ref_clock = WL12XX_REFCLOCK_19; | 1846 | priv->ref_clock = WL12XX_REFCLOCK_19; |
| @@ -1817,9 +1858,17 @@ static int wl12xx_setup(struct wl1271 *wl) | |||
| 1817 | wl1271_error("Invalid fref parameter %s", fref_param); | 1858 | wl1271_error("Invalid fref parameter %s", fref_param); |
| 1818 | } | 1859 | } |
| 1819 | 1860 | ||
| 1820 | if (!tcxo_param) { | 1861 | if (!tcxo_param && pdev_data->tcxo_clock_freq) { |
| 1821 | priv->tcxo_clock = pdata->board_tcxo_clock; | 1862 | priv->tcxo_clock = wl12xx_get_clock_idx(wl12xx_tcxoclock_table, |
| 1822 | } else { | 1863 | pdev_data->tcxo_clock_freq, |
| 1864 | true); | ||
| 1865 | if (priv->tcxo_clock < 0) { | ||
| 1866 | wl1271_error("Invalid tcxo_clock frequency (%d Hz)", | ||
| 1867 | pdev_data->tcxo_clock_freq); | ||
| 1868 | |||
| 1869 | return priv->tcxo_clock; | ||
| 1870 | } | ||
| 1871 | } else if (tcxo_param) { | ||
| 1823 | if (!strcmp(tcxo_param, "19.2")) | 1872 | if (!strcmp(tcxo_param, "19.2")) |
| 1824 | priv->tcxo_clock = WL12XX_TCXOCLOCK_19_2; | 1873 | priv->tcxo_clock = WL12XX_TCXOCLOCK_19_2; |
| 1825 | else if (!strcmp(tcxo_param, "26")) | 1874 | else if (!strcmp(tcxo_param, "26")) |
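For illustration only, using the tables and helper introduced above: a board that feeds the chip a 38.4 MHz crystal reference is now described by raw frequency plus an XTAL flag, and the lookup resolves that pair to the firmware index.

	/* Illustrative call, values taken from wl12xx_refclock_table above. */
	int idx = wl12xx_get_clock_idx(wl12xx_refclock_table, 38400000, true);
	/* idx == WL12XX_REFCLOCK_38_XTAL; the same frequency with xtal == false
	 * yields WL12XX_REFCLOCK_38, and an unlisted pair yields -EINVAL.
	 */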
diff --git a/drivers/net/wireless/ti/wl12xx/wl12xx.h b/drivers/net/wireless/ti/wl12xx/wl12xx.h index 75c92658bfea..5952e99ace1b 100644 --- a/drivers/net/wireless/ti/wl12xx/wl12xx.h +++ b/drivers/net/wireless/ti/wl12xx/wl12xx.h | |||
| @@ -82,6 +82,34 @@ struct wl12xx_priv { | |||
| 82 | struct wl127x_rx_mem_pool_addr *rx_mem_addr; | 82 | struct wl127x_rx_mem_pool_addr *rx_mem_addr; |
| 83 | }; | 83 | }; |
| 84 | 84 | ||
| 85 | /* Reference clock values */ | ||
| 86 | enum { | ||
| 87 | WL12XX_REFCLOCK_19 = 0, /* 19.2 MHz */ | ||
| 88 | WL12XX_REFCLOCK_26 = 1, /* 26 MHz */ | ||
| 89 | WL12XX_REFCLOCK_38 = 2, /* 38.4 MHz */ | ||
| 90 | WL12XX_REFCLOCK_52 = 3, /* 52 MHz */ | ||
| 91 | WL12XX_REFCLOCK_38_XTAL = 4, /* 38.4 MHz, XTAL */ | ||
| 92 | WL12XX_REFCLOCK_26_XTAL = 5, /* 26 MHz, XTAL */ | ||
| 93 | }; | ||
| 94 | |||
| 95 | /* TCXO clock values */ | ||
| 96 | enum { | ||
| 97 | WL12XX_TCXOCLOCK_19_2 = 0, /* 19.2MHz */ | ||
| 98 | WL12XX_TCXOCLOCK_26 = 1, /* 26 MHz */ | ||
| 99 | WL12XX_TCXOCLOCK_38_4 = 2, /* 38.4MHz */ | ||
| 100 | WL12XX_TCXOCLOCK_52 = 3, /* 52 MHz */ | ||
| 101 | WL12XX_TCXOCLOCK_16_368 = 4, /* 16.368 MHz */ | ||
| 102 | WL12XX_TCXOCLOCK_32_736 = 5, /* 32.736 MHz */ | ||
| 103 | WL12XX_TCXOCLOCK_16_8 = 6, /* 16.8 MHz */ | ||
| 104 | WL12XX_TCXOCLOCK_33_6 = 7, /* 33.6 MHz */ | ||
| 105 | }; | ||
| 106 | |||
| 107 | struct wl12xx_clock { | ||
| 108 | u32 freq; | ||
| 109 | bool xtal; | ||
| 110 | u8 hw_idx; | ||
| 111 | }; | ||
| 112 | |||
| 85 | struct wl12xx_fw_packet_counters { | 113 | struct wl12xx_fw_packet_counters { |
| 86 | /* Cumulative counter of released packets per AC */ | 114 | /* Cumulative counter of released packets per AC */ |
| 87 | u8 tx_released_pkts[NUM_TX_QUEUES]; | 115 | u8 tx_released_pkts[NUM_TX_QUEUES]; |
diff --git a/drivers/net/wireless/ti/wlcore/boot.c b/drivers/net/wireless/ti/wlcore/boot.c index 77752b03f189..19b7ec7b69c2 100644 --- a/drivers/net/wireless/ti/wlcore/boot.c +++ b/drivers/net/wireless/ti/wlcore/boot.c | |||
| @@ -22,7 +22,6 @@ | |||
| 22 | */ | 22 | */ |
| 23 | 23 | ||
| 24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
| 25 | #include <linux/wl12xx.h> | ||
| 26 | #include <linux/export.h> | 25 | #include <linux/export.h> |
| 27 | 26 | ||
| 28 | #include "debug.h" | 27 | #include "debug.h" |
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c index 68f3bf229b5a..eb43f94a1597 100644 --- a/drivers/net/wireless/ti/wlcore/debugfs.c +++ b/drivers/net/wireless/ti/wlcore/debugfs.c | |||
| @@ -502,7 +502,7 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf, | |||
| 502 | DRIVER_STATE_PRINT_HEX(irq); | 502 | DRIVER_STATE_PRINT_HEX(irq); |
| 503 | /* TODO: ref_clock and tcxo_clock were moved to wl12xx priv */ | 503 | /* TODO: ref_clock and tcxo_clock were moved to wl12xx priv */ |
| 504 | DRIVER_STATE_PRINT_HEX(hw_pg_ver); | 504 | DRIVER_STATE_PRINT_HEX(hw_pg_ver); |
| 505 | DRIVER_STATE_PRINT_HEX(platform_quirks); | 505 | DRIVER_STATE_PRINT_HEX(irq_flags); |
| 506 | DRIVER_STATE_PRINT_HEX(chip.id); | 506 | DRIVER_STATE_PRINT_HEX(chip.id); |
| 507 | DRIVER_STATE_PRINT_STR(chip.fw_ver_str); | 507 | DRIVER_STATE_PRINT_STR(chip.fw_ver_str); |
| 508 | DRIVER_STATE_PRINT_STR(chip.phy_fw_ver_str); | 508 | DRIVER_STATE_PRINT_STR(chip.phy_fw_ver_str); |
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 1e136993580f..0be807951afe 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c | |||
| @@ -25,8 +25,8 @@ | |||
| 25 | #include <linux/firmware.h> | 25 | #include <linux/firmware.h> |
| 26 | #include <linux/etherdevice.h> | 26 | #include <linux/etherdevice.h> |
| 27 | #include <linux/vmalloc.h> | 27 | #include <linux/vmalloc.h> |
| 28 | #include <linux/wl12xx.h> | ||
| 29 | #include <linux/interrupt.h> | 28 | #include <linux/interrupt.h> |
| 29 | #include <linux/irq.h> | ||
| 30 | 30 | ||
| 31 | #include "wlcore.h" | 31 | #include "wlcore.h" |
| 32 | #include "debug.h" | 32 | #include "debug.h" |
| @@ -538,7 +538,7 @@ static int wlcore_irq_locked(struct wl1271 *wl) | |||
| 538 | * In case edge triggered interrupt must be used, we cannot iterate | 538 | * In case edge triggered interrupt must be used, we cannot iterate |
| 539 | * more than once without introducing race conditions with the hardirq. | 539 | * more than once without introducing race conditions with the hardirq. |
| 540 | */ | 540 | */ |
| 541 | if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) | 541 | if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) |
| 542 | loopcount = 1; | 542 | loopcount = 1; |
| 543 | 543 | ||
| 544 | wl1271_debug(DEBUG_IRQ, "IRQ work"); | 544 | wl1271_debug(DEBUG_IRQ, "IRQ work"); |
| @@ -6249,7 +6249,6 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size, | |||
| 6249 | wl->ap_ps_map = 0; | 6249 | wl->ap_ps_map = 0; |
| 6250 | wl->ap_fw_ps_map = 0; | 6250 | wl->ap_fw_ps_map = 0; |
| 6251 | wl->quirks = 0; | 6251 | wl->quirks = 0; |
| 6252 | wl->platform_quirks = 0; | ||
| 6253 | wl->system_hlid = WL12XX_SYSTEM_HLID; | 6252 | wl->system_hlid = WL12XX_SYSTEM_HLID; |
| 6254 | wl->active_sta_count = 0; | 6253 | wl->active_sta_count = 0; |
| 6255 | wl->active_link_count = 0; | 6254 | wl->active_link_count = 0; |
| @@ -6390,8 +6389,8 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context) | |||
| 6390 | struct wl1271 *wl = context; | 6389 | struct wl1271 *wl = context; |
| 6391 | struct platform_device *pdev = wl->pdev; | 6390 | struct platform_device *pdev = wl->pdev; |
| 6392 | struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev); | 6391 | struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev); |
| 6393 | struct wl12xx_platform_data *pdata = pdev_data->pdata; | 6392 | struct resource *res; |
| 6394 | unsigned long irqflags; | 6393 | |
| 6395 | int ret; | 6394 | int ret; |
| 6396 | irq_handler_t hardirq_fn = NULL; | 6395 | irq_handler_t hardirq_fn = NULL; |
| 6397 | 6396 | ||
| @@ -6418,19 +6417,23 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context) | |||
| 6418 | /* adjust some runtime configuration parameters */ | 6417 | /* adjust some runtime configuration parameters */ |
| 6419 | wlcore_adjust_conf(wl); | 6418 | wlcore_adjust_conf(wl); |
| 6420 | 6419 | ||
| 6421 | wl->irq = platform_get_irq(pdev, 0); | 6420 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); |
| 6422 | wl->platform_quirks = pdata->platform_quirks; | 6421 | if (!res) { |
| 6422 | wl1271_error("Could not get IRQ resource"); | ||
| 6423 | goto out_free_nvs; | ||
| 6424 | } | ||
| 6425 | |||
| 6426 | wl->irq = res->start; | ||
| 6427 | wl->irq_flags = res->flags & IRQF_TRIGGER_MASK; | ||
| 6423 | wl->if_ops = pdev_data->if_ops; | 6428 | wl->if_ops = pdev_data->if_ops; |
| 6424 | 6429 | ||
| 6425 | if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) { | 6430 | if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) |
| 6426 | irqflags = IRQF_TRIGGER_RISING; | ||
| 6427 | hardirq_fn = wlcore_hardirq; | 6431 | hardirq_fn = wlcore_hardirq; |
| 6428 | } else { | 6432 | else |
| 6429 | irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT; | 6433 | wl->irq_flags |= IRQF_ONESHOT; |
| 6430 | } | ||
| 6431 | 6434 | ||
| 6432 | ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq, | 6435 | ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq, |
| 6433 | irqflags, pdev->name, wl); | 6436 | wl->irq_flags, pdev->name, wl); |
| 6434 | if (ret < 0) { | 6437 | if (ret < 0) { |
| 6435 | wl1271_error("request_irq() failed: %d", ret); | 6438 | wl1271_error("request_irq() failed: %d", ret); |
| 6436 | goto out_free_nvs; | 6439 | goto out_free_nvs; |
| @@ -6441,7 +6444,7 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context) | |||
| 6441 | if (!ret) { | 6444 | if (!ret) { |
| 6442 | wl->irq_wake_enabled = true; | 6445 | wl->irq_wake_enabled = true; |
| 6443 | device_init_wakeup(wl->dev, 1); | 6446 | device_init_wakeup(wl->dev, 1); |
| 6444 | if (pdata->pwr_in_suspend) | 6447 | if (pdev_data->pwr_in_suspend) |
| 6445 | wl->hw->wiphy->wowlan = &wlcore_wowlan_support; | 6448 | wl->hw->wiphy->wowlan = &wlcore_wowlan_support; |
| 6446 | } | 6449 | } |
| 6447 | #endif | 6450 | #endif |
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index d3dd7bfdf3f1..ea7e07abca4e 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c | |||
| @@ -31,9 +31,10 @@ | |||
| 31 | #include <linux/mmc/card.h> | 31 | #include <linux/mmc/card.h> |
| 32 | #include <linux/mmc/host.h> | 32 | #include <linux/mmc/host.h> |
| 33 | #include <linux/gpio.h> | 33 | #include <linux/gpio.h> |
| 34 | #include <linux/wl12xx.h> | ||
| 35 | #include <linux/pm_runtime.h> | 34 | #include <linux/pm_runtime.h> |
| 36 | #include <linux/printk.h> | 35 | #include <linux/printk.h> |
| 36 | #include <linux/of.h> | ||
| 37 | #include <linux/of_irq.h> | ||
| 37 | 38 | ||
| 38 | #include "wlcore.h" | 39 | #include "wlcore.h" |
| 39 | #include "wl12xx_80211.h" | 40 | #include "wl12xx_80211.h" |
| @@ -214,6 +215,52 @@ static struct wl1271_if_operations sdio_ops = { | |||
| 214 | .set_block_size = wl1271_sdio_set_block_size, | 215 | .set_block_size = wl1271_sdio_set_block_size, |
| 215 | }; | 216 | }; |
| 216 | 217 | ||
| 218 | #ifdef CONFIG_OF | ||
| 219 | static const struct of_device_id wlcore_sdio_of_match_table[] = { | ||
| 220 | { .compatible = "ti,wl1271" }, | ||
| 221 | { .compatible = "ti,wl1273" }, | ||
| 222 | { .compatible = "ti,wl1281" }, | ||
| 223 | { .compatible = "ti,wl1283" }, | ||
| 224 | { .compatible = "ti,wl1801" }, | ||
| 225 | { .compatible = "ti,wl1805" }, | ||
| 226 | { .compatible = "ti,wl1807" }, | ||
| 227 | { .compatible = "ti,wl1831" }, | ||
| 228 | { .compatible = "ti,wl1835" }, | ||
| 229 | { .compatible = "ti,wl1837" }, | ||
| 230 | { } | ||
| 231 | }; | ||
| 232 | |||
| 233 | static int wlcore_probe_of(struct device *dev, int *irq, | ||
| 234 | struct wlcore_platdev_data *pdev_data) | ||
| 235 | { | ||
| 236 | struct device_node *np = dev->of_node; | ||
| 237 | |||
| 238 | if (!np || !of_match_node(wlcore_sdio_of_match_table, np)) | ||
| 239 | return -ENODATA; | ||
| 240 | |||
| 241 | *irq = irq_of_parse_and_map(np, 0); | ||
| 242 | if (!*irq) { | ||
| 243 | dev_err(dev, "No irq in platform data\n"); | ||
| 244 | kfree(pdev_data); | ||
| 245 | return -EINVAL; | ||
| 246 | } | ||
| 247 | |||
| 248 | /* optional clock frequency params */ | ||
| 249 | of_property_read_u32(np, "ref-clock-frequency", | ||
| 250 | &pdev_data->ref_clock_freq); | ||
| 251 | of_property_read_u32(np, "tcxo-clock-frequency", | ||
| 252 | &pdev_data->tcxo_clock_freq); | ||
| 253 | |||
| 254 | return 0; | ||
| 255 | } | ||
| 256 | #else | ||
| 257 | static int wlcore_probe_of(struct device *dev, int *irq, | ||
| 258 | struct wlcore_platdev_data *pdev_data) | ||
| 259 | { | ||
| 260 | return -ENODATA; | ||
| 261 | } | ||
| 262 | #endif | ||
| 263 | |||
| 217 | static int wl1271_probe(struct sdio_func *func, | 264 | static int wl1271_probe(struct sdio_func *func, |
| 218 | const struct sdio_device_id *id) | 265 | const struct sdio_device_id *id) |
| 219 | { | 266 | { |
| @@ -222,6 +269,7 @@ static int wl1271_probe(struct sdio_func *func, | |||
| 222 | struct resource res[1]; | 269 | struct resource res[1]; |
| 223 | mmc_pm_flag_t mmcflags; | 270 | mmc_pm_flag_t mmcflags; |
| 224 | int ret = -ENOMEM; | 271 | int ret = -ENOMEM; |
| 272 | int irq; | ||
| 225 | const char *chip_family; | 273 | const char *chip_family; |
| 226 | 274 | ||
| 227 | /* We are only able to handle the wlan function */ | 275 | /* We are only able to handle the wlan function */ |
| @@ -245,19 +293,15 @@ static int wl1271_probe(struct sdio_func *func, | |||
| 245 | /* Use block mode for transferring over one block size of data */ | 293 | /* Use block mode for transferring over one block size of data */ |
| 246 | func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE; | 294 | func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE; |
| 247 | 295 | ||
| 248 | pdev_data.pdata = wl12xx_get_platform_data(); | 296 | if (wlcore_probe_of(&func->dev, &irq, &pdev_data)) |
| 249 | if (IS_ERR(pdev_data.pdata)) { | ||
| 250 | ret = PTR_ERR(pdev_data.pdata); | ||
| 251 | dev_err(glue->dev, "missing wlan platform data: %d\n", ret); | ||
| 252 | goto out_free_glue; | 297 | goto out_free_glue; |
| 253 | } | ||
| 254 | 298 | ||
| 255 | /* if sdio can keep power while host is suspended, enable wow */ | 299 | /* if sdio can keep power while host is suspended, enable wow */ |
| 256 | mmcflags = sdio_get_host_pm_caps(func); | 300 | mmcflags = sdio_get_host_pm_caps(func); |
| 257 | dev_dbg(glue->dev, "sdio PM caps = 0x%x\n", mmcflags); | 301 | dev_dbg(glue->dev, "sdio PM caps = 0x%x\n", mmcflags); |
| 258 | 302 | ||
| 259 | if (mmcflags & MMC_PM_KEEP_POWER) | 303 | if (mmcflags & MMC_PM_KEEP_POWER) |
| 260 | pdev_data.pdata->pwr_in_suspend = true; | 304 | pdev_data.pwr_in_suspend = true; |
| 261 | 305 | ||
| 262 | sdio_set_drvdata(func, glue); | 306 | sdio_set_drvdata(func, glue); |
| 263 | 307 | ||
| @@ -286,8 +330,9 @@ static int wl1271_probe(struct sdio_func *func, | |||
| 286 | 330 | ||
| 287 | memset(res, 0x00, sizeof(res)); | 331 | memset(res, 0x00, sizeof(res)); |
| 288 | 332 | ||
| 289 | res[0].start = pdev_data.pdata->irq; | 333 | res[0].start = irq; |
| 290 | res[0].flags = IORESOURCE_IRQ; | 334 | res[0].flags = IORESOURCE_IRQ | |
| 335 | irqd_get_trigger_type(irq_get_irq_data(irq)); | ||
| 291 | res[0].name = "irq"; | 336 | res[0].name = "irq"; |
| 292 | 337 | ||
| 293 | ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res)); | 338 | ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res)); |
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c index 69601f6741d9..f1ac2839d97c 100644 --- a/drivers/net/wireless/ti/wlcore/spi.c +++ b/drivers/net/wireless/ti/wlcore/spi.c | |||
| @@ -331,11 +331,7 @@ static int wl1271_probe(struct spi_device *spi) | |||
| 331 | 331 | ||
| 332 | memset(&pdev_data, 0x00, sizeof(pdev_data)); | 332 | memset(&pdev_data, 0x00, sizeof(pdev_data)); |
| 333 | 333 | ||
| 334 | pdev_data.pdata = dev_get_platdata(&spi->dev); | 334 | /* TODO: add DT parsing when needed */ |
| 335 | if (!pdev_data.pdata) { | ||
| 336 | dev_err(&spi->dev, "no platform data\n"); | ||
| 337 | return -ENODEV; | ||
| 338 | } | ||
| 339 | 335 | ||
| 340 | pdev_data.if_ops = &spi_ops; | 336 | pdev_data.if_ops = &spi_ops; |
| 341 | 337 | ||
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h index d599c869e6e8..7f363fa566a3 100644 --- a/drivers/net/wireless/ti/wlcore/wlcore.h +++ b/drivers/net/wireless/ti/wlcore/wlcore.h | |||
| @@ -197,6 +197,8 @@ struct wl1271 { | |||
| 197 | 197 | ||
| 198 | int irq; | 198 | int irq; |
| 199 | 199 | ||
| 200 | int irq_flags; | ||
| 201 | |||
| 200 | spinlock_t wl_lock; | 202 | spinlock_t wl_lock; |
| 201 | 203 | ||
| 202 | enum wlcore_state state; | 204 | enum wlcore_state state; |
| @@ -404,9 +406,6 @@ struct wl1271 { | |||
| 404 | /* Quirks of specific hardware revisions */ | 406 | /* Quirks of specific hardware revisions */ |
| 405 | unsigned int quirks; | 407 | unsigned int quirks; |
| 406 | 408 | ||
| 407 | /* Platform limitations */ | ||
| 408 | unsigned int platform_quirks; | ||
| 409 | |||
| 410 | /* number of currently active RX BA sessions */ | 409 | /* number of currently active RX BA sessions */ |
| 411 | int ba_rx_session_count; | 410 | int ba_rx_session_count; |
| 412 | 411 | ||
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h index 3396ce5a934d..39efc6d78b10 100644 --- a/drivers/net/wireless/ti/wlcore/wlcore_i.h +++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h | |||
| @@ -201,8 +201,12 @@ struct wl1271_if_operations { | |||
| 201 | }; | 201 | }; |
| 202 | 202 | ||
| 203 | struct wlcore_platdev_data { | 203 | struct wlcore_platdev_data { |
| 204 | struct wl12xx_platform_data *pdata; | ||
| 205 | struct wl1271_if_operations *if_ops; | 204 | struct wl1271_if_operations *if_ops; |
| 205 | |||
| 206 | bool ref_clock_xtal; /* specify whether the clock is XTAL or not */ | ||
| 207 | u32 ref_clock_freq; /* in Hertz */ | ||
| 208 | u32 tcxo_clock_freq; /* in Hertz, tcxo is always XTAL */ | ||
| 209 | bool pwr_in_suspend; | ||
| 206 | }; | 210 | }; |
| 207 | 211 | ||
| 208 | #define MAX_NUM_KEYS 14 | 212 | #define MAX_NUM_KEYS 14 |
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index f38227afe099..3aa8648080c8 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c | |||
| @@ -340,12 +340,11 @@ static void xenvif_get_ethtool_stats(struct net_device *dev, | |||
| 340 | unsigned int num_queues = vif->num_queues; | 340 | unsigned int num_queues = vif->num_queues; |
| 341 | int i; | 341 | int i; |
| 342 | unsigned int queue_index; | 342 | unsigned int queue_index; |
| 343 | struct xenvif_stats *vif_stats; | ||
| 344 | 343 | ||
| 345 | for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) { | 344 | for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) { |
| 346 | unsigned long accum = 0; | 345 | unsigned long accum = 0; |
| 347 | for (queue_index = 0; queue_index < num_queues; ++queue_index) { | 346 | for (queue_index = 0; queue_index < num_queues; ++queue_index) { |
| 348 | vif_stats = &vif->queues[queue_index].stats; | 347 | void *vif_stats = &vif->queues[queue_index].stats; |
| 349 | accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset); | 348 | accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset); |
| 350 | } | 349 | } |
| 351 | data[i] = accum; | 350 | data[i] = accum; |
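The cast to void * above matters because xenvif_stats[].offset is a byte offset; adding it to a struct pointer would be scaled by sizeof(struct xenvif_stats) instead. A self-contained sketch of the byte-granular form, with hypothetical names (void-pointer arithmetic per byte is the GCC extension the kernel relies on):

	#include <linux/stddef.h>
	#include <linux/types.h>

	struct demo_stats {
		unsigned long rx_packets;
		unsigned long tx_packets;
	};

	/* Read one counter given its byte offset within the structure. */
	static unsigned long demo_read_stat(struct demo_stats *stats, size_t byte_off)
	{
		void *base = stats;	/* byte-granular, matches offsetof() */

		return *(unsigned long *)(base + byte_off);
	}

	/* e.g. demo_read_stat(&s, offsetof(struct demo_stats, tx_packets)) */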
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index f7a31d2cb3f1..cab9f5257f57 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c | |||
| @@ -655,9 +655,15 @@ static void xenvif_tx_err(struct xenvif_queue *queue, | |||
| 655 | unsigned long flags; | 655 | unsigned long flags; |
| 656 | 656 | ||
| 657 | do { | 657 | do { |
| 658 | int notify; | ||
| 659 | |||
| 658 | spin_lock_irqsave(&queue->response_lock, flags); | 660 | spin_lock_irqsave(&queue->response_lock, flags); |
| 659 | make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR); | 661 | make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR); |
| 662 | RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); | ||
| 660 | spin_unlock_irqrestore(&queue->response_lock, flags); | 663 | spin_unlock_irqrestore(&queue->response_lock, flags); |
| 664 | if (notify) | ||
| 665 | notify_remote_via_irq(queue->tx_irq); | ||
| 666 | |||
| 661 | if (cons == end) | 667 | if (cons == end) |
| 662 | break; | 668 | break; |
| 663 | txp = RING_GET_REQUEST(&queue->tx, cons++); | 669 | txp = RING_GET_REQUEST(&queue->tx, cons++); |
| @@ -1343,7 +1349,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s | |||
| 1343 | { | 1349 | { |
| 1344 | unsigned int offset = skb_headlen(skb); | 1350 | unsigned int offset = skb_headlen(skb); |
| 1345 | skb_frag_t frags[MAX_SKB_FRAGS]; | 1351 | skb_frag_t frags[MAX_SKB_FRAGS]; |
| 1346 | int i; | 1352 | int i, f; |
| 1347 | struct ubuf_info *uarg; | 1353 | struct ubuf_info *uarg; |
| 1348 | struct sk_buff *nskb = skb_shinfo(skb)->frag_list; | 1354 | struct sk_buff *nskb = skb_shinfo(skb)->frag_list; |
| 1349 | 1355 | ||
| @@ -1383,23 +1389,25 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s | |||
| 1383 | frags[i].page_offset = 0; | 1389 | frags[i].page_offset = 0; |
| 1384 | skb_frag_size_set(&frags[i], len); | 1390 | skb_frag_size_set(&frags[i], len); |
| 1385 | } | 1391 | } |
| 1386 | /* swap out with old one */ | ||
| 1387 | memcpy(skb_shinfo(skb)->frags, | ||
| 1388 | frags, | ||
| 1389 | i * sizeof(skb_frag_t)); | ||
| 1390 | skb_shinfo(skb)->nr_frags = i; | ||
| 1391 | skb->truesize += i * PAGE_SIZE; | ||
| 1392 | 1392 | ||
| 1393 | /* remove traces of mapped pages and frag_list */ | 1393 | /* Copied all the bits from the frag list -- free it. */ |
| 1394 | skb_frag_list_init(skb); | 1394 | skb_frag_list_init(skb); |
| 1395 | xenvif_skb_zerocopy_prepare(queue, nskb); | ||
| 1396 | kfree_skb(nskb); | ||
| 1397 | |||
| 1398 | /* Release all the original (foreign) frags. */ | ||
| 1399 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) | ||
| 1400 | skb_frag_unref(skb, f); | ||
| 1395 | uarg = skb_shinfo(skb)->destructor_arg; | 1401 | uarg = skb_shinfo(skb)->destructor_arg; |
| 1396 | /* increase inflight counter to offset decrement in callback */ | 1402 | /* increase inflight counter to offset decrement in callback */ |
| 1397 | atomic_inc(&queue->inflight_packets); | 1403 | atomic_inc(&queue->inflight_packets); |
| 1398 | uarg->callback(uarg, true); | 1404 | uarg->callback(uarg, true); |
| 1399 | skb_shinfo(skb)->destructor_arg = NULL; | 1405 | skb_shinfo(skb)->destructor_arg = NULL; |
| 1400 | 1406 | ||
| 1401 | xenvif_skb_zerocopy_prepare(queue, nskb); | 1407 | /* Fill the skb with the new (local) frags. */ |
| 1402 | kfree_skb(nskb); | 1408 | memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t)); |
| 1409 | skb_shinfo(skb)->nr_frags = i; | ||
| 1410 | skb->truesize += i * PAGE_SIZE; | ||
| 1403 | 1411 | ||
| 1404 | return 0; | 1412 | return 0; |
| 1405 | } | 1413 | } |
| @@ -1649,17 +1657,28 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, | |||
| 1649 | { | 1657 | { |
| 1650 | struct pending_tx_info *pending_tx_info; | 1658 | struct pending_tx_info *pending_tx_info; |
| 1651 | pending_ring_idx_t index; | 1659 | pending_ring_idx_t index; |
| 1660 | int notify; | ||
| 1652 | unsigned long flags; | 1661 | unsigned long flags; |
| 1653 | 1662 | ||
| 1654 | pending_tx_info = &queue->pending_tx_info[pending_idx]; | 1663 | pending_tx_info = &queue->pending_tx_info[pending_idx]; |
| 1664 | |||
| 1655 | spin_lock_irqsave(&queue->response_lock, flags); | 1665 | spin_lock_irqsave(&queue->response_lock, flags); |
| 1666 | |||
| 1656 | make_tx_response(queue, &pending_tx_info->req, status); | 1667 | make_tx_response(queue, &pending_tx_info->req, status); |
| 1657 | index = pending_index(queue->pending_prod); | 1668 | |
| 1669 | /* Release the pending index before pushing the Tx response so | ||
| 1670 | * it's available before a new Tx request is pushed by the | ||
| 1671 | * frontend. | ||
| 1672 | */ | ||
| 1673 | index = pending_index(queue->pending_prod++); | ||
| 1658 | queue->pending_ring[index] = pending_idx; | 1674 | queue->pending_ring[index] = pending_idx; |
| 1659 | /* TX shouldn't use the index before we give it back here */ | 1675 | |
| 1660 | mb(); | 1676 | RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); |
| 1661 | queue->pending_prod++; | 1677 | |
| 1662 | spin_unlock_irqrestore(&queue->response_lock, flags); | 1678 | spin_unlock_irqrestore(&queue->response_lock, flags); |
| 1679 | |||
| 1680 | if (notify) | ||
| 1681 | notify_remote_via_irq(queue->tx_irq); | ||
| 1663 | } | 1682 | } |
| 1664 | 1683 | ||
| 1665 | 1684 | ||
| @@ -1669,7 +1688,6 @@ static void make_tx_response(struct xenvif_queue *queue, | |||
| 1669 | { | 1688 | { |
| 1670 | RING_IDX i = queue->tx.rsp_prod_pvt; | 1689 | RING_IDX i = queue->tx.rsp_prod_pvt; |
| 1671 | struct xen_netif_tx_response *resp; | 1690 | struct xen_netif_tx_response *resp; |
| 1672 | int notify; | ||
| 1673 | 1691 | ||
| 1674 | resp = RING_GET_RESPONSE(&queue->tx, i); | 1692 | resp = RING_GET_RESPONSE(&queue->tx, i); |
| 1675 | resp->id = txp->id; | 1693 | resp->id = txp->id; |
| @@ -1679,9 +1697,6 @@ static void make_tx_response(struct xenvif_queue *queue, | |||
| 1679 | RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL; | 1697 | RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL; |
| 1680 | 1698 | ||
| 1681 | queue->tx.rsp_prod_pvt = ++i; | 1699 | queue->tx.rsp_prod_pvt = ++i; |
| 1682 | RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); | ||
| 1683 | if (notify) | ||
| 1684 | notify_remote_via_irq(queue->tx_irq); | ||
| 1685 | } | 1700 | } |
| 1686 | 1701 | ||
| 1687 | static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue, | 1702 | static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue, |
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig index 38d1c51f58b1..7bcaeec876c0 100644 --- a/drivers/of/Kconfig +++ b/drivers/of/Kconfig | |||
| @@ -84,8 +84,7 @@ config OF_RESOLVE | |||
| 84 | bool | 84 | bool |
| 85 | 85 | ||
| 86 | config OF_OVERLAY | 86 | config OF_OVERLAY |
| 87 | bool | 87 | bool "Device Tree overlays" |
| 88 | depends on OF | ||
| 89 | select OF_DYNAMIC | 88 | select OF_DYNAMIC |
| 90 | select OF_RESOLVE | 89 | select OF_RESOLVE |
| 91 | 90 | ||
diff --git a/drivers/of/base.c b/drivers/of/base.c index 0a8aeb8523fe..adb8764861c0 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c | |||
| @@ -714,16 +714,17 @@ static struct device_node *__of_find_node_by_path(struct device_node *parent, | |||
| 714 | const char *path) | 714 | const char *path) |
| 715 | { | 715 | { |
| 716 | struct device_node *child; | 716 | struct device_node *child; |
| 717 | int len = strchrnul(path, '/') - path; | 717 | int len; |
| 718 | int term; | 718 | const char *end; |
| 719 | 719 | ||
| 720 | end = strchr(path, ':'); | ||
| 721 | if (!end) | ||
| 722 | end = strchrnul(path, '/'); | ||
| 723 | |||
| 724 | len = end - path; | ||
| 720 | if (!len) | 725 | if (!len) |
| 721 | return NULL; | 726 | return NULL; |
| 722 | 727 | ||
| 723 | term = strchrnul(path, ':') - path; | ||
| 724 | if (term < len) | ||
| 725 | len = term; | ||
| 726 | |||
| 727 | __for_each_child_of_node(parent, child) { | 728 | __for_each_child_of_node(parent, child) { |
| 728 | const char *name = strrchr(child->full_name, '/'); | 729 | const char *name = strrchr(child->full_name, '/'); |
| 729 | if (WARN(!name, "malformed device_node %s\n", child->full_name)) | 730 | if (WARN(!name, "malformed device_node %s\n", child->full_name)) |
| @@ -768,8 +769,12 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt | |||
| 768 | 769 | ||
| 769 | /* The path could begin with an alias */ | 770 | /* The path could begin with an alias */ |
| 770 | if (*path != '/') { | 771 | if (*path != '/') { |
| 771 | char *p = strchrnul(path, '/'); | 772 | int len; |
| 772 | int len = separator ? separator - path : p - path; | 773 | const char *p = separator; |
| 774 | |||
| 775 | if (!p) | ||
| 776 | p = strchrnul(path, '/'); | ||
| 777 | len = p - path; | ||
| 773 | 778 | ||
| 774 | /* of_aliases must not be NULL */ | 779 | /* of_aliases must not be NULL */ |
| 775 | if (!of_aliases) | 780 | if (!of_aliases) |
| @@ -794,6 +799,8 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt | |||
| 794 | path++; /* Increment past '/' delimiter */ | 799 | path++; /* Increment past '/' delimiter */ |
| 795 | np = __of_find_node_by_path(np, path); | 800 | np = __of_find_node_by_path(np, path); |
| 796 | path = strchrnul(path, '/'); | 801 | path = strchrnul(path, '/'); |
| 802 | if (separator && separator < path) | ||
| 803 | break; | ||
| 797 | } | 804 | } |
| 798 | raw_spin_unlock_irqrestore(&devtree_lock, flags); | 805 | raw_spin_unlock_irqrestore(&devtree_lock, flags); |
| 799 | return np; | 806 | return np; |
| @@ -1886,8 +1893,10 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)) | |||
| 1886 | name = of_get_property(of_chosen, "linux,stdout-path", NULL); | 1893 | name = of_get_property(of_chosen, "linux,stdout-path", NULL); |
| 1887 | if (IS_ENABLED(CONFIG_PPC) && !name) | 1894 | if (IS_ENABLED(CONFIG_PPC) && !name) |
| 1888 | name = of_get_property(of_aliases, "stdout", NULL); | 1895 | name = of_get_property(of_aliases, "stdout", NULL); |
| 1889 | if (name) | 1896 | if (name) { |
| 1890 | of_stdout = of_find_node_opts_by_path(name, &of_stdout_options); | 1897 | of_stdout = of_find_node_opts_by_path(name, &of_stdout_options); |
| 1898 | add_preferred_console("stdout-path", 0, NULL); | ||
| 1899 | } | ||
| 1891 | } | 1900 | } |
| 1892 | 1901 | ||
| 1893 | if (!of_aliases) | 1902 | if (!of_aliases) |
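A hedged usage sketch of the option-aware lookup touched above (the path string is only an example): everything after the first ':' is handed back through the opts pointer instead of being parsed as part of the node path, even when it contains further '/' characters.

	#include <linux/of.h>

	static void demo_stdout_lookup(void)
	{
		const char *opts;
		struct device_node *np;

		np = of_find_node_opts_by_path("serial0:115200n8", &opts);
		if (np) {
			/* np is the node behind the "serial0" alias,
			 * opts points at "115200n8"
			 */
			of_node_put(np);
		}
	}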
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index 352b4f28f82c..dee9270ba547 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/string.h> | 19 | #include <linux/string.h> |
| 20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
| 21 | #include <linux/err.h> | 21 | #include <linux/err.h> |
| 22 | #include <linux/idr.h> | ||
| 22 | 23 | ||
| 23 | #include "of_private.h" | 24 | #include "of_private.h" |
| 24 | 25 | ||
| @@ -85,7 +86,7 @@ static int of_overlay_apply_single_device_node(struct of_overlay *ov, | |||
| 85 | struct device_node *target, struct device_node *child) | 86 | struct device_node *target, struct device_node *child) |
| 86 | { | 87 | { |
| 87 | const char *cname; | 88 | const char *cname; |
| 88 | struct device_node *tchild, *grandchild; | 89 | struct device_node *tchild; |
| 89 | int ret = 0; | 90 | int ret = 0; |
| 90 | 91 | ||
| 91 | cname = kbasename(child->full_name); | 92 | cname = kbasename(child->full_name); |
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index 0cf9a236d438..aba8946cac46 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c | |||
| @@ -92,6 +92,11 @@ static void __init of_selftest_find_node_by_name(void) | |||
| 92 | "option path test failed\n"); | 92 | "option path test failed\n"); |
| 93 | of_node_put(np); | 93 | of_node_put(np); |
| 94 | 94 | ||
| 95 | np = of_find_node_opts_by_path("/testcase-data:test/option", &options); | ||
| 96 | selftest(np && !strcmp("test/option", options), | ||
| 97 | "option path test, subcase #1 failed\n"); | ||
| 98 | of_node_put(np); | ||
| 99 | |||
| 95 | np = of_find_node_opts_by_path("/testcase-data:testoption", NULL); | 100 | np = of_find_node_opts_by_path("/testcase-data:testoption", NULL); |
| 96 | selftest(np, "NULL option path test failed\n"); | 101 | selftest(np, "NULL option path test failed\n"); |
| 97 | of_node_put(np); | 102 | of_node_put(np); |
| @@ -102,6 +107,12 @@ static void __init of_selftest_find_node_by_name(void) | |||
| 102 | "option alias path test failed\n"); | 107 | "option alias path test failed\n"); |
| 103 | of_node_put(np); | 108 | of_node_put(np); |
| 104 | 109 | ||
| 110 | np = of_find_node_opts_by_path("testcase-alias:test/alias/option", | ||
| 111 | &options); | ||
| 112 | selftest(np && !strcmp("test/alias/option", options), | ||
| 113 | "option alias path test, subcase #1 failed\n"); | ||
| 114 | of_node_put(np); | ||
| 115 | |||
| 105 | np = of_find_node_opts_by_path("testcase-alias:testaliasoption", NULL); | 116 | np = of_find_node_opts_by_path("testcase-alias:testaliasoption", NULL); |
| 106 | selftest(np, "NULL option alias path test failed\n"); | 117 | selftest(np, "NULL option alias path test failed\n"); |
| 107 | of_node_put(np); | 118 | of_node_put(np); |
| @@ -378,9 +389,9 @@ static void __init of_selftest_property_string(void) | |||
| 378 | rc = of_property_match_string(np, "phandle-list-names", "first"); | 389 | rc = of_property_match_string(np, "phandle-list-names", "first"); |
| 379 | selftest(rc == 0, "first expected:0 got:%i\n", rc); | 390 | selftest(rc == 0, "first expected:0 got:%i\n", rc); |
| 380 | rc = of_property_match_string(np, "phandle-list-names", "second"); | 391 | rc = of_property_match_string(np, "phandle-list-names", "second"); |
| 381 | selftest(rc == 1, "second expected:0 got:%i\n", rc); | 392 | selftest(rc == 1, "second expected:1 got:%i\n", rc); |
| 382 | rc = of_property_match_string(np, "phandle-list-names", "third"); | 393 | rc = of_property_match_string(np, "phandle-list-names", "third"); |
| 383 | selftest(rc == 2, "third expected:0 got:%i\n", rc); | 394 | selftest(rc == 2, "third expected:2 got:%i\n", rc); |
| 384 | rc = of_property_match_string(np, "phandle-list-names", "fourth"); | 395 | rc = of_property_match_string(np, "phandle-list-names", "fourth"); |
| 385 | selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc); | 396 | selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc); |
| 386 | rc = of_property_match_string(np, "missing-property", "blah"); | 397 | rc = of_property_match_string(np, "missing-property", "blah"); |
| @@ -478,7 +489,6 @@ static void __init of_selftest_changeset(void) | |||
| 478 | struct device_node *n1, *n2, *n21, *nremove, *parent, *np; | 489 | struct device_node *n1, *n2, *n21, *nremove, *parent, *np; |
| 479 | struct of_changeset chgset; | 490 | struct of_changeset chgset; |
| 480 | 491 | ||
| 481 | of_changeset_init(&chgset); | ||
| 482 | n1 = __of_node_dup(NULL, "/testcase-data/changeset/n1"); | 492 | n1 = __of_node_dup(NULL, "/testcase-data/changeset/n1"); |
| 483 | selftest(n1, "testcase setup failure\n"); | 493 | selftest(n1, "testcase setup failure\n"); |
| 484 | n2 = __of_node_dup(NULL, "/testcase-data/changeset/n2"); | 494 | n2 = __of_node_dup(NULL, "/testcase-data/changeset/n2"); |
| @@ -979,7 +989,7 @@ static int of_path_platform_device_exists(const char *path) | |||
| 979 | return pdev != NULL; | 989 | return pdev != NULL; |
| 980 | } | 990 | } |
| 981 | 991 | ||
| 982 | #if IS_ENABLED(CONFIG_I2C) | 992 | #if IS_BUILTIN(CONFIG_I2C) |
| 983 | 993 | ||
| 984 | /* get the i2c client device instantiated at the path */ | 994 | /* get the i2c client device instantiated at the path */ |
| 985 | static struct i2c_client *of_path_to_i2c_client(const char *path) | 995 | static struct i2c_client *of_path_to_i2c_client(const char *path) |
| @@ -1445,7 +1455,7 @@ static void of_selftest_overlay_11(void) | |||
| 1445 | return; | 1455 | return; |
| 1446 | } | 1456 | } |
| 1447 | 1457 | ||
| 1448 | #if IS_ENABLED(CONFIG_I2C) && IS_ENABLED(CONFIG_OF_OVERLAY) | 1458 | #if IS_BUILTIN(CONFIG_I2C) && IS_ENABLED(CONFIG_OF_OVERLAY) |
| 1449 | 1459 | ||
| 1450 | struct selftest_i2c_bus_data { | 1460 | struct selftest_i2c_bus_data { |
| 1451 | struct platform_device *pdev; | 1461 | struct platform_device *pdev; |
| @@ -1584,7 +1594,7 @@ static struct i2c_driver selftest_i2c_dev_driver = { | |||
| 1584 | .id_table = selftest_i2c_dev_id, | 1594 | .id_table = selftest_i2c_dev_id, |
| 1585 | }; | 1595 | }; |
| 1586 | 1596 | ||
| 1587 | #if IS_ENABLED(CONFIG_I2C_MUX) | 1597 | #if IS_BUILTIN(CONFIG_I2C_MUX) |
| 1588 | 1598 | ||
| 1589 | struct selftest_i2c_mux_data { | 1599 | struct selftest_i2c_mux_data { |
| 1590 | int nchans; | 1600 | int nchans; |
| @@ -1695,7 +1705,7 @@ static int of_selftest_overlay_i2c_init(void) | |||
| 1695 | "could not register selftest i2c bus driver\n")) | 1705 | "could not register selftest i2c bus driver\n")) |
| 1696 | return ret; | 1706 | return ret; |
| 1697 | 1707 | ||
| 1698 | #if IS_ENABLED(CONFIG_I2C_MUX) | 1708 | #if IS_BUILTIN(CONFIG_I2C_MUX) |
| 1699 | ret = i2c_add_driver(&selftest_i2c_mux_driver); | 1709 | ret = i2c_add_driver(&selftest_i2c_mux_driver); |
| 1700 | if (selftest(ret == 0, | 1710 | if (selftest(ret == 0, |
| 1701 | "could not register selftest i2c mux driver\n")) | 1711 | "could not register selftest i2c mux driver\n")) |
| @@ -1707,7 +1717,7 @@ static int of_selftest_overlay_i2c_init(void) | |||
| 1707 | 1717 | ||
| 1708 | static void of_selftest_overlay_i2c_cleanup(void) | 1718 | static void of_selftest_overlay_i2c_cleanup(void) |
| 1709 | { | 1719 | { |
| 1710 | #if IS_ENABLED(CONFIG_I2C_MUX) | 1720 | #if IS_BUILTIN(CONFIG_I2C_MUX) |
| 1711 | i2c_del_driver(&selftest_i2c_mux_driver); | 1721 | i2c_del_driver(&selftest_i2c_mux_driver); |
| 1712 | #endif | 1722 | #endif |
| 1713 | platform_driver_unregister(&selftest_i2c_bus_driver); | 1723 | platform_driver_unregister(&selftest_i2c_bus_driver); |
| @@ -1814,7 +1824,7 @@ static void __init of_selftest_overlay(void) | |||
| 1814 | of_selftest_overlay_10(); | 1824 | of_selftest_overlay_10(); |
| 1815 | of_selftest_overlay_11(); | 1825 | of_selftest_overlay_11(); |
| 1816 | 1826 | ||
| 1817 | #if IS_ENABLED(CONFIG_I2C) | 1827 | #if IS_BUILTIN(CONFIG_I2C) |
| 1818 | if (selftest(of_selftest_overlay_i2c_init() == 0, "i2c init failed\n")) | 1828 | if (selftest(of_selftest_overlay_i2c_init() == 0, "i2c init failed\n")) |
| 1819 | goto out; | 1829 | goto out; |
| 1820 | 1830 | ||
diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c index 1ec694a52379..464bf492ee2a 100644 --- a/drivers/pci/host/pci-versatile.c +++ b/drivers/pci/host/pci-versatile.c | |||
| @@ -80,7 +80,7 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev, | |||
| 80 | if (err) | 80 | if (err) |
| 81 | return err; | 81 | return err; |
| 82 | 82 | ||
| 83 | resource_list_for_each_entry(win, res, list) { | 83 | resource_list_for_each_entry(win, res) { |
| 84 | struct resource *parent, *res = win->res; | 84 | struct resource *parent, *res = win->res; |
| 85 | 85 | ||
| 86 | switch (resource_type(res)) { | 86 | switch (resource_type(res)) { |
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c index aab55474dd0d..ee082c0366ec 100644 --- a/drivers/pci/host/pci-xgene.c +++ b/drivers/pci/host/pci-xgene.c | |||
| @@ -127,7 +127,7 @@ static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset) | |||
| 127 | return false; | 127 | return false; |
| 128 | } | 128 | } |
| 129 | 129 | ||
| 130 | static int xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, | 130 | static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, |
| 131 | int offset) | 131 | int offset) |
| 132 | { | 132 | { |
| 133 | struct xgene_pcie_port *port = bus->sysdata; | 133 | struct xgene_pcie_port *port = bus->sysdata; |
| @@ -137,7 +137,7 @@ static int xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, | |||
| 137 | return NULL; | 137 | return NULL; |
| 138 | 138 | ||
| 139 | xgene_pcie_set_rtdid_reg(bus, devfn); | 139 | xgene_pcie_set_rtdid_reg(bus, devfn); |
| 140 | return xgene_pcie_get_cfg_base(bus); | 140 | return xgene_pcie_get_cfg_base(bus) + offset; |
| 141 | } | 141 | } |
| 142 | 142 | ||
| 143 | static struct pci_ops xgene_pcie_ops = { | 143 | static struct pci_ops xgene_pcie_ops = { |
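The xgene change follows the map_bus() contract used by the generic config accessors: they dereference whatever map_bus() returns, so the register offset has to be folded into the returned address. A sketch with illustrative names of roughly what the 32-bit read path looks like:

	#include <linux/io.h>
	#include <linux/pci.h>

	/* Sketch of a generic-style config read built on top of map_bus(). */
	static int demo_config_read32(struct pci_bus *bus, unsigned int devfn,
				      int where, u32 *val)
	{
		void __iomem *addr = bus->ops->map_bus(bus, devfn, where);

		if (!addr)
			return PCIBIOS_DEVICE_NOT_FOUND;

		*val = readl(addr);	/* no further offsetting happens here */
		return PCIBIOS_SUCCESSFUL;
	}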
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index aa012fb3834b..312f23a8429c 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c | |||
| @@ -521,7 +521,8 @@ static ssize_t driver_override_store(struct device *dev, | |||
| 521 | struct pci_dev *pdev = to_pci_dev(dev); | 521 | struct pci_dev *pdev = to_pci_dev(dev); |
| 522 | char *driver_override, *old = pdev->driver_override, *cp; | 522 | char *driver_override, *old = pdev->driver_override, *cp; |
| 523 | 523 | ||
| 524 | if (count > PATH_MAX) | 524 | /* We need to keep extra room for a newline */ |
| 525 | if (count >= (PAGE_SIZE - 1)) | ||
| 525 | return -EINVAL; | 526 | return -EINVAL; |
| 526 | 527 | ||
| 527 | driver_override = kstrndup(buf, count, GFP_KERNEL); | 528 | driver_override = kstrndup(buf, count, GFP_KERNEL); |
| @@ -549,7 +550,7 @@ static ssize_t driver_override_show(struct device *dev, | |||
| 549 | { | 550 | { |
| 550 | struct pci_dev *pdev = to_pci_dev(dev); | 551 | struct pci_dev *pdev = to_pci_dev(dev); |
| 551 | 552 | ||
| 552 | return sprintf(buf, "%s\n", pdev->driver_override); | 553 | return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override); |
| 553 | } | 554 | } |
| 554 | static DEVICE_ATTR_RW(driver_override); | 555 | static DEVICE_ATTR_RW(driver_override); |
| 555 | 556 | ||
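Sketch of the sysfs show() pattern adopted above (attribute and field names reused purely for illustration): a show() callback gets exactly one page of buffer, so bounding the output with snprintf(buf, PAGE_SIZE, ...) is safer than trusting the length of the stored string.

	#include <linux/device.h>
	#include <linux/pci.h>

	/* Illustrative show() callback bounded to the single sysfs page. */
	static ssize_t demo_override_show(struct device *dev,
					  struct device_attribute *attr, char *buf)
	{
		struct pci_dev *pdev = to_pci_dev(dev);

		return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
	}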
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index b899947d839d..1245dca79009 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c | |||
| @@ -3444,13 +3444,6 @@ static umode_t regulator_attr_is_visible(struct kobject *kobj, | |||
| 3444 | if (attr == &dev_attr_requested_microamps.attr) | 3444 | if (attr == &dev_attr_requested_microamps.attr) |
| 3445 | return rdev->desc->type == REGULATOR_CURRENT ? mode : 0; | 3445 | return rdev->desc->type == REGULATOR_CURRENT ? mode : 0; |
| 3446 | 3446 | ||
| 3447 | /* all the other attributes exist to support constraints; | ||
| 3448 | * don't show them if there are no constraints, or if the | ||
| 3449 | * relevant supporting methods are missing. | ||
| 3450 | */ | ||
| 3451 | if (!rdev->constraints) | ||
| 3452 | return 0; | ||
| 3453 | |||
| 3454 | /* constraints need specific supporting methods */ | 3447 | /* constraints need specific supporting methods */ |
| 3455 | if (attr == &dev_attr_min_microvolts.attr || | 3448 | if (attr == &dev_attr_min_microvolts.attr || |
| 3456 | attr == &dev_attr_max_microvolts.attr) | 3449 | attr == &dev_attr_max_microvolts.attr) |
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c index bc6100103f7f..f0489cb9018b 100644 --- a/drivers/regulator/da9210-regulator.c +++ b/drivers/regulator/da9210-regulator.c | |||
| @@ -152,6 +152,15 @@ static int da9210_i2c_probe(struct i2c_client *i2c, | |||
| 152 | config.regmap = chip->regmap; | 152 | config.regmap = chip->regmap; |
| 153 | config.of_node = dev->of_node; | 153 | config.of_node = dev->of_node; |
| 154 | 154 | ||
| 155 | /* Mask all interrupt sources to deassert interrupt line */ | ||
| 156 | error = regmap_write(chip->regmap, DA9210_REG_MASK_A, ~0); | ||
| 157 | if (!error) | ||
| 158 | error = regmap_write(chip->regmap, DA9210_REG_MASK_B, ~0); | ||
| 159 | if (error) { | ||
| 160 | dev_err(&i2c->dev, "Failed to write to mask reg: %d\n", error); | ||
| 161 | return error; | ||
| 162 | } | ||
| 163 | |||
| 155 | rdev = devm_regulator_register(&i2c->dev, &da9210_reg, &config); | 164 | rdev = devm_regulator_register(&i2c->dev, &da9210_reg, &config); |
| 156 | if (IS_ERR(rdev)) { | 165 | if (IS_ERR(rdev)) { |
| 157 | dev_err(&i2c->dev, "Failed to register DA9210 regulator\n"); | 166 | dev_err(&i2c->dev, "Failed to register DA9210 regulator\n"); |
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c index 1f93b752a81c..3fd44353cc80 100644 --- a/drivers/regulator/rk808-regulator.c +++ b/drivers/regulator/rk808-regulator.c | |||
| @@ -235,6 +235,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
| 235 | .vsel_mask = RK808_LDO_VSEL_MASK, | 235 | .vsel_mask = RK808_LDO_VSEL_MASK, |
| 236 | .enable_reg = RK808_LDO_EN_REG, | 236 | .enable_reg = RK808_LDO_EN_REG, |
| 237 | .enable_mask = BIT(0), | 237 | .enable_mask = BIT(0), |
| 238 | .enable_time = 400, | ||
| 238 | .owner = THIS_MODULE, | 239 | .owner = THIS_MODULE, |
| 239 | }, { | 240 | }, { |
| 240 | .name = "LDO_REG2", | 241 | .name = "LDO_REG2", |
| @@ -249,6 +250,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
| 249 | .vsel_mask = RK808_LDO_VSEL_MASK, | 250 | .vsel_mask = RK808_LDO_VSEL_MASK, |
| 250 | .enable_reg = RK808_LDO_EN_REG, | 251 | .enable_reg = RK808_LDO_EN_REG, |
| 251 | .enable_mask = BIT(1), | 252 | .enable_mask = BIT(1), |
| 253 | .enable_time = 400, | ||
| 252 | .owner = THIS_MODULE, | 254 | .owner = THIS_MODULE, |
| 253 | }, { | 255 | }, { |
| 254 | .name = "LDO_REG3", | 256 | .name = "LDO_REG3", |
| @@ -263,6 +265,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
| 263 | .vsel_mask = RK808_BUCK4_VSEL_MASK, | 265 | .vsel_mask = RK808_BUCK4_VSEL_MASK, |
| 264 | .enable_reg = RK808_LDO_EN_REG, | 266 | .enable_reg = RK808_LDO_EN_REG, |
| 265 | .enable_mask = BIT(2), | 267 | .enable_mask = BIT(2), |
| 268 | .enable_time = 400, | ||
| 266 | .owner = THIS_MODULE, | 269 | .owner = THIS_MODULE, |
| 267 | }, { | 270 | }, { |
| 268 | .name = "LDO_REG4", | 271 | .name = "LDO_REG4", |
| @@ -277,6 +280,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
| 277 | .vsel_mask = RK808_LDO_VSEL_MASK, | 280 | .vsel_mask = RK808_LDO_VSEL_MASK, |
| 278 | .enable_reg = RK808_LDO_EN_REG, | 281 | .enable_reg = RK808_LDO_EN_REG, |
| 279 | .enable_mask = BIT(3), | 282 | .enable_mask = BIT(3), |
| 283 | .enable_time = 400, | ||
| 280 | .owner = THIS_MODULE, | 284 | .owner = THIS_MODULE, |
| 281 | }, { | 285 | }, { |
| 282 | .name = "LDO_REG5", | 286 | .name = "LDO_REG5", |
| @@ -291,6 +295,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
| 291 | .vsel_mask = RK808_LDO_VSEL_MASK, | 295 | .vsel_mask = RK808_LDO_VSEL_MASK, |
| 292 | .enable_reg = RK808_LDO_EN_REG, | 296 | .enable_reg = RK808_LDO_EN_REG, |
| 293 | .enable_mask = BIT(4), | 297 | .enable_mask = BIT(4), |
| 298 | .enable_time = 400, | ||
| 294 | .owner = THIS_MODULE, | 299 | .owner = THIS_MODULE, |
| 295 | }, { | 300 | }, { |
| 296 | .name = "LDO_REG6", | 301 | .name = "LDO_REG6", |
| @@ -305,6 +310,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
| 305 | .vsel_mask = RK808_LDO_VSEL_MASK, | 310 | .vsel_mask = RK808_LDO_VSEL_MASK, |
| 306 | .enable_reg = RK808_LDO_EN_REG, | 311 | .enable_reg = RK808_LDO_EN_REG, |
| 307 | .enable_mask = BIT(5), | 312 | .enable_mask = BIT(5), |
| 313 | .enable_time = 400, | ||
| 308 | .owner = THIS_MODULE, | 314 | .owner = THIS_MODULE, |
| 309 | }, { | 315 | }, { |
| 310 | .name = "LDO_REG7", | 316 | .name = "LDO_REG7", |
| @@ -319,6 +325,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
| 319 | .vsel_mask = RK808_LDO_VSEL_MASK, | 325 | .vsel_mask = RK808_LDO_VSEL_MASK, |
| 320 | .enable_reg = RK808_LDO_EN_REG, | 326 | .enable_reg = RK808_LDO_EN_REG, |
| 321 | .enable_mask = BIT(6), | 327 | .enable_mask = BIT(6), |
| 328 | .enable_time = 400, | ||
| 322 | .owner = THIS_MODULE, | 329 | .owner = THIS_MODULE, |
| 323 | }, { | 330 | }, { |
| 324 | .name = "LDO_REG8", | 331 | .name = "LDO_REG8", |
| @@ -333,6 +340,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
| 333 | .vsel_mask = RK808_LDO_VSEL_MASK, | 340 | .vsel_mask = RK808_LDO_VSEL_MASK, |
| 334 | .enable_reg = RK808_LDO_EN_REG, | 341 | .enable_reg = RK808_LDO_EN_REG, |
| 335 | .enable_mask = BIT(7), | 342 | .enable_mask = BIT(7), |
| 343 | .enable_time = 400, | ||
| 336 | .owner = THIS_MODULE, | 344 | .owner = THIS_MODULE, |
| 337 | }, { | 345 | }, { |
| 338 | .name = "SWITCH_REG1", | 346 | .name = "SWITCH_REG1", |
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c index 70a5d94cc766..b4f7744f6751 100644 --- a/drivers/rtc/rtc-at91rm9200.c +++ b/drivers/rtc/rtc-at91rm9200.c | |||
| @@ -31,6 +31,7 @@ | |||
| 31 | #include <linux/io.h> | 31 | #include <linux/io.h> |
| 32 | #include <linux/of.h> | 32 | #include <linux/of.h> |
| 33 | #include <linux/of_device.h> | 33 | #include <linux/of_device.h> |
| 34 | #include <linux/suspend.h> | ||
| 34 | #include <linux/uaccess.h> | 35 | #include <linux/uaccess.h> |
| 35 | 36 | ||
| 36 | #include "rtc-at91rm9200.h" | 37 | #include "rtc-at91rm9200.h" |
| @@ -54,6 +55,10 @@ static void __iomem *at91_rtc_regs; | |||
| 54 | static int irq; | 55 | static int irq; |
| 55 | static DEFINE_SPINLOCK(at91_rtc_lock); | 56 | static DEFINE_SPINLOCK(at91_rtc_lock); |
| 56 | static u32 at91_rtc_shadow_imr; | 57 | static u32 at91_rtc_shadow_imr; |
| 58 | static bool suspended; | ||
| 59 | static DEFINE_SPINLOCK(suspended_lock); | ||
| 60 | static unsigned long cached_events; | ||
| 61 | static u32 at91_rtc_imr; | ||
| 57 | 62 | ||
| 58 | static void at91_rtc_write_ier(u32 mask) | 63 | static void at91_rtc_write_ier(u32 mask) |
| 59 | { | 64 | { |
| @@ -290,7 +295,9 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id) | |||
| 290 | struct rtc_device *rtc = platform_get_drvdata(pdev); | 295 | struct rtc_device *rtc = platform_get_drvdata(pdev); |
| 291 | unsigned int rtsr; | 296 | unsigned int rtsr; |
| 292 | unsigned long events = 0; | 297 | unsigned long events = 0; |
| 298 | int ret = IRQ_NONE; | ||
| 293 | 299 | ||
| 300 | spin_lock(&suspended_lock); | ||
| 294 | rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read_imr(); | 301 | rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read_imr(); |
| 295 | if (rtsr) { /* this interrupt is shared! Is it ours? */ | 302 | if (rtsr) { /* this interrupt is shared! Is it ours? */ |
| 296 | if (rtsr & AT91_RTC_ALARM) | 303 | if (rtsr & AT91_RTC_ALARM) |
| @@ -304,14 +311,22 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id) | |||
| 304 | 311 | ||
| 305 | at91_rtc_write(AT91_RTC_SCCR, rtsr); /* clear status reg */ | 312 | at91_rtc_write(AT91_RTC_SCCR, rtsr); /* clear status reg */ |
| 306 | 313 | ||
| 307 | rtc_update_irq(rtc, 1, events); | 314 | if (!suspended) { |
| 315 | rtc_update_irq(rtc, 1, events); | ||
| 308 | 316 | ||
| 309 | dev_dbg(&pdev->dev, "%s(): num=%ld, events=0x%02lx\n", __func__, | 317 | dev_dbg(&pdev->dev, "%s(): num=%ld, events=0x%02lx\n", |
| 310 | events >> 8, events & 0x000000FF); | 318 | __func__, events >> 8, events & 0x000000FF); |
| 319 | } else { | ||
| 320 | cached_events |= events; | ||
| 321 | at91_rtc_write_idr(at91_rtc_imr); | ||
| 322 | pm_system_wakeup(); | ||
| 323 | } | ||
| 311 | 324 | ||
| 312 | return IRQ_HANDLED; | 325 | ret = IRQ_HANDLED; |
| 313 | } | 326 | } |
| 314 | return IRQ_NONE; /* not handled */ | 327 | spin_unlock(&suspended_lock); |
| 328 | |||
| 329 | return ret; | ||
| 315 | } | 330 | } |
| 316 | 331 | ||
| 317 | static const struct at91_rtc_config at91rm9200_config = { | 332 | static const struct at91_rtc_config at91rm9200_config = { |
| @@ -401,8 +416,8 @@ static int __init at91_rtc_probe(struct platform_device *pdev) | |||
| 401 | AT91_RTC_CALEV); | 416 | AT91_RTC_CALEV); |
| 402 | 417 | ||
| 403 | ret = devm_request_irq(&pdev->dev, irq, at91_rtc_interrupt, | 418 | ret = devm_request_irq(&pdev->dev, irq, at91_rtc_interrupt, |
| 404 | IRQF_SHARED, | 419 | IRQF_SHARED | IRQF_COND_SUSPEND, |
| 405 | "at91_rtc", pdev); | 420 | "at91_rtc", pdev); |
| 406 | if (ret) { | 421 | if (ret) { |
| 407 | dev_err(&pdev->dev, "IRQ %d already in use.\n", irq); | 422 | dev_err(&pdev->dev, "IRQ %d already in use.\n", irq); |
| 408 | return ret; | 423 | return ret; |
| @@ -454,8 +469,6 @@ static void at91_rtc_shutdown(struct platform_device *pdev) | |||
| 454 | 469 | ||
| 455 | /* AT91RM9200 RTC Power management control */ | 470 | /* AT91RM9200 RTC Power management control */ |
| 456 | 471 | ||
| 457 | static u32 at91_rtc_imr; | ||
| 458 | |||
| 459 | static int at91_rtc_suspend(struct device *dev) | 472 | static int at91_rtc_suspend(struct device *dev) |
| 460 | { | 473 | { |
| 461 | /* this IRQ is shared with DBGU and other hardware which isn't | 474 | /* this IRQ is shared with DBGU and other hardware which isn't |
| @@ -464,21 +477,42 @@ static int at91_rtc_suspend(struct device *dev) | |||
| 464 | at91_rtc_imr = at91_rtc_read_imr() | 477 | at91_rtc_imr = at91_rtc_read_imr() |
| 465 | & (AT91_RTC_ALARM|AT91_RTC_SECEV); | 478 | & (AT91_RTC_ALARM|AT91_RTC_SECEV); |
| 466 | if (at91_rtc_imr) { | 479 | if (at91_rtc_imr) { |
| 467 | if (device_may_wakeup(dev)) | 480 | if (device_may_wakeup(dev)) { |
| 481 | unsigned long flags; | ||
| 482 | |||
| 468 | enable_irq_wake(irq); | 483 | enable_irq_wake(irq); |
| 469 | else | 484 | |
| 485 | spin_lock_irqsave(&suspended_lock, flags); | ||
| 486 | suspended = true; | ||
| 487 | spin_unlock_irqrestore(&suspended_lock, flags); | ||
| 488 | } else { | ||
| 470 | at91_rtc_write_idr(at91_rtc_imr); | 489 | at91_rtc_write_idr(at91_rtc_imr); |
| 490 | } | ||
| 471 | } | 491 | } |
| 472 | return 0; | 492 | return 0; |
| 473 | } | 493 | } |
| 474 | 494 | ||
| 475 | static int at91_rtc_resume(struct device *dev) | 495 | static int at91_rtc_resume(struct device *dev) |
| 476 | { | 496 | { |
| 497 | struct rtc_device *rtc = dev_get_drvdata(dev); | ||
| 498 | |||
| 477 | if (at91_rtc_imr) { | 499 | if (at91_rtc_imr) { |
| 478 | if (device_may_wakeup(dev)) | 500 | if (device_may_wakeup(dev)) { |
| 501 | unsigned long flags; | ||
| 502 | |||
| 503 | spin_lock_irqsave(&suspended_lock, flags); | ||
| 504 | |||
| 505 | if (cached_events) { | ||
| 506 | rtc_update_irq(rtc, 1, cached_events); | ||
| 507 | cached_events = 0; | ||
| 508 | } | ||
| 509 | |||
| 510 | suspended = false; | ||
| 511 | spin_unlock_irqrestore(&suspended_lock, flags); | ||
| 512 | |||
| 479 | disable_irq_wake(irq); | 513 | disable_irq_wake(irq); |
| 480 | else | 514 | } |
| 481 | at91_rtc_write_ier(at91_rtc_imr); | 515 | at91_rtc_write_ier(at91_rtc_imr); |
| 482 | } | 516 | } |
| 483 | return 0; | 517 | return 0; |
| 484 | } | 518 | } |
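The rtc-at91rm9200 changes above follow a common pattern for wakeup-capable shared interrupts: IRQF_COND_SUSPEND lets the handler run while the system is suspending, and in that window the handler caches the event bits, masks the RTC interrupt sources and calls pm_system_wakeup() instead of calling into the RTC core; the resume path then replays the cached events. Below is a minimal user-space sketch of that hand-off, assuming stub functions (rtc_update_irq_stub, pm_system_wakeup_stub) in place of the kernel APIs and a pthread mutex in place of the spinlock.

/* Sketch of the suspend-aware IRQ pattern used above: while suspended, cache
 * events and request a wakeup instead of calling into the RTC core; resume
 * replays whatever was cached.  All *_stub() helpers are mocks.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; /* plays the spinlock's role */
static bool suspended;
static unsigned long cached_events;

static void rtc_update_irq_stub(unsigned long events)
{
    printf("deliver events 0x%02lx to the RTC core\n", events);
}

static void pm_system_wakeup_stub(void)
{
    printf("request system wakeup\n");
}

/* Interrupt handler: called with an event bitmap already read from hardware. */
static void rtc_irq(unsigned long events)
{
    pthread_mutex_lock(&lock);
    if (!suspended) {
        rtc_update_irq_stub(events);
    } else {
        cached_events |= events;        /* remember for resume */
        pm_system_wakeup_stub();        /* abort/complete the suspend */
    }
    pthread_mutex_unlock(&lock);
}

static void rtc_suspend(void)
{
    pthread_mutex_lock(&lock);
    suspended = true;
    pthread_mutex_unlock(&lock);
}

static void rtc_resume(void)
{
    pthread_mutex_lock(&lock);
    if (cached_events) {
        rtc_update_irq_stub(cached_events); /* replay what fired while asleep */
        cached_events = 0;
    }
    suspended = false;
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    rtc_suspend();
    rtc_irq(0x101);   /* alarm fires during suspend: cached, wakeup requested */
    rtc_resume();     /* cached alarm is delivered here */
    return 0;
}

The same suspended/cached-events idea, held per device instead of in file-scope globals, is what the rtc-at91sam9 diff below implements with its cache/flush helper split.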
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c index 2183fd2750ab..5ccaee32df72 100644 --- a/drivers/rtc/rtc-at91sam9.c +++ b/drivers/rtc/rtc-at91sam9.c | |||
| @@ -23,6 +23,7 @@ | |||
| 23 | #include <linux/io.h> | 23 | #include <linux/io.h> |
| 24 | #include <linux/mfd/syscon.h> | 24 | #include <linux/mfd/syscon.h> |
| 25 | #include <linux/regmap.h> | 25 | #include <linux/regmap.h> |
| 26 | #include <linux/suspend.h> | ||
| 26 | #include <linux/clk.h> | 27 | #include <linux/clk.h> |
| 27 | 28 | ||
| 28 | /* | 29 | /* |
| @@ -77,6 +78,9 @@ struct sam9_rtc { | |||
| 77 | unsigned int gpbr_offset; | 78 | unsigned int gpbr_offset; |
| 78 | int irq; | 79 | int irq; |
| 79 | struct clk *sclk; | 80 | struct clk *sclk; |
| 81 | bool suspended; | ||
| 82 | unsigned long events; | ||
| 83 | spinlock_t lock; | ||
| 80 | }; | 84 | }; |
| 81 | 85 | ||
| 82 | #define rtt_readl(rtc, field) \ | 86 | #define rtt_readl(rtc, field) \ |
| @@ -271,14 +275,9 @@ static int at91_rtc_proc(struct device *dev, struct seq_file *seq) | |||
| 271 | return 0; | 275 | return 0; |
| 272 | } | 276 | } |
| 273 | 277 | ||
| 274 | /* | 278 | static irqreturn_t at91_rtc_cache_events(struct sam9_rtc *rtc) |
| 275 | * IRQ handler for the RTC | ||
| 276 | */ | ||
| 277 | static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc) | ||
| 278 | { | 279 | { |
| 279 | struct sam9_rtc *rtc = _rtc; | ||
| 280 | u32 sr, mr; | 280 | u32 sr, mr; |
| 281 | unsigned long events = 0; | ||
| 282 | 281 | ||
| 283 | /* Shared interrupt may be for another device. Note: reading | 282 | /* Shared interrupt may be for another device. Note: reading |
| 284 | * SR clears it, so we must only read it in this irq handler! | 283 | * SR clears it, so we must only read it in this irq handler! |
| @@ -290,18 +289,54 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc) | |||
| 290 | 289 | ||
| 291 | /* alarm status */ | 290 | /* alarm status */ |
| 292 | if (sr & AT91_RTT_ALMS) | 291 | if (sr & AT91_RTT_ALMS) |
| 293 | events |= (RTC_AF | RTC_IRQF); | 292 | rtc->events |= (RTC_AF | RTC_IRQF); |
| 294 | 293 | ||
| 295 | /* timer update/increment */ | 294 | /* timer update/increment */ |
| 296 | if (sr & AT91_RTT_RTTINC) | 295 | if (sr & AT91_RTT_RTTINC) |
| 297 | events |= (RTC_UF | RTC_IRQF); | 296 | rtc->events |= (RTC_UF | RTC_IRQF); |
| 297 | |||
| 298 | return IRQ_HANDLED; | ||
| 299 | } | ||
| 300 | |||
| 301 | static void at91_rtc_flush_events(struct sam9_rtc *rtc) | ||
| 302 | { | ||
| 303 | if (!rtc->events) | ||
| 304 | return; | ||
| 298 | 305 | ||
| 299 | rtc_update_irq(rtc->rtcdev, 1, events); | 306 | rtc_update_irq(rtc->rtcdev, 1, rtc->events); |
| 307 | rtc->events = 0; | ||
| 300 | 308 | ||
| 301 | pr_debug("%s: num=%ld, events=0x%02lx\n", __func__, | 309 | pr_debug("%s: num=%ld, events=0x%02lx\n", __func__, |
| 302 | events >> 8, events & 0x000000FF); | 310 | rtc->events >> 8, rtc->events & 0x000000FF); |
| 311 | } | ||
| 303 | 312 | ||
| 304 | return IRQ_HANDLED; | 313 | /* |
| 314 | * IRQ handler for the RTC | ||
| 315 | */ | ||
| 316 | static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc) | ||
| 317 | { | ||
| 318 | struct sam9_rtc *rtc = _rtc; | ||
| 319 | int ret; | ||
| 320 | |||
| 321 | spin_lock(&rtc->lock); | ||
| 322 | |||
| 323 | ret = at91_rtc_cache_events(rtc); | ||
| 324 | |||
| 325 | /* We're called in suspended state */ | ||
| 326 | if (rtc->suspended) { | ||
| 327 | /* Mask irqs coming from this peripheral */ | ||
| 328 | rtt_writel(rtc, MR, | ||
| 329 | rtt_readl(rtc, MR) & | ||
| 330 | ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN)); | ||
| 331 | /* Trigger a system wakeup */ | ||
| 332 | pm_system_wakeup(); | ||
| 333 | } else { | ||
| 334 | at91_rtc_flush_events(rtc); | ||
| 335 | } | ||
| 336 | |||
| 337 | spin_unlock(&rtc->lock); | ||
| 338 | |||
| 339 | return ret; | ||
| 305 | } | 340 | } |
| 306 | 341 | ||
| 307 | static const struct rtc_class_ops at91_rtc_ops = { | 342 | static const struct rtc_class_ops at91_rtc_ops = { |
| @@ -421,7 +456,8 @@ static int at91_rtc_probe(struct platform_device *pdev) | |||
| 421 | 456 | ||
| 422 | /* register irq handler after we know what name we'll use */ | 457 | /* register irq handler after we know what name we'll use */ |
| 423 | ret = devm_request_irq(&pdev->dev, rtc->irq, at91_rtc_interrupt, | 458 | ret = devm_request_irq(&pdev->dev, rtc->irq, at91_rtc_interrupt, |
| 424 | IRQF_SHARED, dev_name(&rtc->rtcdev->dev), rtc); | 459 | IRQF_SHARED | IRQF_COND_SUSPEND, |
| 460 | dev_name(&rtc->rtcdev->dev), rtc); | ||
| 425 | if (ret) { | 461 | if (ret) { |
| 426 | dev_dbg(&pdev->dev, "can't share IRQ %d?\n", rtc->irq); | 462 | dev_dbg(&pdev->dev, "can't share IRQ %d?\n", rtc->irq); |
| 427 | return ret; | 463 | return ret; |
| @@ -482,7 +518,12 @@ static int at91_rtc_suspend(struct device *dev) | |||
| 482 | rtc->imr = mr & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN); | 518 | rtc->imr = mr & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN); |
| 483 | if (rtc->imr) { | 519 | if (rtc->imr) { |
| 484 | if (device_may_wakeup(dev) && (mr & AT91_RTT_ALMIEN)) { | 520 | if (device_may_wakeup(dev) && (mr & AT91_RTT_ALMIEN)) { |
| 521 | unsigned long flags; | ||
| 522 | |||
| 485 | enable_irq_wake(rtc->irq); | 523 | enable_irq_wake(rtc->irq); |
| 524 | spin_lock_irqsave(&rtc->lock, flags); | ||
| 525 | rtc->suspended = true; | ||
| 526 | spin_unlock_irqrestore(&rtc->lock, flags); | ||
| 486 | /* don't let RTTINC cause wakeups */ | 527 | /* don't let RTTINC cause wakeups */ |
| 487 | if (mr & AT91_RTT_RTTINCIEN) | 528 | if (mr & AT91_RTT_RTTINCIEN) |
| 488 | rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN); | 529 | rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN); |
| @@ -499,10 +540,18 @@ static int at91_rtc_resume(struct device *dev) | |||
| 499 | u32 mr; | 540 | u32 mr; |
| 500 | 541 | ||
| 501 | if (rtc->imr) { | 542 | if (rtc->imr) { |
| 543 | unsigned long flags; | ||
| 544 | |||
| 502 | if (device_may_wakeup(dev)) | 545 | if (device_may_wakeup(dev)) |
| 503 | disable_irq_wake(rtc->irq); | 546 | disable_irq_wake(rtc->irq); |
| 504 | mr = rtt_readl(rtc, MR); | 547 | mr = rtt_readl(rtc, MR); |
| 505 | rtt_writel(rtc, MR, mr | rtc->imr); | 548 | rtt_writel(rtc, MR, mr | rtc->imr); |
| 549 | |||
| 550 | spin_lock_irqsave(&rtc->lock, flags); | ||
| 551 | rtc->suspended = false; | ||
| 552 | at91_rtc_cache_events(rtc); | ||
| 553 | at91_rtc_flush_events(rtc); | ||
| 554 | spin_unlock_irqrestore(&rtc->lock, flags); | ||
| 506 | } | 555 | } |
| 507 | 556 | ||
| 508 | return 0; | 557 | return 0; |
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 4241eeab3386..f4cf6851fae9 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c | |||
| @@ -849,6 +849,7 @@ static struct s3c_rtc_data const s3c2443_rtc_data = { | |||
| 849 | 849 | ||
| 850 | static struct s3c_rtc_data const s3c6410_rtc_data = { | 850 | static struct s3c_rtc_data const s3c6410_rtc_data = { |
| 851 | .max_user_freq = 32768, | 851 | .max_user_freq = 32768, |
| 852 | .needs_src_clk = true, | ||
| 852 | .irq_handler = s3c6410_rtc_irq, | 853 | .irq_handler = s3c6410_rtc_irq, |
| 853 | .set_freq = s3c6410_rtc_setfreq, | 854 | .set_freq = s3c6410_rtc_setfreq, |
| 854 | .enable_tick = s3c6410_rtc_enable_tick, | 855 | .enable_tick = s3c6410_rtc_enable_tick, |
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 96128cb009f3..da212813f2d5 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c | |||
| @@ -547,7 +547,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char | |||
| 547 | * parse input | 547 | * parse input |
| 548 | */ | 548 | */ |
| 549 | num_of_segments = 0; | 549 | num_of_segments = 0; |
| 550 | for (i = 0; ((buf[i] != '\0') && (buf[i] != '\n') && i < count); i++) { | 550 | for (i = 0; (i < count && (buf[i] != '\0') && (buf[i] != '\n')); i++) { |
| 551 | for (j = i; (buf[j] != ':') && | 551 | for (j = i; (buf[j] != ':') && |
| 552 | (buf[j] != '\0') && | 552 | (buf[j] != '\0') && |
| 553 | (buf[j] != '\n') && | 553 | (buf[j] != '\n') && |
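The dcssblk change above is purely a reordering of the loop condition: because && short-circuits left to right, the index bound must be tested before buf[i] is read, otherwise the parser reads one byte past the buffer when the input is exactly count bytes long with no terminator. A tiny sketch of the safe ordering:

/* Sketch: why the bound check has to come first in the loop condition.
 * With "buf[i] != '\0' && i < count" the element is read before the index is
 * validated, which touches buf[count] once i reaches count.
 */
#include <stdio.h>

int main(void)
{
    char buf[4] = { 'a', 'b', 'c', 'd' };   /* deliberately not NUL-terminated */
    size_t count = sizeof(buf);
    size_t i;

    /* Safe: i < count is evaluated first, so buf[count] is never read. */
    for (i = 0; i < count && buf[i] != '\0' && buf[i] != '\n'; i++)
        putchar(buf[i]);
    putchar('\n');

    return 0;
}

The scm_blk_cluster hunk just below fixes the same class of problem as an off-by-one: pos <= msb_count walks one element past the request array, pos < msb_count stays inside it.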
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c index 09db45296eed..7497ddde2dd6 100644 --- a/drivers/s390/block/scm_blk_cluster.c +++ b/drivers/s390/block/scm_blk_cluster.c | |||
| @@ -92,7 +92,7 @@ bool scm_reserve_cluster(struct scm_request *scmrq) | |||
| 92 | add = 0; | 92 | add = 0; |
| 93 | continue; | 93 | continue; |
| 94 | } | 94 | } |
| 95 | for (pos = 0; pos <= iter->aob->request.msb_count; pos++) { | 95 | for (pos = 0; pos < iter->aob->request.msb_count; pos++) { |
| 96 | if (clusters_intersect(req, iter->request[pos]) && | 96 | if (clusters_intersect(req, iter->request[pos]) && |
| 97 | (rq_data_dir(req) == WRITE || | 97 | (rq_data_dir(req) == WRITE || |
| 98 | rq_data_dir(iter->request[pos]) == WRITE)) { | 98 | rq_data_dir(iter->request[pos]) == WRITE)) { |
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index 62b58d38ce2e..60de66252fa2 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c | |||
| @@ -500,6 +500,7 @@ static void sas_revalidate_domain(struct work_struct *work) | |||
| 500 | struct sas_discovery_event *ev = to_sas_discovery_event(work); | 500 | struct sas_discovery_event *ev = to_sas_discovery_event(work); |
| 501 | struct asd_sas_port *port = ev->port; | 501 | struct asd_sas_port *port = ev->port; |
| 502 | struct sas_ha_struct *ha = port->ha; | 502 | struct sas_ha_struct *ha = port->ha; |
| 503 | struct domain_device *ddev = port->port_dev; | ||
| 503 | 504 | ||
| 504 | /* prevent revalidation from finding sata links in recovery */ | 505 | /* prevent revalidation from finding sata links in recovery */ |
| 505 | mutex_lock(&ha->disco_mutex); | 506 | mutex_lock(&ha->disco_mutex); |
| @@ -514,8 +515,9 @@ static void sas_revalidate_domain(struct work_struct *work) | |||
| 514 | SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id, | 515 | SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id, |
| 515 | task_pid_nr(current)); | 516 | task_pid_nr(current)); |
| 516 | 517 | ||
| 517 | if (port->port_dev) | 518 | if (ddev && (ddev->dev_type == SAS_FANOUT_EXPANDER_DEVICE || |
| 518 | res = sas_ex_revalidate_domain(port->port_dev); | 519 | ddev->dev_type == SAS_EDGE_EXPANDER_DEVICE)) |
| 520 | res = sas_ex_revalidate_domain(ddev); | ||
| 519 | 521 | ||
| 520 | SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n", | 522 | SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n", |
| 521 | port->id, task_pid_nr(current), res); | 523 | port->id, task_pid_nr(current), res); |
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index 9af7841f2e8c..06de34001c66 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c | |||
| @@ -764,17 +764,17 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master, | |||
| 764 | (unsigned long long)xfer->rx_dma); | 764 | (unsigned long long)xfer->rx_dma); |
| 765 | } | 765 | } |
| 766 | 766 | ||
| 767 | /* REVISIT: We're waiting for ENDRX before we start the next | 767 | /* REVISIT: We're waiting for RXBUFF before we start the next |
| 768 | * transfer because we need to handle some difficult timing | 768 | * transfer because we need to handle some difficult timing |
| 769 | * issues otherwise. If we wait for ENDTX in one transfer and | 769 | * issues otherwise. If we wait for TXBUFE in one transfer and |
| 770 | * then starts waiting for ENDRX in the next, it's difficult | 770 | * then starts waiting for RXBUFF in the next, it's difficult |
| 771 | * to tell the difference between the ENDRX interrupt we're | 771 | * to tell the difference between the RXBUFF interrupt we're |
| 772 | * actually waiting for and the ENDRX interrupt of the | 772 | * actually waiting for and the RXBUFF interrupt of the |
| 773 | * previous transfer. | 773 | * previous transfer. |
| 774 | * | 774 | * |
| 775 | * It should be doable, though. Just not now... | 775 | * It should be doable, though. Just not now... |
| 776 | */ | 776 | */ |
| 777 | spi_writel(as, IER, SPI_BIT(ENDRX) | SPI_BIT(OVRES)); | 777 | spi_writel(as, IER, SPI_BIT(RXBUFF) | SPI_BIT(OVRES)); |
| 778 | spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN)); | 778 | spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN)); |
| 779 | } | 779 | } |
| 780 | 780 | ||
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index a0197fd4e95c..3ce39d10fafb 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c | |||
| @@ -139,6 +139,9 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws) | |||
| 139 | 1, | 139 | 1, |
| 140 | DMA_MEM_TO_DEV, | 140 | DMA_MEM_TO_DEV, |
| 141 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 141 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
| 142 | if (!txdesc) | ||
| 143 | return NULL; | ||
| 144 | |||
| 142 | txdesc->callback = dw_spi_dma_tx_done; | 145 | txdesc->callback = dw_spi_dma_tx_done; |
| 143 | txdesc->callback_param = dws; | 146 | txdesc->callback_param = dws; |
| 144 | 147 | ||
| @@ -184,6 +187,9 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws) | |||
| 184 | 1, | 187 | 1, |
| 185 | DMA_DEV_TO_MEM, | 188 | DMA_DEV_TO_MEM, |
| 186 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 189 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
| 190 | if (!rxdesc) | ||
| 191 | return NULL; | ||
| 192 | |||
| 187 | rxdesc->callback = dw_spi_dma_rx_done; | 193 | rxdesc->callback = dw_spi_dma_rx_done; |
| 188 | rxdesc->callback_param = dws; | 194 | rxdesc->callback_param = dws; |
| 189 | 195 | ||
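The two spi-dw-mid hunks above add missing NULL checks: dmaengine_prep_slave_sg() can legitimately fail and return NULL (for example under descriptor exhaustion), so the callback fields must not be filled in before the pointer is validated. A minimal sketch of the guard, with prep_descriptor() as a hypothetical stand-in for the dmaengine call:

/* Sketch: never dereference a prepared DMA descriptor before NULL-checking it.
 * prep_descriptor() mocks a prep call that may return NULL under pressure.
 */
#include <stdio.h>
#include <stdlib.h>

struct dma_desc {
    void (*callback)(void *param);
    void *callback_param;
};

static struct dma_desc *prep_descriptor(int fail)
{
    return fail ? NULL : calloc(1, sizeof(struct dma_desc));
}

static void tx_done(void *param)
{
    printf("tx done for %s\n", (const char *)param);
}

static struct dma_desc *prepare_tx(int fail)
{
    struct dma_desc *txdesc = prep_descriptor(fail);

    if (!txdesc)                    /* bail out instead of crashing */
        return NULL;

    txdesc->callback = tx_done;
    txdesc->callback_param = "spi0";
    return txdesc;
}

int main(void)
{
    struct dma_desc *d;

    if (!prepare_tx(1))
        printf("prep failed, fall back / retry\n");

    d = prepare_tx(0);
    d->callback(d->callback_param); /* simulate DMA completion */
    free(d);
    return 0;
}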
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c index 5ba331047cbe..6d331e0db331 100644 --- a/drivers/spi/spi-dw-pci.c +++ b/drivers/spi/spi-dw-pci.c | |||
| @@ -36,13 +36,13 @@ struct spi_pci_desc { | |||
| 36 | 36 | ||
| 37 | static struct spi_pci_desc spi_pci_mid_desc_1 = { | 37 | static struct spi_pci_desc spi_pci_mid_desc_1 = { |
| 38 | .setup = dw_spi_mid_init, | 38 | .setup = dw_spi_mid_init, |
| 39 | .num_cs = 32, | 39 | .num_cs = 5, |
| 40 | .bus_num = 0, | 40 | .bus_num = 0, |
| 41 | }; | 41 | }; |
| 42 | 42 | ||
| 43 | static struct spi_pci_desc spi_pci_mid_desc_2 = { | 43 | static struct spi_pci_desc spi_pci_mid_desc_2 = { |
| 44 | .setup = dw_spi_mid_init, | 44 | .setup = dw_spi_mid_init, |
| 45 | .num_cs = 4, | 45 | .num_cs = 2, |
| 46 | .bus_num = 1, | 46 | .bus_num = 1, |
| 47 | }; | 47 | }; |
| 48 | 48 | ||
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index 5a97a62b298a..4847afba89f4 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c | |||
| @@ -621,14 +621,14 @@ static void spi_hw_init(struct device *dev, struct dw_spi *dws) | |||
| 621 | if (!dws->fifo_len) { | 621 | if (!dws->fifo_len) { |
| 622 | u32 fifo; | 622 | u32 fifo; |
| 623 | 623 | ||
| 624 | for (fifo = 2; fifo <= 256; fifo++) { | 624 | for (fifo = 1; fifo < 256; fifo++) { |
| 625 | dw_writew(dws, DW_SPI_TXFLTR, fifo); | 625 | dw_writew(dws, DW_SPI_TXFLTR, fifo); |
| 626 | if (fifo != dw_readw(dws, DW_SPI_TXFLTR)) | 626 | if (fifo != dw_readw(dws, DW_SPI_TXFLTR)) |
| 627 | break; | 627 | break; |
| 628 | } | 628 | } |
| 629 | dw_writew(dws, DW_SPI_TXFLTR, 0); | 629 | dw_writew(dws, DW_SPI_TXFLTR, 0); |
| 630 | 630 | ||
| 631 | dws->fifo_len = (fifo == 2) ? 0 : fifo - 1; | 631 | dws->fifo_len = (fifo == 1) ? 0 : fifo; |
| 632 | dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len); | 632 | dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len); |
| 633 | } | 633 | } |
| 634 | } | 634 | } |
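The spi-dw hunk above reworks the FIFO-depth probe. Assuming the TX FIFO threshold register latches only values from 0 to depth - 1, which is what the corrected bounds imply, writing increasing thresholds and stopping at the first value that does not read back leaves the loop variable equal to the depth itself. A stand-alone sketch against a mocked register with a hypothetical depth of 16:

/* Sketch of FIFO depth auto-detection: write rising threshold values and stop
 * at the first one the hardware refuses to latch.  txfltr_write/read mock a
 * register that only accepts 0..MOCK_FIFO_DEPTH-1.
 */
#include <stdio.h>

#define MOCK_FIFO_DEPTH 16u

static unsigned int txfltr_reg;

static void txfltr_write(unsigned int v)
{
    if (v < MOCK_FIFO_DEPTH)        /* out-of-range writes are ignored */
        txfltr_reg = v;
}

static unsigned int txfltr_read(void)
{
    return txfltr_reg;
}

int main(void)
{
    unsigned int fifo, fifo_len;

    for (fifo = 1; fifo < 256; fifo++) {
        txfltr_write(fifo);
        if (fifo != txfltr_read())  /* first rejected value == depth */
            break;
    }
    txfltr_write(0);                /* restore the threshold */

    fifo_len = (fifo == 1) ? 0 : fifo;      /* 0 means "no FIFO found" */
    printf("detected FIFO depth: %u\n", fifo_len);  /* prints 16 */
    return 0;
}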
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c index c01567d53581..e649bc7d4c08 100644 --- a/drivers/spi/spi-img-spfi.c +++ b/drivers/spi/spi-img-spfi.c | |||
| @@ -459,6 +459,13 @@ static int img_spfi_transfer_one(struct spi_master *master, | |||
| 459 | unsigned long flags; | 459 | unsigned long flags; |
| 460 | int ret; | 460 | int ret; |
| 461 | 461 | ||
| 462 | if (xfer->len > SPFI_TRANSACTION_TSIZE_MASK) { | ||
| 463 | dev_err(spfi->dev, | ||
| 464 | "Transfer length (%d) is greater than the max supported (%d)", | ||
| 465 | xfer->len, SPFI_TRANSACTION_TSIZE_MASK); | ||
| 466 | return -EINVAL; | ||
| 467 | } | ||
| 468 | |||
| 462 | /* | 469 | /* |
| 463 | * Stop all DMA and reset the controller if the previous transaction | 470 | * Stop all DMA and reset the controller if the previous transaction |
| 464 | * timed-out and never completed it's DMA. | 471 | * timed-out and never completed it's DMA. |
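The img-spfi hunk above rejects a transfer up front when its length cannot be encoded in the controller's transfer-size field, rather than letting the value be silently truncated when written to the register. A minimal sketch of that validation; the mask value here is purely illustrative, not the real SPFI_TRANSACTION_TSIZE_MASK:

/* Sketch: reject a transfer whose length exceeds what the size field encodes.
 * The mask below is a made-up example value.
 */
#include <stdio.h>
#include <errno.h>

#define TSIZE_MASK 0xffffu   /* hypothetical maximum encodable length */

static int check_transfer_len(unsigned int len)
{
    if (len > TSIZE_MASK) {
        fprintf(stderr, "transfer length %u exceeds max %u\n", len, TSIZE_MASK);
        return -EINVAL;
    }
    return 0;
}

int main(void)
{
    printf("%d\n", check_transfer_len(100));      /* 0 */
    printf("%d\n", check_transfer_len(0x20000));  /* -EINVAL */
    return 0;
}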
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index 89ca162801da..ee513a85296b 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c | |||
| @@ -534,12 +534,12 @@ static void giveback(struct pl022 *pl022) | |||
| 534 | pl022->cur_msg = NULL; | 534 | pl022->cur_msg = NULL; |
| 535 | pl022->cur_transfer = NULL; | 535 | pl022->cur_transfer = NULL; |
| 536 | pl022->cur_chip = NULL; | 536 | pl022->cur_chip = NULL; |
| 537 | spi_finalize_current_message(pl022->master); | ||
| 538 | 537 | ||
| 539 | /* disable the SPI/SSP operation */ | 538 | /* disable the SPI/SSP operation */ |
| 540 | writew((readw(SSP_CR1(pl022->virtbase)) & | 539 | writew((readw(SSP_CR1(pl022->virtbase)) & |
| 541 | (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); | 540 | (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); |
| 542 | 541 | ||
| 542 | spi_finalize_current_message(pl022->master); | ||
| 543 | } | 543 | } |
| 544 | 544 | ||
| 545 | /** | 545 | /** |
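The pl022 hunk above changes nothing but ordering: the SSP block is disabled before spi_finalize_current_message() is called, because finalizing may immediately let the SPI core start the next message, which should not race with a still-enabled controller. A small sketch of that ordering, with stand-in functions rather than the driver's real register accesses:

/* Sketch of the ordering fixed above: quiesce the hardware first, only then
 * hand the message back (finalize may kick off the next transfer at once).
 */
#include <stdio.h>
#include <stdbool.h>

static bool controller_enabled = true;

static void disable_controller(void)
{
    controller_enabled = false;           /* clears SSE in SSP_CR1 in the driver */
}

static void finalize_current_message(void)
{
    /* The core may start the next message from here. */
    printf("next message started, controller enabled = %d\n", controller_enabled);
}

int main(void)
{
    disable_controller();                 /* 1: stop SPI/SSP operation */
    finalize_current_message();           /* 2: hand the message back */
    return 0;
}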
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c index 884a716e50cb..5c0616870358 100644 --- a/drivers/spi/spi-ti-qspi.c +++ b/drivers/spi/spi-ti-qspi.c | |||
| @@ -101,6 +101,7 @@ struct ti_qspi { | |||
| 101 | #define QSPI_FLEN(n) ((n - 1) << 0) | 101 | #define QSPI_FLEN(n) ((n - 1) << 0) |
| 102 | 102 | ||
| 103 | /* STATUS REGISTER */ | 103 | /* STATUS REGISTER */ |
| 104 | #define BUSY 0x01 | ||
| 104 | #define WC 0x02 | 105 | #define WC 0x02 |
| 105 | 106 | ||
| 106 | /* INTERRUPT REGISTER */ | 107 | /* INTERRUPT REGISTER */ |
| @@ -199,6 +200,21 @@ static void ti_qspi_restore_ctx(struct ti_qspi *qspi) | |||
| 199 | ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG); | 200 | ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG); |
| 200 | } | 201 | } |
| 201 | 202 | ||
| 203 | static inline u32 qspi_is_busy(struct ti_qspi *qspi) | ||
| 204 | { | ||
| 205 | u32 stat; | ||
| 206 | unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT; | ||
| 207 | |||
| 208 | stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG); | ||
| 209 | while ((stat & BUSY) && time_after(timeout, jiffies)) { | ||
| 210 | cpu_relax(); | ||
| 211 | stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG); | ||
| 212 | } | ||
| 213 | |||
| 214 | WARN(stat & BUSY, "qspi busy\n"); | ||
| 215 | return stat & BUSY; | ||
| 216 | } | ||
| 217 | |||
| 202 | static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) | 218 | static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) |
| 203 | { | 219 | { |
| 204 | int wlen, count; | 220 | int wlen, count; |
| @@ -211,6 +227,9 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) | |||
| 211 | wlen = t->bits_per_word >> 3; /* in bytes */ | 227 | wlen = t->bits_per_word >> 3; /* in bytes */ |
| 212 | 228 | ||
| 213 | while (count) { | 229 | while (count) { |
| 230 | if (qspi_is_busy(qspi)) | ||
| 231 | return -EBUSY; | ||
| 232 | |||
| 214 | switch (wlen) { | 233 | switch (wlen) { |
| 215 | case 1: | 234 | case 1: |
| 216 | dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n", | 235 | dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n", |
| @@ -266,6 +285,9 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t) | |||
| 266 | 285 | ||
| 267 | while (count) { | 286 | while (count) { |
| 268 | dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc); | 287 | dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc); |
| 288 | if (qspi_is_busy(qspi)) | ||
| 289 | return -EBUSY; | ||
| 290 | |||
| 269 | ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); | 291 | ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); |
| 270 | if (!wait_for_completion_timeout(&qspi->transfer_complete, | 292 | if (!wait_for_completion_timeout(&qspi->transfer_complete, |
| 271 | QSPI_COMPLETION_TIMEOUT)) { | 293 | QSPI_COMPLETION_TIMEOUT)) { |
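qspi_is_busy() added above is a bounded busy-wait: poll the status register until the BUSY bit clears or a jiffies deadline passes, warn if the controller is still busy, and let the read/write paths bail out with -EBUSY instead of queueing another word into a busy controller. The sketch below mirrors that shape in user space, with read_status() mocking QSPI_SPI_STATUS_REG and a monotonic-clock deadline standing in for jiffies:

/* Sketch of a bounded BUSY poll, mirroring qspi_is_busy() above. */
#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define BUSY        0x01u
#define TIMEOUT_MS  2000

static int polls;

static uint32_t read_status(void)
{
    /* Pretend the controller stays busy for the first few polls. */
    return (++polls < 5) ? BUSY : 0;
}

static uint64_t now_ms(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Returns non-zero (still busy) on timeout, 0 when the controller went idle. */
static uint32_t wait_not_busy(void)
{
    uint64_t deadline = now_ms() + TIMEOUT_MS;
    uint32_t stat = read_status();

    while ((stat & BUSY) && now_ms() < deadline)
        stat = read_status();           /* cpu_relax() would go here */

    if (stat & BUSY)
        fprintf(stderr, "qspi busy\n"); /* WARN() in the driver */
    return stat & BUSY;
}

int main(void)
{
    uint32_t busy = wait_not_busy();

    printf("busy after wait: %u (after %d polls)\n", busy, polls);
    return 0;
}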
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c index 9800c01e6fb9..3f72451d2de0 100644 --- a/drivers/staging/comedi/drivers/adv_pci1710.c +++ b/drivers/staging/comedi/drivers/adv_pci1710.c | |||
| @@ -426,7 +426,6 @@ static int pci171x_ai_insn_read(struct comedi_device *dev, | |||
| 426 | unsigned int *data) | 426 | unsigned int *data) |
| 427 | { | 427 | { |
| 428 | struct pci1710_private *devpriv = dev->private; | 428 | struct pci1710_private *devpriv = dev->private; |
| 429 | unsigned int chan = CR_CHAN(insn->chanspec); | ||
| 430 | int ret = 0; | 429 | int ret = 0; |
| 431 | int i; | 430 | int i; |
| 432 | 431 | ||
| @@ -447,7 +446,7 @@ static int pci171x_ai_insn_read(struct comedi_device *dev, | |||
| 447 | if (ret) | 446 | if (ret) |
| 448 | break; | 447 | break; |
| 449 | 448 | ||
| 450 | ret = pci171x_ai_read_sample(dev, s, chan, &val); | 449 | ret = pci171x_ai_read_sample(dev, s, 0, &val); |
| 451 | if (ret) | 450 | if (ret) |
| 452 | break; | 451 | break; |
| 453 | 452 | ||
diff --git a/drivers/staging/comedi/drivers/comedi_isadma.c b/drivers/staging/comedi/drivers/comedi_isadma.c index dbdea71d6b95..e856f01ca077 100644 --- a/drivers/staging/comedi/drivers/comedi_isadma.c +++ b/drivers/staging/comedi/drivers/comedi_isadma.c | |||
| @@ -91,9 +91,10 @@ unsigned int comedi_isadma_disable_on_sample(unsigned int dma_chan, | |||
| 91 | stalled++; | 91 | stalled++; |
| 92 | if (stalled > 10) | 92 | if (stalled > 10) |
| 93 | break; | 93 | break; |
| 94 | } else { | ||
| 95 | residue = new_residue; | ||
| 96 | stalled = 0; | ||
| 94 | } | 97 | } |
| 95 | residue = new_residue; | ||
| 96 | stalled = 0; | ||
| 97 | } | 98 | } |
| 98 | return residue; | 99 | return residue; |
| 99 | } | 100 | } |
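The comedi_isadma fix above moves the counter reset into an else branch: the stall counter may only be cleared when the DMA residue actually changed; before the fix it was reset on every pass, so the ten-iteration cut-off could never fire on a stuck channel. A compact sketch of the corrected logic, with read_residue() mocking a residue register that stops making progress:

/* Sketch of the stall logic fixed above: keep polling while the residue is a
 * partial sample, reset the stall counter only on progress, give up after
 * ten consecutive identical reads.
 */
#include <stdio.h>

#define SAMPLE_SIZE 4u

static unsigned int read_residue(void)
{
    static const unsigned int vals[] = { 6, 5, 3 };
    static unsigned int i;
    unsigned int v = vals[i];

    if (i < 2)
        i++;                            /* then stay stuck at 3 */
    return v;
}

int main(void)
{
    unsigned int residue = read_residue();
    unsigned int stalled = 0;

    while (residue % SAMPLE_SIZE) {     /* partial sample: wait for more data */
        unsigned int new_residue = read_residue();

        if (new_residue == residue) {
            if (++stalled > 10)         /* DMA is stuck, stop waiting */
                break;
        } else {
            residue = new_residue;      /* progress: reset the counter */
            stalled = 0;
        }
    }

    printf("final residue %u, stalled %u times\n", residue, stalled);
    return 0;
}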
diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c index e37118321a27..a0906685e27f 100644 --- a/drivers/staging/comedi/drivers/vmk80xx.c +++ b/drivers/staging/comedi/drivers/vmk80xx.c | |||
| @@ -103,11 +103,6 @@ enum vmk80xx_model { | |||
| 103 | VMK8061_MODEL | 103 | VMK8061_MODEL |
| 104 | }; | 104 | }; |
| 105 | 105 | ||
| 106 | struct firmware_version { | ||
| 107 | unsigned char ic3_vers[32]; /* USB-Controller */ | ||
| 108 | unsigned char ic6_vers[32]; /* CPU */ | ||
| 109 | }; | ||
| 110 | |||
| 111 | static const struct comedi_lrange vmk8061_range = { | 106 | static const struct comedi_lrange vmk8061_range = { |
| 112 | 2, { | 107 | 2, { |
| 113 | UNI_RANGE(5), | 108 | UNI_RANGE(5), |
| @@ -156,68 +151,12 @@ static const struct vmk80xx_board vmk80xx_boardinfo[] = { | |||
| 156 | struct vmk80xx_private { | 151 | struct vmk80xx_private { |
| 157 | struct usb_endpoint_descriptor *ep_rx; | 152 | struct usb_endpoint_descriptor *ep_rx; |
| 158 | struct usb_endpoint_descriptor *ep_tx; | 153 | struct usb_endpoint_descriptor *ep_tx; |
| 159 | struct firmware_version fw; | ||
| 160 | struct semaphore limit_sem; | 154 | struct semaphore limit_sem; |
| 161 | unsigned char *usb_rx_buf; | 155 | unsigned char *usb_rx_buf; |
| 162 | unsigned char *usb_tx_buf; | 156 | unsigned char *usb_tx_buf; |
| 163 | enum vmk80xx_model model; | 157 | enum vmk80xx_model model; |
| 164 | }; | 158 | }; |
| 165 | 159 | ||
| 166 | static int vmk80xx_check_data_link(struct comedi_device *dev) | ||
| 167 | { | ||
| 168 | struct vmk80xx_private *devpriv = dev->private; | ||
| 169 | struct usb_device *usb = comedi_to_usb_dev(dev); | ||
| 170 | unsigned int tx_pipe; | ||
| 171 | unsigned int rx_pipe; | ||
| 172 | unsigned char tx[1]; | ||
| 173 | unsigned char rx[2]; | ||
| 174 | |||
| 175 | tx_pipe = usb_sndbulkpipe(usb, 0x01); | ||
| 176 | rx_pipe = usb_rcvbulkpipe(usb, 0x81); | ||
| 177 | |||
| 178 | tx[0] = VMK8061_CMD_RD_PWR_STAT; | ||
| 179 | |||
| 180 | /* | ||
| 181 | * Check that IC6 (PIC16F871) is powered and | ||
| 182 | * running and the data link between IC3 and | ||
| 183 | * IC6 is working properly | ||
| 184 | */ | ||
| 185 | usb_bulk_msg(usb, tx_pipe, tx, 1, NULL, devpriv->ep_tx->bInterval); | ||
| 186 | usb_bulk_msg(usb, rx_pipe, rx, 2, NULL, HZ * 10); | ||
| 187 | |||
| 188 | return (int)rx[1]; | ||
| 189 | } | ||
| 190 | |||
| 191 | static void vmk80xx_read_eeprom(struct comedi_device *dev, int flag) | ||
| 192 | { | ||
| 193 | struct vmk80xx_private *devpriv = dev->private; | ||
| 194 | struct usb_device *usb = comedi_to_usb_dev(dev); | ||
| 195 | unsigned int tx_pipe; | ||
| 196 | unsigned int rx_pipe; | ||
| 197 | unsigned char tx[1]; | ||
| 198 | unsigned char rx[64]; | ||
| 199 | int cnt; | ||
| 200 | |||
| 201 | tx_pipe = usb_sndbulkpipe(usb, 0x01); | ||
| 202 | rx_pipe = usb_rcvbulkpipe(usb, 0x81); | ||
| 203 | |||
| 204 | tx[0] = VMK8061_CMD_RD_VERSION; | ||
| 205 | |||
| 206 | /* | ||
| 207 | * Read the firmware version info of IC3 and | ||
| 208 | * IC6 from the internal EEPROM of the IC | ||
| 209 | */ | ||
| 210 | usb_bulk_msg(usb, tx_pipe, tx, 1, NULL, devpriv->ep_tx->bInterval); | ||
| 211 | usb_bulk_msg(usb, rx_pipe, rx, 64, &cnt, HZ * 10); | ||
| 212 | |||
| 213 | rx[cnt] = '\0'; | ||
| 214 | |||
| 215 | if (flag & IC3_VERSION) | ||
| 216 | strncpy(devpriv->fw.ic3_vers, rx + 1, 24); | ||
| 217 | else /* IC6_VERSION */ | ||
| 218 | strncpy(devpriv->fw.ic6_vers, rx + 25, 24); | ||
| 219 | } | ||
| 220 | |||
| 221 | static void vmk80xx_do_bulk_msg(struct comedi_device *dev) | 160 | static void vmk80xx_do_bulk_msg(struct comedi_device *dev) |
| 222 | { | 161 | { |
| 223 | struct vmk80xx_private *devpriv = dev->private; | 162 | struct vmk80xx_private *devpriv = dev->private; |
| @@ -878,16 +817,6 @@ static int vmk80xx_auto_attach(struct comedi_device *dev, | |||
| 878 | 817 | ||
| 879 | usb_set_intfdata(intf, devpriv); | 818 | usb_set_intfdata(intf, devpriv); |
| 880 | 819 | ||
| 881 | if (devpriv->model == VMK8061_MODEL) { | ||
| 882 | vmk80xx_read_eeprom(dev, IC3_VERSION); | ||
| 883 | dev_info(&intf->dev, "%s\n", devpriv->fw.ic3_vers); | ||
| 884 | |||
| 885 | if (vmk80xx_check_data_link(dev)) { | ||
| 886 | vmk80xx_read_eeprom(dev, IC6_VERSION); | ||
| 887 | dev_info(&intf->dev, "%s\n", devpriv->fw.ic6_vers); | ||
| 888 | } | ||
| 889 | } | ||
| 890 | |||
| 891 | if (devpriv->model == VMK8055_MODEL) | 820 | if (devpriv->model == VMK8055_MODEL) |
| 892 | vmk80xx_reset_device(dev); | 821 | vmk80xx_reset_device(dev); |
| 893 | 822 | ||
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c index d9d6fad7cb00..816174388f13 100644 --- a/drivers/staging/iio/adc/mxs-lradc.c +++ b/drivers/staging/iio/adc/mxs-lradc.c | |||
| @@ -214,11 +214,17 @@ struct mxs_lradc { | |||
| 214 | unsigned long is_divided; | 214 | unsigned long is_divided; |
| 215 | 215 | ||
| 216 | /* | 216 | /* |
| 217 | * Touchscreen LRADC channels receives a private slot in the CTRL4 | 217 | * When the touchscreen is enabled, we give it two private virtual |
| 218 | * register, the slot #7. Therefore only 7 slots instead of 8 in the | 218 | * channels: #6 and #7. This means that only 6 virtual channels (instead |
| 219 | * CTRL4 register can be mapped to LRADC channels when using the | 219 | * of 8) will be available for buffered capture. |
| 220 | * touchscreen. | 220 | */ |
| 221 | * | 221 | #define TOUCHSCREEN_VCHANNEL1 7 |
| 222 | #define TOUCHSCREEN_VCHANNEL2 6 | ||
| 223 | #define BUFFER_VCHANS_LIMITED 0x3f | ||
| 224 | #define BUFFER_VCHANS_ALL 0xff | ||
| 225 | u8 buffer_vchans; | ||
| 226 | |||
| 227 | /* | ||
| 222 | * Furthermore, certain LRADC channels are shared between touchscreen | 228 | * Furthermore, certain LRADC channels are shared between touchscreen |
| 223 | * and/or touch-buttons and generic LRADC block. Therefore when using | 229 | * and/or touch-buttons and generic LRADC block. Therefore when using |
| 224 | * either of these, these channels are not available for the regular | 230 | * either of these, these channels are not available for the regular |
| @@ -342,6 +348,9 @@ struct mxs_lradc { | |||
| 342 | #define LRADC_CTRL4 0x140 | 348 | #define LRADC_CTRL4 0x140 |
| 343 | #define LRADC_CTRL4_LRADCSELECT_MASK(n) (0xf << ((n) * 4)) | 349 | #define LRADC_CTRL4_LRADCSELECT_MASK(n) (0xf << ((n) * 4)) |
| 344 | #define LRADC_CTRL4_LRADCSELECT_OFFSET(n) ((n) * 4) | 350 | #define LRADC_CTRL4_LRADCSELECT_OFFSET(n) ((n) * 4) |
| 351 | #define LRADC_CTRL4_LRADCSELECT(n, x) \ | ||
| 352 | (((x) << LRADC_CTRL4_LRADCSELECT_OFFSET(n)) & \ | ||
| 353 | LRADC_CTRL4_LRADCSELECT_MASK(n)) | ||
| 345 | 354 | ||
| 346 | #define LRADC_RESOLUTION 12 | 355 | #define LRADC_RESOLUTION 12 |
| 347 | #define LRADC_SINGLE_SAMPLE_MASK ((1 << LRADC_RESOLUTION) - 1) | 356 | #define LRADC_SINGLE_SAMPLE_MASK ((1 << LRADC_RESOLUTION) - 1) |
| @@ -416,6 +425,14 @@ static bool mxs_lradc_check_touch_event(struct mxs_lradc *lradc) | |||
| 416 | LRADC_STATUS_TOUCH_DETECT_RAW); | 425 | LRADC_STATUS_TOUCH_DETECT_RAW); |
| 417 | } | 426 | } |
| 418 | 427 | ||
| 428 | static void mxs_lradc_map_channel(struct mxs_lradc *lradc, unsigned vch, | ||
| 429 | unsigned ch) | ||
| 430 | { | ||
| 431 | mxs_lradc_reg_clear(lradc, LRADC_CTRL4_LRADCSELECT_MASK(vch), | ||
| 432 | LRADC_CTRL4); | ||
| 433 | mxs_lradc_reg_set(lradc, LRADC_CTRL4_LRADCSELECT(vch, ch), LRADC_CTRL4); | ||
| 434 | } | ||
| 435 | |||
| 419 | static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch) | 436 | static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch) |
| 420 | { | 437 | { |
| 421 | /* | 438 | /* |
| @@ -450,12 +467,8 @@ static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch) | |||
| 450 | LRADC_DELAY_DELAY(lradc->over_sample_delay - 1), | 467 | LRADC_DELAY_DELAY(lradc->over_sample_delay - 1), |
| 451 | LRADC_DELAY(3)); | 468 | LRADC_DELAY(3)); |
| 452 | 469 | ||
| 453 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(2) | | 470 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(ch), LRADC_CTRL1); |
| 454 | LRADC_CTRL1_LRADC_IRQ(3) | LRADC_CTRL1_LRADC_IRQ(4) | | ||
| 455 | LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1); | ||
| 456 | 471 | ||
| 457 | /* wake us again, when the complete conversion is done */ | ||
| 458 | mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(ch), LRADC_CTRL1); | ||
| 459 | /* | 472 | /* |
| 460 | * after changing the touchscreen plates setting | 473 | * after changing the touchscreen plates setting |
| 461 | * the signals need some initial time to settle. Start the | 474 | * the signals need some initial time to settle. Start the |
| @@ -509,12 +522,8 @@ static void mxs_lradc_setup_ts_pressure(struct mxs_lradc *lradc, unsigned ch1, | |||
| 509 | LRADC_DELAY_DELAY(lradc->over_sample_delay - 1), | 522 | LRADC_DELAY_DELAY(lradc->over_sample_delay - 1), |
| 510 | LRADC_DELAY(3)); | 523 | LRADC_DELAY(3)); |
| 511 | 524 | ||
| 512 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(2) | | 525 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(ch2), LRADC_CTRL1); |
| 513 | LRADC_CTRL1_LRADC_IRQ(3) | LRADC_CTRL1_LRADC_IRQ(4) | | ||
| 514 | LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1); | ||
| 515 | 526 | ||
| 516 | /* wake us again, when the conversions are done */ | ||
| 517 | mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(ch2), LRADC_CTRL1); | ||
| 518 | /* | 527 | /* |
| 519 | * after changing the touchscreen plates setting | 528 | * after changing the touchscreen plates setting |
| 520 | * the signals need some initial time to settle. Start the | 529 | * the signals need some initial time to settle. Start the |
| @@ -580,36 +589,6 @@ static unsigned mxs_lradc_read_ts_pressure(struct mxs_lradc *lradc, | |||
| 580 | #define TS_CH_XM 4 | 589 | #define TS_CH_XM 4 |
| 581 | #define TS_CH_YM 5 | 590 | #define TS_CH_YM 5 |
| 582 | 591 | ||
| 583 | static int mxs_lradc_read_ts_channel(struct mxs_lradc *lradc) | ||
| 584 | { | ||
| 585 | u32 reg; | ||
| 586 | int val; | ||
| 587 | |||
| 588 | reg = readl(lradc->base + LRADC_CTRL1); | ||
| 589 | |||
| 590 | /* only channels 3 to 5 are of interest here */ | ||
| 591 | if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_YP)) { | ||
| 592 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_YP) | | ||
| 593 | LRADC_CTRL1_LRADC_IRQ(TS_CH_YP), LRADC_CTRL1); | ||
| 594 | val = mxs_lradc_read_raw_channel(lradc, TS_CH_YP); | ||
| 595 | } else if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_XM)) { | ||
| 596 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_XM) | | ||
| 597 | LRADC_CTRL1_LRADC_IRQ(TS_CH_XM), LRADC_CTRL1); | ||
| 598 | val = mxs_lradc_read_raw_channel(lradc, TS_CH_XM); | ||
| 599 | } else if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_YM)) { | ||
| 600 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_YM) | | ||
| 601 | LRADC_CTRL1_LRADC_IRQ(TS_CH_YM), LRADC_CTRL1); | ||
| 602 | val = mxs_lradc_read_raw_channel(lradc, TS_CH_YM); | ||
| 603 | } else { | ||
| 604 | return -EIO; | ||
| 605 | } | ||
| 606 | |||
| 607 | mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2)); | ||
| 608 | mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3)); | ||
| 609 | |||
| 610 | return val; | ||
| 611 | } | ||
| 612 | |||
| 613 | /* | 592 | /* |
| 614 | * YP(open)--+-------------+ | 593 | * YP(open)--+-------------+ |
| 615 | * | |--+ | 594 | * | |--+ |
| @@ -653,7 +632,8 @@ static void mxs_lradc_prepare_x_pos(struct mxs_lradc *lradc) | |||
| 653 | mxs_lradc_reg_set(lradc, mxs_lradc_drive_x_plate(lradc), LRADC_CTRL0); | 632 | mxs_lradc_reg_set(lradc, mxs_lradc_drive_x_plate(lradc), LRADC_CTRL0); |
| 654 | 633 | ||
| 655 | lradc->cur_plate = LRADC_SAMPLE_X; | 634 | lradc->cur_plate = LRADC_SAMPLE_X; |
| 656 | mxs_lradc_setup_ts_channel(lradc, TS_CH_YP); | 635 | mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_YP); |
| 636 | mxs_lradc_setup_ts_channel(lradc, TOUCHSCREEN_VCHANNEL1); | ||
| 657 | } | 637 | } |
| 658 | 638 | ||
| 659 | /* | 639 | /* |
| @@ -674,7 +654,8 @@ static void mxs_lradc_prepare_y_pos(struct mxs_lradc *lradc) | |||
| 674 | mxs_lradc_reg_set(lradc, mxs_lradc_drive_y_plate(lradc), LRADC_CTRL0); | 654 | mxs_lradc_reg_set(lradc, mxs_lradc_drive_y_plate(lradc), LRADC_CTRL0); |
| 675 | 655 | ||
| 676 | lradc->cur_plate = LRADC_SAMPLE_Y; | 656 | lradc->cur_plate = LRADC_SAMPLE_Y; |
| 677 | mxs_lradc_setup_ts_channel(lradc, TS_CH_XM); | 657 | mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_XM); |
| 658 | mxs_lradc_setup_ts_channel(lradc, TOUCHSCREEN_VCHANNEL1); | ||
| 678 | } | 659 | } |
| 679 | 660 | ||
| 680 | /* | 661 | /* |
| @@ -695,7 +676,10 @@ static void mxs_lradc_prepare_pressure(struct mxs_lradc *lradc) | |||
| 695 | mxs_lradc_reg_set(lradc, mxs_lradc_drive_pressure(lradc), LRADC_CTRL0); | 676 | mxs_lradc_reg_set(lradc, mxs_lradc_drive_pressure(lradc), LRADC_CTRL0); |
| 696 | 677 | ||
| 697 | lradc->cur_plate = LRADC_SAMPLE_PRESSURE; | 678 | lradc->cur_plate = LRADC_SAMPLE_PRESSURE; |
| 698 | mxs_lradc_setup_ts_pressure(lradc, TS_CH_XP, TS_CH_YM); | 679 | mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_YM); |
| 680 | mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL2, TS_CH_XP); | ||
| 681 | mxs_lradc_setup_ts_pressure(lradc, TOUCHSCREEN_VCHANNEL2, | ||
| 682 | TOUCHSCREEN_VCHANNEL1); | ||
| 699 | } | 683 | } |
| 700 | 684 | ||
| 701 | static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc) | 685 | static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc) |
| @@ -708,6 +692,19 @@ static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc) | |||
| 708 | mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1); | 692 | mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1); |
| 709 | } | 693 | } |
| 710 | 694 | ||
| 695 | static void mxs_lradc_start_touch_event(struct mxs_lradc *lradc) | ||
| 696 | { | ||
| 697 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, | ||
| 698 | LRADC_CTRL1); | ||
| 699 | mxs_lradc_reg_set(lradc, | ||
| 700 | LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1), LRADC_CTRL1); | ||
| 701 | /* | ||
| 702 | * start with the Y-pos, because it uses nearly the same plate | ||
| 703 | * settings like the touch detection | ||
| 704 | */ | ||
| 705 | mxs_lradc_prepare_y_pos(lradc); | ||
| 706 | } | ||
| 707 | |||
| 711 | static void mxs_lradc_report_ts_event(struct mxs_lradc *lradc) | 708 | static void mxs_lradc_report_ts_event(struct mxs_lradc *lradc) |
| 712 | { | 709 | { |
| 713 | input_report_abs(lradc->ts_input, ABS_X, lradc->ts_x_pos); | 710 | input_report_abs(lradc->ts_input, ABS_X, lradc->ts_x_pos); |
| @@ -725,10 +722,12 @@ static void mxs_lradc_complete_touch_event(struct mxs_lradc *lradc) | |||
| 725 | * start a dummy conversion to burn time to settle the signals | 722 | * start a dummy conversion to burn time to settle the signals |
| 726 | * note: we are not interested in the conversion's value | 723 | * note: we are not interested in the conversion's value |
| 727 | */ | 724 | */ |
| 728 | mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(5)); | 725 | mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(TOUCHSCREEN_VCHANNEL1)); |
| 729 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1); | 726 | mxs_lradc_reg_clear(lradc, |
| 730 | mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(5), LRADC_CTRL1); | 727 | LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) | |
| 731 | mxs_lradc_reg_wrt(lradc, LRADC_DELAY_TRIGGER(1 << 5) | | 728 | LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2), LRADC_CTRL1); |
| 729 | mxs_lradc_reg_wrt(lradc, | ||
| 730 | LRADC_DELAY_TRIGGER(1 << TOUCHSCREEN_VCHANNEL1) | | ||
| 732 | LRADC_DELAY_KICK | LRADC_DELAY_DELAY(10), /* waste 5 ms */ | 731 | LRADC_DELAY_KICK | LRADC_DELAY_DELAY(10), /* waste 5 ms */ |
| 733 | LRADC_DELAY(2)); | 732 | LRADC_DELAY(2)); |
| 734 | } | 733 | } |
| @@ -760,59 +759,45 @@ static void mxs_lradc_finish_touch_event(struct mxs_lradc *lradc, bool valid) | |||
| 760 | 759 | ||
| 761 | /* if it is released, wait for the next touch via IRQ */ | 760 | /* if it is released, wait for the next touch via IRQ */ |
| 762 | lradc->cur_plate = LRADC_TOUCH; | 761 | lradc->cur_plate = LRADC_TOUCH; |
| 763 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ, LRADC_CTRL1); | 762 | mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2)); |
| 763 | mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3)); | ||
| 764 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ | | ||
| 765 | LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1) | | ||
| 766 | LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1), LRADC_CTRL1); | ||
| 764 | mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1); | 767 | mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1); |
| 765 | } | 768 | } |
| 766 | 769 | ||
| 767 | /* touchscreen's state machine */ | 770 | /* touchscreen's state machine */ |
| 768 | static void mxs_lradc_handle_touch(struct mxs_lradc *lradc) | 771 | static void mxs_lradc_handle_touch(struct mxs_lradc *lradc) |
| 769 | { | 772 | { |
| 770 | int val; | ||
| 771 | |||
| 772 | switch (lradc->cur_plate) { | 773 | switch (lradc->cur_plate) { |
| 773 | case LRADC_TOUCH: | 774 | case LRADC_TOUCH: |
| 774 | /* | 775 | if (mxs_lradc_check_touch_event(lradc)) |
| 775 | * start with the Y-pos, because it uses nearly the same plate | 776 | mxs_lradc_start_touch_event(lradc); |
| 776 | * settings like the touch detection | ||
| 777 | */ | ||
| 778 | if (mxs_lradc_check_touch_event(lradc)) { | ||
| 779 | mxs_lradc_reg_clear(lradc, | ||
| 780 | LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, | ||
| 781 | LRADC_CTRL1); | ||
| 782 | mxs_lradc_prepare_y_pos(lradc); | ||
| 783 | } | ||
| 784 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ, | 777 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ, |
| 785 | LRADC_CTRL1); | 778 | LRADC_CTRL1); |
| 786 | return; | 779 | return; |
| 787 | 780 | ||
| 788 | case LRADC_SAMPLE_Y: | 781 | case LRADC_SAMPLE_Y: |
| 789 | val = mxs_lradc_read_ts_channel(lradc); | 782 | lradc->ts_y_pos = mxs_lradc_read_raw_channel(lradc, |
| 790 | if (val < 0) { | 783 | TOUCHSCREEN_VCHANNEL1); |
| 791 | mxs_lradc_enable_touch_detection(lradc); /* re-start */ | ||
| 792 | return; | ||
| 793 | } | ||
| 794 | lradc->ts_y_pos = val; | ||
| 795 | mxs_lradc_prepare_x_pos(lradc); | 784 | mxs_lradc_prepare_x_pos(lradc); |
| 796 | return; | 785 | return; |
| 797 | 786 | ||
| 798 | case LRADC_SAMPLE_X: | 787 | case LRADC_SAMPLE_X: |
| 799 | val = mxs_lradc_read_ts_channel(lradc); | 788 | lradc->ts_x_pos = mxs_lradc_read_raw_channel(lradc, |
| 800 | if (val < 0) { | 789 | TOUCHSCREEN_VCHANNEL1); |
| 801 | mxs_lradc_enable_touch_detection(lradc); /* re-start */ | ||
| 802 | return; | ||
| 803 | } | ||
| 804 | lradc->ts_x_pos = val; | ||
| 805 | mxs_lradc_prepare_pressure(lradc); | 790 | mxs_lradc_prepare_pressure(lradc); |
| 806 | return; | 791 | return; |
| 807 | 792 | ||
| 808 | case LRADC_SAMPLE_PRESSURE: | 793 | case LRADC_SAMPLE_PRESSURE: |
| 809 | lradc->ts_pressure = | 794 | lradc->ts_pressure = mxs_lradc_read_ts_pressure(lradc, |
| 810 | mxs_lradc_read_ts_pressure(lradc, TS_CH_XP, TS_CH_YM); | 795 | TOUCHSCREEN_VCHANNEL2, |
| 796 | TOUCHSCREEN_VCHANNEL1); | ||
| 811 | mxs_lradc_complete_touch_event(lradc); | 797 | mxs_lradc_complete_touch_event(lradc); |
| 812 | return; | 798 | return; |
| 813 | 799 | ||
| 814 | case LRADC_SAMPLE_VALID: | 800 | case LRADC_SAMPLE_VALID: |
| 815 | val = mxs_lradc_read_ts_channel(lradc); /* ignore the value */ | ||
| 816 | mxs_lradc_finish_touch_event(lradc, 1); | 801 | mxs_lradc_finish_touch_event(lradc, 1); |
| 817 | break; | 802 | break; |
| 818 | } | 803 | } |
| @@ -844,9 +829,9 @@ static int mxs_lradc_read_single(struct iio_dev *iio_dev, int chan, int *val) | |||
| 844 | * used if doing raw sampling. | 829 | * used if doing raw sampling. |
| 845 | */ | 830 | */ |
| 846 | if (lradc->soc == IMX28_LRADC) | 831 | if (lradc->soc == IMX28_LRADC) |
| 847 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK, | 832 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(0), |
| 848 | LRADC_CTRL1); | 833 | LRADC_CTRL1); |
| 849 | mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0); | 834 | mxs_lradc_reg_clear(lradc, 0x1, LRADC_CTRL0); |
| 850 | 835 | ||
| 851 | /* Enable / disable the divider per requirement */ | 836 | /* Enable / disable the divider per requirement */ |
| 852 | if (test_bit(chan, &lradc->is_divided)) | 837 | if (test_bit(chan, &lradc->is_divided)) |
| @@ -1090,9 +1075,8 @@ static void mxs_lradc_disable_ts(struct mxs_lradc *lradc) | |||
| 1090 | { | 1075 | { |
| 1091 | /* stop all interrupts from firing */ | 1076 | /* stop all interrupts from firing */ |
| 1092 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN | | 1077 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN | |
| 1093 | LRADC_CTRL1_LRADC_IRQ_EN(2) | LRADC_CTRL1_LRADC_IRQ_EN(3) | | 1078 | LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1) | |
| 1094 | LRADC_CTRL1_LRADC_IRQ_EN(4) | LRADC_CTRL1_LRADC_IRQ_EN(5), | 1079 | LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL2), LRADC_CTRL1); |
| 1095 | LRADC_CTRL1); | ||
| 1096 | 1080 | ||
| 1097 | /* Power-down touchscreen touch-detect circuitry. */ | 1081 | /* Power-down touchscreen touch-detect circuitry. */ |
| 1098 | mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0); | 1082 | mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0); |
| @@ -1158,26 +1142,31 @@ static irqreturn_t mxs_lradc_handle_irq(int irq, void *data) | |||
| 1158 | struct iio_dev *iio = data; | 1142 | struct iio_dev *iio = data; |
| 1159 | struct mxs_lradc *lradc = iio_priv(iio); | 1143 | struct mxs_lradc *lradc = iio_priv(iio); |
| 1160 | unsigned long reg = readl(lradc->base + LRADC_CTRL1); | 1144 | unsigned long reg = readl(lradc->base + LRADC_CTRL1); |
| 1145 | uint32_t clr_irq = mxs_lradc_irq_mask(lradc); | ||
| 1161 | const uint32_t ts_irq_mask = | 1146 | const uint32_t ts_irq_mask = |
| 1162 | LRADC_CTRL1_TOUCH_DETECT_IRQ | | 1147 | LRADC_CTRL1_TOUCH_DETECT_IRQ | |
| 1163 | LRADC_CTRL1_LRADC_IRQ(2) | | 1148 | LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) | |
| 1164 | LRADC_CTRL1_LRADC_IRQ(3) | | 1149 | LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2); |
| 1165 | LRADC_CTRL1_LRADC_IRQ(4) | | ||
| 1166 | LRADC_CTRL1_LRADC_IRQ(5); | ||
| 1167 | 1150 | ||
| 1168 | if (!(reg & mxs_lradc_irq_mask(lradc))) | 1151 | if (!(reg & mxs_lradc_irq_mask(lradc))) |
| 1169 | return IRQ_NONE; | 1152 | return IRQ_NONE; |
| 1170 | 1153 | ||
| 1171 | if (lradc->use_touchscreen && (reg & ts_irq_mask)) | 1154 | if (lradc->use_touchscreen && (reg & ts_irq_mask)) { |
| 1172 | mxs_lradc_handle_touch(lradc); | 1155 | mxs_lradc_handle_touch(lradc); |
| 1173 | 1156 | ||
| 1174 | if (iio_buffer_enabled(iio)) | 1157 | /* Make sure we don't clear the next conversion's interrupt. */ |
| 1175 | iio_trigger_poll(iio->trig); | 1158 | clr_irq &= ~(LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) | |
| 1176 | else if (reg & LRADC_CTRL1_LRADC_IRQ(0)) | 1159 | LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2)); |
| 1160 | } | ||
| 1161 | |||
| 1162 | if (iio_buffer_enabled(iio)) { | ||
| 1163 | if (reg & lradc->buffer_vchans) | ||
| 1164 | iio_trigger_poll(iio->trig); | ||
| 1165 | } else if (reg & LRADC_CTRL1_LRADC_IRQ(0)) { | ||
| 1177 | complete(&lradc->completion); | 1166 | complete(&lradc->completion); |
| 1167 | } | ||
| 1178 | 1168 | ||
| 1179 | mxs_lradc_reg_clear(lradc, reg & mxs_lradc_irq_mask(lradc), | 1169 | mxs_lradc_reg_clear(lradc, reg & clr_irq, LRADC_CTRL1); |
| 1180 | LRADC_CTRL1); | ||
| 1181 | 1170 | ||
| 1182 | return IRQ_HANDLED; | 1171 | return IRQ_HANDLED; |
| 1183 | } | 1172 | } |
| @@ -1289,9 +1278,10 @@ static int mxs_lradc_buffer_preenable(struct iio_dev *iio) | |||
| 1289 | } | 1278 | } |
| 1290 | 1279 | ||
| 1291 | if (lradc->soc == IMX28_LRADC) | 1280 | if (lradc->soc == IMX28_LRADC) |
| 1292 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK, | 1281 | mxs_lradc_reg_clear(lradc, |
| 1293 | LRADC_CTRL1); | 1282 | lradc->buffer_vchans << LRADC_CTRL1_LRADC_IRQ_EN_OFFSET, |
| 1294 | mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0); | 1283 | LRADC_CTRL1); |
| 1284 | mxs_lradc_reg_clear(lradc, lradc->buffer_vchans, LRADC_CTRL0); | ||
| 1295 | 1285 | ||
| 1296 | for_each_set_bit(chan, iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS) { | 1286 | for_each_set_bit(chan, iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS) { |
| 1297 | ctrl4_set |= chan << LRADC_CTRL4_LRADCSELECT_OFFSET(ofs); | 1287 | ctrl4_set |= chan << LRADC_CTRL4_LRADCSELECT_OFFSET(ofs); |
| @@ -1324,10 +1314,11 @@ static int mxs_lradc_buffer_postdisable(struct iio_dev *iio) | |||
| 1324 | mxs_lradc_reg_clear(lradc, LRADC_DELAY_TRIGGER_LRADCS_MASK | | 1314 | mxs_lradc_reg_clear(lradc, LRADC_DELAY_TRIGGER_LRADCS_MASK | |
| 1325 | LRADC_DELAY_KICK, LRADC_DELAY(0)); | 1315 | LRADC_DELAY_KICK, LRADC_DELAY(0)); |
| 1326 | 1316 | ||
| 1327 | mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0); | 1317 | mxs_lradc_reg_clear(lradc, lradc->buffer_vchans, LRADC_CTRL0); |
| 1328 | if (lradc->soc == IMX28_LRADC) | 1318 | if (lradc->soc == IMX28_LRADC) |
| 1329 | mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK, | 1319 | mxs_lradc_reg_clear(lradc, |
| 1330 | LRADC_CTRL1); | 1320 | lradc->buffer_vchans << LRADC_CTRL1_LRADC_IRQ_EN_OFFSET, |
| 1321 | LRADC_CTRL1); | ||
| 1331 | 1322 | ||
| 1332 | kfree(lradc->buffer); | 1323 | kfree(lradc->buffer); |
| 1333 | mutex_unlock(&lradc->lock); | 1324 | mutex_unlock(&lradc->lock); |
| @@ -1353,7 +1344,7 @@ static bool mxs_lradc_validate_scan_mask(struct iio_dev *iio, | |||
| 1353 | if (lradc->use_touchbutton) | 1344 | if (lradc->use_touchbutton) |
| 1354 | rsvd_chans++; | 1345 | rsvd_chans++; |
| 1355 | if (lradc->use_touchscreen) | 1346 | if (lradc->use_touchscreen) |
| 1356 | rsvd_chans++; | 1347 | rsvd_chans += 2; |
| 1357 | 1348 | ||
| 1358 | /* Test for attempts to map channels with special mode of operation. */ | 1349 | /* Test for attempts to map channels with special mode of operation. */ |
| 1359 | if (bitmap_intersects(mask, &rsvd_mask, LRADC_MAX_TOTAL_CHANS)) | 1350 | if (bitmap_intersects(mask, &rsvd_mask, LRADC_MAX_TOTAL_CHANS)) |
| @@ -1413,6 +1404,13 @@ static const struct iio_chan_spec mxs_lradc_chan_spec[] = { | |||
| 1413 | .channel = 8, | 1404 | .channel = 8, |
| 1414 | .scan_type = {.sign = 'u', .realbits = 18, .storagebits = 32,}, | 1405 | .scan_type = {.sign = 'u', .realbits = 18, .storagebits = 32,}, |
| 1415 | }, | 1406 | }, |
| 1407 | /* Hidden channel to keep indexes */ | ||
| 1408 | { | ||
| 1409 | .type = IIO_TEMP, | ||
| 1410 | .indexed = 1, | ||
| 1411 | .scan_index = -1, | ||
| 1412 | .channel = 9, | ||
| 1413 | }, | ||
| 1416 | MXS_ADC_CHAN(10, IIO_VOLTAGE), /* VDDIO */ | 1414 | MXS_ADC_CHAN(10, IIO_VOLTAGE), /* VDDIO */ |
| 1417 | MXS_ADC_CHAN(11, IIO_VOLTAGE), /* VTH */ | 1415 | MXS_ADC_CHAN(11, IIO_VOLTAGE), /* VTH */ |
| 1418 | MXS_ADC_CHAN(12, IIO_VOLTAGE), /* VDDA */ | 1416 | MXS_ADC_CHAN(12, IIO_VOLTAGE), /* VDDA */ |
| @@ -1583,6 +1581,11 @@ static int mxs_lradc_probe(struct platform_device *pdev) | |||
| 1583 | 1581 | ||
| 1584 | touch_ret = mxs_lradc_probe_touchscreen(lradc, node); | 1582 | touch_ret = mxs_lradc_probe_touchscreen(lradc, node); |
| 1585 | 1583 | ||
| 1584 | if (touch_ret == 0) | ||
| 1585 | lradc->buffer_vchans = BUFFER_VCHANS_LIMITED; | ||
| 1586 | else | ||
| 1587 | lradc->buffer_vchans = BUFFER_VCHANS_ALL; | ||
| 1588 | |||
| 1586 | /* Grab all IRQ sources */ | 1589 | /* Grab all IRQ sources */ |
| 1587 | for (i = 0; i < of_cfg->irq_count; i++) { | 1590 | for (i = 0; i < of_cfg->irq_count; i++) { |
| 1588 | lradc->irq[i] = platform_get_irq(pdev, i); | 1591 | lradc->irq[i] = platform_get_irq(pdev, i); |
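The core of the mxs-lradc rework above is that the touchscreen now owns two dedicated virtual channels (#6 and #7), physical plates are routed onto them through the 4-bit select fields in CTRL4, and buffer_vchans (0x3f with the touchscreen, 0xff without) keeps those slots away from buffered capture. mxs_lradc_map_channel() is a clear-then-set of the slot's select field; the sketch below reproduces just that field manipulation on a plain variable. The TS_CH_* plate numbers here are illustrative stand-ins, not taken from the hunks above.

/* Sketch of the CTRL4 virtual-channel mapping added above: each virtual
 * channel (slot) owns a 4-bit field selecting which physical LRADC channel
 * feeds it.  ctrl4 is a plain variable standing in for the register.
 */
#include <stdio.h>
#include <stdint.h>

#define LRADCSELECT_MASK(vch)  (0xfu << ((vch) * 4))
#define LRADCSELECT(vch, ch)   (((uint32_t)(ch) << ((vch) * 4)) & LRADCSELECT_MASK(vch))

#define TOUCHSCREEN_VCHANNEL1  7
#define TOUCHSCREEN_VCHANNEL2  6
#define TS_CH_YP               3   /* illustrative plate numbers */
#define TS_CH_XP               2

static uint32_t ctrl4;

static void map_channel(unsigned int vch, unsigned int ch)
{
    ctrl4 &= ~LRADCSELECT_MASK(vch);   /* clear the slot's select field */
    ctrl4 |= LRADCSELECT(vch, ch);     /* route physical channel ch into it */
}

int main(void)
{
    map_channel(TOUCHSCREEN_VCHANNEL1, TS_CH_YP);   /* Y+ plate on vchannel 7 */
    map_channel(TOUCHSCREEN_VCHANNEL2, TS_CH_XP);   /* X+ plate on vchannel 6 */
    printf("CTRL4 = 0x%08x\n", ctrl4);              /* 0x32000000 */
    return 0;
}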
diff --git a/drivers/staging/iio/resolver/ad2s1200.c b/drivers/staging/iio/resolver/ad2s1200.c index 017d2f8379b7..c17893b4918c 100644 --- a/drivers/staging/iio/resolver/ad2s1200.c +++ b/drivers/staging/iio/resolver/ad2s1200.c | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | #include <linux/delay.h> | 18 | #include <linux/delay.h> |
| 19 | #include <linux/gpio.h> | 19 | #include <linux/gpio.h> |
| 20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
| 21 | #include <linux/bitops.h> | ||
| 21 | 22 | ||
| 22 | #include <linux/iio/iio.h> | 23 | #include <linux/iio/iio.h> |
| 23 | #include <linux/iio/sysfs.h> | 24 | #include <linux/iio/sysfs.h> |
| @@ -68,7 +69,7 @@ static int ad2s1200_read_raw(struct iio_dev *indio_dev, | |||
| 68 | break; | 69 | break; |
| 69 | case IIO_ANGL_VEL: | 70 | case IIO_ANGL_VEL: |
| 70 | vel = (((s16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4); | 71 | vel = (((s16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4); |
| 71 | vel = (vel << 4) >> 4; | 72 | vel = sign_extend32(vel, 11); |
| 72 | *val = vel; | 73 | *val = vel; |
| 73 | break; | 74 | break; |
| 74 | default: | 75 | default: |
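The ad2s1200 change above swaps an ad hoc shift pair for sign_extend32(vel, 11), treating bit 11 as the sign bit of the 12-bit two's-complement velocity word. The helper below mirrors how the kernel routine behaves and shows the extension on two sample values; it is a sketch, not the kernel's header.

/* Sketch of sign-extending a 12-bit two's-complement field, as the driver now
 * does with sign_extend32(vel, 11).
 */
#include <stdio.h>
#include <stdint.h>

static int32_t sign_extend32(uint32_t value, unsigned int index)
{
    unsigned int shift = 31 - index;

    /* Move the sign bit up to bit 31, then arithmetic-shift back down. */
    return (int32_t)(value << shift) >> shift;
}

int main(void)
{
    unsigned int raw = 0xFFF;   /* 12-bit pattern for -1 */

    printf("raw 0x%03x -> %d\n", raw, sign_extend32(raw, 11));      /* -1 */
    printf("raw 0x%03x -> %d\n", 0x7FFu, sign_extend32(0x7FF, 11)); /* 2047 */
    return 0;
}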
diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c index f88b08877025..1e25133d35e2 100644 --- a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c +++ b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c | |||
| @@ -208,7 +208,7 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev, | |||
| 208 | trip_cnt, GFP_KERNEL); | 208 | trip_cnt, GFP_KERNEL); |
| 209 | if (!int34x_thermal_zone->aux_trips) { | 209 | if (!int34x_thermal_zone->aux_trips) { |
| 210 | ret = -ENOMEM; | 210 | ret = -ENOMEM; |
| 211 | goto free_mem; | 211 | goto err_trip_alloc; |
| 212 | } | 212 | } |
| 213 | trip_mask = BIT(trip_cnt) - 1; | 213 | trip_mask = BIT(trip_cnt) - 1; |
| 214 | int34x_thermal_zone->aux_trip_nr = trip_cnt; | 214 | int34x_thermal_zone->aux_trip_nr = trip_cnt; |
| @@ -248,14 +248,15 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev, | |||
| 248 | 0, 0); | 248 | 0, 0); |
| 249 | if (IS_ERR(int34x_thermal_zone->zone)) { | 249 | if (IS_ERR(int34x_thermal_zone->zone)) { |
| 250 | ret = PTR_ERR(int34x_thermal_zone->zone); | 250 | ret = PTR_ERR(int34x_thermal_zone->zone); |
| 251 | goto free_lpat; | 251 | goto err_thermal_zone; |
| 252 | } | 252 | } |
| 253 | 253 | ||
| 254 | return int34x_thermal_zone; | 254 | return int34x_thermal_zone; |
| 255 | 255 | ||
| 256 | free_lpat: | 256 | err_thermal_zone: |
| 257 | acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table); | 257 | acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table); |
| 258 | free_mem: | 258 | kfree(int34x_thermal_zone->aux_trips); |
| 259 | err_trip_alloc: | ||
| 259 | kfree(int34x_thermal_zone); | 260 | kfree(int34x_thermal_zone); |
| 260 | return ERR_PTR(ret); | 261 | return ERR_PTR(ret); |
| 261 | } | 262 | } |
| @@ -266,6 +267,7 @@ void int340x_thermal_zone_remove(struct int34x_thermal_zone | |||
| 266 | { | 267 | { |
| 267 | thermal_zone_device_unregister(int34x_thermal_zone->zone); | 268 | thermal_zone_device_unregister(int34x_thermal_zone->zone); |
| 268 | acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table); | 269 | acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table); |
| 270 | kfree(int34x_thermal_zone->aux_trips); | ||
| 269 | kfree(int34x_thermal_zone); | 271 | kfree(int34x_thermal_zone); |
| 270 | } | 272 | } |
| 271 | EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove); | 273 | EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove); |
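The relabelling in the int340x hunks above follows the usual kernel unwind convention: each err_* label frees exactly what was allocated before the failing step, in reverse order, and the aux_trips buffer is now also released on the remove path. A generic, self-contained sketch of that pattern; all names here are illustrative.

#include <linux/slab.h>
#include <linux/err.h>

struct example_zone {
        int *aux_trips;
        void *zone;
};

/* Hypothetical registration step that may fail. */
static void *example_register(struct example_zone *z)
{
        return z;       /* stand-in for a real registration call */
}

static struct example_zone *example_zone_add(void)
{
        struct example_zone *z;
        int ret;

        z = kzalloc(sizeof(*z), GFP_KERNEL);
        if (!z)
                return ERR_PTR(-ENOMEM);

        z->aux_trips = kcalloc(4, sizeof(*z->aux_trips), GFP_KERNEL);
        if (!z->aux_trips) {
                ret = -ENOMEM;
                goto err_trip_alloc;
        }

        z->zone = example_register(z);
        if (IS_ERR(z->zone)) {
                ret = PTR_ERR(z->zone);
                goto err_register;
        }

        return z;

err_register:
        kfree(z->aux_trips);
err_trip_alloc:
        kfree(z);
        return ERR_PTR(ret);
}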
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index 1fc54ab911d2..1d30b0975651 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c | |||
| @@ -682,6 +682,7 @@ static void exynos7_tmu_control(struct platform_device *pdev, bool on) | |||
| 682 | 682 | ||
| 683 | if (on) { | 683 | if (on) { |
| 684 | con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT); | 684 | con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT); |
| 685 | con |= (1 << EXYNOS7_PD_DET_EN_SHIFT); | ||
| 685 | interrupt_en = | 686 | interrupt_en = |
| 686 | (of_thermal_is_trip_valid(tz, 7) | 687 | (of_thermal_is_trip_valid(tz, 7) |
| 687 | << EXYNOS7_TMU_INTEN_RISE7_SHIFT) | | 688 | << EXYNOS7_TMU_INTEN_RISE7_SHIFT) | |
| @@ -704,9 +705,9 @@ static void exynos7_tmu_control(struct platform_device *pdev, bool on) | |||
| 704 | interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT; | 705 | interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT; |
| 705 | } else { | 706 | } else { |
| 706 | con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT); | 707 | con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT); |
| 708 | con &= ~(1 << EXYNOS7_PD_DET_EN_SHIFT); | ||
| 707 | interrupt_en = 0; /* Disable all interrupts */ | 709 | interrupt_en = 0; /* Disable all interrupts */ |
| 708 | } | 710 | } |
| 709 | con |= 1 << EXYNOS7_PD_DET_EN_SHIFT; | ||
| 710 | 711 | ||
| 711 | writel(interrupt_en, data->base + EXYNOS7_TMU_REG_INTEN); | 712 | writel(interrupt_en, data->base + EXYNOS7_TMU_REG_INTEN); |
| 712 | writel(con, data->base + EXYNOS_TMU_REG_CONTROL); | 713 | writel(con, data->base + EXYNOS_TMU_REG_CONTROL); |
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 48491d1a81d6..174d3bcf8bd7 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c | |||
| @@ -899,6 +899,22 @@ thermal_cooling_device_trip_point_show(struct device *dev, | |||
| 899 | return sprintf(buf, "%d\n", instance->trip); | 899 | return sprintf(buf, "%d\n", instance->trip); |
| 900 | } | 900 | } |
| 901 | 901 | ||
| 902 | static struct attribute *cooling_device_attrs[] = { | ||
| 903 | &dev_attr_cdev_type.attr, | ||
| 904 | &dev_attr_max_state.attr, | ||
| 905 | &dev_attr_cur_state.attr, | ||
| 906 | NULL, | ||
| 907 | }; | ||
| 908 | |||
| 909 | static const struct attribute_group cooling_device_attr_group = { | ||
| 910 | .attrs = cooling_device_attrs, | ||
| 911 | }; | ||
| 912 | |||
| 913 | static const struct attribute_group *cooling_device_attr_groups[] = { | ||
| 914 | &cooling_device_attr_group, | ||
| 915 | NULL, | ||
| 916 | }; | ||
| 917 | |||
| 902 | /* Device management */ | 918 | /* Device management */ |
| 903 | 919 | ||
| 904 | /** | 920 | /** |
| @@ -1130,6 +1146,7 @@ __thermal_cooling_device_register(struct device_node *np, | |||
| 1130 | cdev->ops = ops; | 1146 | cdev->ops = ops; |
| 1131 | cdev->updated = false; | 1147 | cdev->updated = false; |
| 1132 | cdev->device.class = &thermal_class; | 1148 | cdev->device.class = &thermal_class; |
| 1149 | cdev->device.groups = cooling_device_attr_groups; | ||
| 1133 | cdev->devdata = devdata; | 1150 | cdev->devdata = devdata; |
| 1134 | dev_set_name(&cdev->device, "cooling_device%d", cdev->id); | 1151 | dev_set_name(&cdev->device, "cooling_device%d", cdev->id); |
| 1135 | result = device_register(&cdev->device); | 1152 | result = device_register(&cdev->device); |
| @@ -1139,21 +1156,6 @@ __thermal_cooling_device_register(struct device_node *np, | |||
| 1139 | return ERR_PTR(result); | 1156 | return ERR_PTR(result); |
| 1140 | } | 1157 | } |
| 1141 | 1158 | ||
| 1142 | /* sys I/F */ | ||
| 1143 | if (type) { | ||
| 1144 | result = device_create_file(&cdev->device, &dev_attr_cdev_type); | ||
| 1145 | if (result) | ||
| 1146 | goto unregister; | ||
| 1147 | } | ||
| 1148 | |||
| 1149 | result = device_create_file(&cdev->device, &dev_attr_max_state); | ||
| 1150 | if (result) | ||
| 1151 | goto unregister; | ||
| 1152 | |||
| 1153 | result = device_create_file(&cdev->device, &dev_attr_cur_state); | ||
| 1154 | if (result) | ||
| 1155 | goto unregister; | ||
| 1156 | |||
| 1157 | /* Add 'this' new cdev to the global cdev list */ | 1159 | /* Add 'this' new cdev to the global cdev list */ |
| 1158 | mutex_lock(&thermal_list_lock); | 1160 | mutex_lock(&thermal_list_lock); |
| 1159 | list_add(&cdev->node, &thermal_cdev_list); | 1161 | list_add(&cdev->node, &thermal_cdev_list); |
| @@ -1163,11 +1165,6 @@ __thermal_cooling_device_register(struct device_node *np, | |||
| 1163 | bind_cdev(cdev); | 1165 | bind_cdev(cdev); |
| 1164 | 1166 | ||
| 1165 | return cdev; | 1167 | return cdev; |
| 1166 | |||
| 1167 | unregister: | ||
| 1168 | release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id); | ||
| 1169 | device_unregister(&cdev->device); | ||
| 1170 | return ERR_PTR(result); | ||
| 1171 | } | 1168 | } |
| 1172 | 1169 | ||
| 1173 | /** | 1170 | /** |
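The thermal_core hunks above replace per-file device_create_file() calls (and their unwind path) with a static attribute group assigned to the device's groups pointer before device_register(), so the sysfs files are created and removed by the driver core itself. A hedged sketch of that pattern; the attribute and its value are illustrative, not the thermal ones.

#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/kernel.h>

static ssize_t max_state_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", 7);         /* placeholder value */
}
static DEVICE_ATTR_RO(max_state);

static struct attribute *example_attrs[] = {
        &dev_attr_max_state.attr,
        NULL,
};

static const struct attribute_group example_group = {
        .attrs = example_attrs,
};

static const struct attribute_group *example_groups[] = {
        &example_group,
        NULL,
};

/* Before registering: dev->groups = example_groups; device_register(dev); */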
diff --git a/drivers/tty/bfin_jtag_comm.c b/drivers/tty/bfin_jtag_comm.c index d7b198c400c7..ce24182f8514 100644 --- a/drivers/tty/bfin_jtag_comm.c +++ b/drivers/tty/bfin_jtag_comm.c | |||
| @@ -210,18 +210,6 @@ bfin_jc_chars_in_buffer(struct tty_struct *tty) | |||
| 210 | return circ_cnt(&bfin_jc_write_buf); | 210 | return circ_cnt(&bfin_jc_write_buf); |
| 211 | } | 211 | } |
| 212 | 212 | ||
| 213 | static void | ||
| 214 | bfin_jc_wait_until_sent(struct tty_struct *tty, int timeout) | ||
| 215 | { | ||
| 216 | unsigned long expire = jiffies + timeout; | ||
| 217 | while (!circ_empty(&bfin_jc_write_buf)) { | ||
| 218 | if (signal_pending(current)) | ||
| 219 | break; | ||
| 220 | if (time_after(jiffies, expire)) | ||
| 221 | break; | ||
| 222 | } | ||
| 223 | } | ||
| 224 | |||
| 225 | static const struct tty_operations bfin_jc_ops = { | 213 | static const struct tty_operations bfin_jc_ops = { |
| 226 | .open = bfin_jc_open, | 214 | .open = bfin_jc_open, |
| 227 | .close = bfin_jc_close, | 215 | .close = bfin_jc_close, |
| @@ -230,7 +218,6 @@ static const struct tty_operations bfin_jc_ops = { | |||
| 230 | .flush_chars = bfin_jc_flush_chars, | 218 | .flush_chars = bfin_jc_flush_chars, |
| 231 | .write_room = bfin_jc_write_room, | 219 | .write_room = bfin_jc_write_room, |
| 232 | .chars_in_buffer = bfin_jc_chars_in_buffer, | 220 | .chars_in_buffer = bfin_jc_chars_in_buffer, |
| 233 | .wait_until_sent = bfin_jc_wait_until_sent, | ||
| 234 | }; | 221 | }; |
| 235 | 222 | ||
| 236 | static int __init bfin_jc_init(void) | 223 | static int __init bfin_jc_init(void) |
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index e3b9570a1eff..deae122c9c4b 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c | |||
| @@ -2138,8 +2138,8 @@ int serial8250_do_startup(struct uart_port *port) | |||
| 2138 | /* | 2138 | /* |
| 2139 | * Clear the interrupt registers. | 2139 | * Clear the interrupt registers. |
| 2140 | */ | 2140 | */ |
| 2141 | if (serial_port_in(port, UART_LSR) & UART_LSR_DR) | 2141 | serial_port_in(port, UART_LSR); |
| 2142 | serial_port_in(port, UART_RX); | 2142 | serial_port_in(port, UART_RX); |
| 2143 | serial_port_in(port, UART_IIR); | 2143 | serial_port_in(port, UART_IIR); |
| 2144 | serial_port_in(port, UART_MSR); | 2144 | serial_port_in(port, UART_MSR); |
| 2145 | 2145 | ||
| @@ -2300,8 +2300,8 @@ dont_test_tx_en: | |||
| 2300 | * saved flags to avoid getting false values from polling | 2300 | * saved flags to avoid getting false values from polling |
| 2301 | * routines or the previous session. | 2301 | * routines or the previous session. |
| 2302 | */ | 2302 | */ |
| 2303 | if (serial_port_in(port, UART_LSR) & UART_LSR_DR) | 2303 | serial_port_in(port, UART_LSR); |
| 2304 | serial_port_in(port, UART_RX); | 2304 | serial_port_in(port, UART_RX); |
| 2305 | serial_port_in(port, UART_IIR); | 2305 | serial_port_in(port, UART_IIR); |
| 2306 | serial_port_in(port, UART_MSR); | 2306 | serial_port_in(port, UART_MSR); |
| 2307 | up->lsr_saved_flags = 0; | 2307 | up->lsr_saved_flags = 0; |
| @@ -2394,8 +2394,7 @@ void serial8250_do_shutdown(struct uart_port *port) | |||
| 2394 | * Read data port to reset things, and then unlink from | 2394 | * Read data port to reset things, and then unlink from |
| 2395 | * the IRQ chain. | 2395 | * the IRQ chain. |
| 2396 | */ | 2396 | */ |
| 2397 | if (serial_port_in(port, UART_LSR) & UART_LSR_DR) | 2397 | serial_port_in(port, UART_RX); |
| 2398 | serial_port_in(port, UART_RX); | ||
| 2399 | serial8250_rpm_put(up); | 2398 | serial8250_rpm_put(up); |
| 2400 | 2399 | ||
| 2401 | del_timer_sync(&up->timer); | 2400 | del_timer_sync(&up->timer); |
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index e60116235836..2ab229ddee38 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c | |||
| @@ -59,6 +59,8 @@ struct dw8250_data { | |||
| 59 | u8 usr_reg; | 59 | u8 usr_reg; |
| 60 | int last_mcr; | 60 | int last_mcr; |
| 61 | int line; | 61 | int line; |
| 62 | int msr_mask_on; | ||
| 63 | int msr_mask_off; | ||
| 62 | struct clk *clk; | 64 | struct clk *clk; |
| 63 | struct clk *pclk; | 65 | struct clk *pclk; |
| 64 | struct reset_control *rst; | 66 | struct reset_control *rst; |
| @@ -81,6 +83,12 @@ static inline int dw8250_modify_msr(struct uart_port *p, int offset, int value) | |||
| 81 | value &= ~UART_MSR_DCTS; | 83 | value &= ~UART_MSR_DCTS; |
| 82 | } | 84 | } |
| 83 | 85 | ||
| 86 | /* Override any modem control signals if needed */ | ||
| 87 | if (offset == UART_MSR) { | ||
| 88 | value |= d->msr_mask_on; | ||
| 89 | value &= ~d->msr_mask_off; | ||
| 90 | } | ||
| 91 | |||
| 84 | return value; | 92 | return value; |
| 85 | } | 93 | } |
| 86 | 94 | ||
| @@ -334,6 +342,30 @@ static int dw8250_probe_of(struct uart_port *p, | |||
| 334 | if (id >= 0) | 342 | if (id >= 0) |
| 335 | p->line = id; | 343 | p->line = id; |
| 336 | 344 | ||
| 345 | if (of_property_read_bool(np, "dcd-override")) { | ||
| 346 | /* Always report DCD as active */ | ||
| 347 | data->msr_mask_on |= UART_MSR_DCD; | ||
| 348 | data->msr_mask_off |= UART_MSR_DDCD; | ||
| 349 | } | ||
| 350 | |||
| 351 | if (of_property_read_bool(np, "dsr-override")) { | ||
| 352 | /* Always report DSR as active */ | ||
| 353 | data->msr_mask_on |= UART_MSR_DSR; | ||
| 354 | data->msr_mask_off |= UART_MSR_DDSR; | ||
| 355 | } | ||
| 356 | |||
| 357 | if (of_property_read_bool(np, "cts-override")) { | ||
| 358 | /* Always report CTS as active */ | ||
| 359 | data->msr_mask_on |= UART_MSR_CTS; | ||
| 360 | data->msr_mask_off |= UART_MSR_DCTS; | ||
| 361 | } | ||
| 362 | |||
| 363 | if (of_property_read_bool(np, "ri-override")) { | ||
| 364 | /* Always report Ring indicator as inactive */ | ||
| 365 | data->msr_mask_off |= UART_MSR_RI; | ||
| 366 | data->msr_mask_off |= UART_MSR_TERI; | ||
| 367 | } | ||
| 368 | |||
| 337 | /* clock got configured through clk api, all done */ | 369 | /* clock got configured through clk api, all done */ |
| 338 | if (p->uartclk) | 370 | if (p->uartclk) |
| 339 | return 0; | 371 | return 0; |
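For the 8250_dw hunks above, a minimal sketch of how the new msr_mask_on/msr_mask_off pair acts on the raw modem-status value once the optional DT booleans have filled it in; the helper name is illustrative.

#include <linux/serial_reg.h>

/*
 * Force selected modem-status bits on or off.  With "dcd-override"
 * set, mask_on contains UART_MSR_DCD and mask_off contains
 * UART_MSR_DDCD, so DCD always reads as asserted and its delta bit
 * never fires.
 */
static unsigned int apply_msr_override(unsigned int msr,
                                       unsigned int mask_on,
                                       unsigned int mask_off)
{
        msr |= mask_on;
        msr &= ~mask_off;
        return msr;
}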
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index daf2c82984e9..892eb32cdef4 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c | |||
| @@ -69,7 +69,7 @@ static void moan_device(const char *str, struct pci_dev *dev) | |||
| 69 | "Please send the output of lspci -vv, this\n" | 69 | "Please send the output of lspci -vv, this\n" |
| 70 | "message (0x%04x,0x%04x,0x%04x,0x%04x), the\n" | 70 | "message (0x%04x,0x%04x,0x%04x,0x%04x), the\n" |
| 71 | "manufacturer and name of serial board or\n" | 71 | "manufacturer and name of serial board or\n" |
| 72 | "modem board to rmk+serial@arm.linux.org.uk.\n", | 72 | "modem board to <linux-serial@vger.kernel.org>.\n", |
| 73 | pci_name(dev), str, dev->vendor, dev->device, | 73 | pci_name(dev), str, dev->vendor, dev->device, |
| 74 | dev->subsystem_vendor, dev->subsystem_device); | 74 | dev->subsystem_vendor, dev->subsystem_device); |
| 75 | } | 75 | } |
| @@ -1989,13 +1989,6 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = { | |||
| 1989 | }, | 1989 | }, |
| 1990 | { | 1990 | { |
| 1991 | .vendor = PCI_VENDOR_ID_INTEL, | 1991 | .vendor = PCI_VENDOR_ID_INTEL, |
| 1992 | .device = PCI_DEVICE_ID_INTEL_QRK_UART, | ||
| 1993 | .subvendor = PCI_ANY_ID, | ||
| 1994 | .subdevice = PCI_ANY_ID, | ||
| 1995 | .setup = pci_default_setup, | ||
| 1996 | }, | ||
| 1997 | { | ||
| 1998 | .vendor = PCI_VENDOR_ID_INTEL, | ||
| 1999 | .device = PCI_DEVICE_ID_INTEL_BSW_UART1, | 1992 | .device = PCI_DEVICE_ID_INTEL_BSW_UART1, |
| 2000 | .subvendor = PCI_ANY_ID, | 1993 | .subvendor = PCI_ANY_ID, |
| 2001 | .subdevice = PCI_ANY_ID, | 1994 | .subdevice = PCI_ANY_ID, |
| @@ -2201,13 +2194,6 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = { | |||
| 2201 | */ | 2194 | */ |
| 2202 | { | 2195 | { |
| 2203 | .vendor = PCI_VENDOR_ID_PLX, | 2196 | .vendor = PCI_VENDOR_ID_PLX, |
| 2204 | .device = PCI_DEVICE_ID_PLX_9030, | ||
| 2205 | .subvendor = PCI_SUBVENDOR_ID_PERLE, | ||
| 2206 | .subdevice = PCI_ANY_ID, | ||
| 2207 | .setup = pci_default_setup, | ||
| 2208 | }, | ||
| 2209 | { | ||
| 2210 | .vendor = PCI_VENDOR_ID_PLX, | ||
| 2211 | .device = PCI_DEVICE_ID_PLX_9050, | 2197 | .device = PCI_DEVICE_ID_PLX_9050, |
| 2212 | .subvendor = PCI_SUBVENDOR_ID_EXSYS, | 2198 | .subvendor = PCI_SUBVENDOR_ID_EXSYS, |
| 2213 | .subdevice = PCI_SUBDEVICE_ID_EXSYS_4055, | 2199 | .subdevice = PCI_SUBDEVICE_ID_EXSYS_4055, |
| @@ -5415,10 +5401,6 @@ static struct pci_device_id serial_pci_tbl[] = { | |||
| 5415 | PCI_ANY_ID, PCI_ANY_ID, | 5401 | PCI_ANY_ID, PCI_ANY_ID, |
| 5416 | 0, 0, pbn_b0_bt_2_115200 }, | 5402 | 0, 0, pbn_b0_bt_2_115200 }, |
| 5417 | 5403 | ||
| 5418 | { PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH352_2S, | ||
| 5419 | PCI_ANY_ID, PCI_ANY_ID, | ||
| 5420 | 0, 0, pbn_b0_bt_2_115200 }, | ||
| 5421 | |||
| 5422 | { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S, | 5404 | { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S, |
| 5423 | PCI_ANY_ID, PCI_ANY_ID, | 5405 | PCI_ANY_ID, PCI_ANY_ID, |
| 5424 | 0, 0, pbn_wch384_4 }, | 5406 | 0, 0, pbn_wch384_4 }, |
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 846552bff67d..4e959c43f680 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c | |||
| @@ -47,6 +47,7 @@ | |||
| 47 | #include <linux/gpio/consumer.h> | 47 | #include <linux/gpio/consumer.h> |
| 48 | #include <linux/err.h> | 48 | #include <linux/err.h> |
| 49 | #include <linux/irq.h> | 49 | #include <linux/irq.h> |
| 50 | #include <linux/suspend.h> | ||
| 50 | 51 | ||
| 51 | #include <asm/io.h> | 52 | #include <asm/io.h> |
| 52 | #include <asm/ioctls.h> | 53 | #include <asm/ioctls.h> |
| @@ -173,6 +174,12 @@ struct atmel_uart_port { | |||
| 173 | bool ms_irq_enabled; | 174 | bool ms_irq_enabled; |
| 174 | bool is_usart; /* usart or uart */ | 175 | bool is_usart; /* usart or uart */ |
| 175 | struct timer_list uart_timer; /* uart timer */ | 176 | struct timer_list uart_timer; /* uart timer */ |
| 177 | |||
| 178 | bool suspended; | ||
| 179 | unsigned int pending; | ||
| 180 | unsigned int pending_status; | ||
| 181 | spinlock_t lock_suspended; | ||
| 182 | |||
| 176 | int (*prepare_rx)(struct uart_port *port); | 183 | int (*prepare_rx)(struct uart_port *port); |
| 177 | int (*prepare_tx)(struct uart_port *port); | 184 | int (*prepare_tx)(struct uart_port *port); |
| 178 | void (*schedule_rx)(struct uart_port *port); | 185 | void (*schedule_rx)(struct uart_port *port); |
| @@ -1179,12 +1186,15 @@ static irqreturn_t atmel_interrupt(int irq, void *dev_id) | |||
| 1179 | { | 1186 | { |
| 1180 | struct uart_port *port = dev_id; | 1187 | struct uart_port *port = dev_id; |
| 1181 | struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); | 1188 | struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); |
| 1182 | unsigned int status, pending, pass_counter = 0; | 1189 | unsigned int status, pending, mask, pass_counter = 0; |
| 1183 | bool gpio_handled = false; | 1190 | bool gpio_handled = false; |
| 1184 | 1191 | ||
| 1192 | spin_lock(&atmel_port->lock_suspended); | ||
| 1193 | |||
| 1185 | do { | 1194 | do { |
| 1186 | status = atmel_get_lines_status(port); | 1195 | status = atmel_get_lines_status(port); |
| 1187 | pending = status & UART_GET_IMR(port); | 1196 | mask = UART_GET_IMR(port); |
| 1197 | pending = status & mask; | ||
| 1188 | if (!gpio_handled) { | 1198 | if (!gpio_handled) { |
| 1189 | /* | 1199 | /* |
| 1190 | * Dealing with GPIO interrupt | 1200 | * Dealing with GPIO interrupt |
| @@ -1206,11 +1216,21 @@ static irqreturn_t atmel_interrupt(int irq, void *dev_id) | |||
| 1206 | if (!pending) | 1216 | if (!pending) |
| 1207 | break; | 1217 | break; |
| 1208 | 1218 | ||
| 1219 | if (atmel_port->suspended) { | ||
| 1220 | atmel_port->pending |= pending; | ||
| 1221 | atmel_port->pending_status = status; | ||
| 1222 | UART_PUT_IDR(port, mask); | ||
| 1223 | pm_system_wakeup(); | ||
| 1224 | break; | ||
| 1225 | } | ||
| 1226 | |||
| 1209 | atmel_handle_receive(port, pending); | 1227 | atmel_handle_receive(port, pending); |
| 1210 | atmel_handle_status(port, pending, status); | 1228 | atmel_handle_status(port, pending, status); |
| 1211 | atmel_handle_transmit(port, pending); | 1229 | atmel_handle_transmit(port, pending); |
| 1212 | } while (pass_counter++ < ATMEL_ISR_PASS_LIMIT); | 1230 | } while (pass_counter++ < ATMEL_ISR_PASS_LIMIT); |
| 1213 | 1231 | ||
| 1232 | spin_unlock(&atmel_port->lock_suspended); | ||
| 1233 | |||
| 1214 | return pass_counter ? IRQ_HANDLED : IRQ_NONE; | 1234 | return pass_counter ? IRQ_HANDLED : IRQ_NONE; |
| 1215 | } | 1235 | } |
| 1216 | 1236 | ||
| @@ -1742,7 +1762,8 @@ static int atmel_startup(struct uart_port *port) | |||
| 1742 | /* | 1762 | /* |
| 1743 | * Allocate the IRQ | 1763 | * Allocate the IRQ |
| 1744 | */ | 1764 | */ |
| 1745 | retval = request_irq(port->irq, atmel_interrupt, IRQF_SHARED, | 1765 | retval = request_irq(port->irq, atmel_interrupt, |
| 1766 | IRQF_SHARED | IRQF_COND_SUSPEND, | ||
| 1746 | tty ? tty->name : "atmel_serial", port); | 1767 | tty ? tty->name : "atmel_serial", port); |
| 1747 | if (retval) { | 1768 | if (retval) { |
| 1748 | dev_err(port->dev, "atmel_startup - Can't get irq\n"); | 1769 | dev_err(port->dev, "atmel_startup - Can't get irq\n"); |
| @@ -2513,8 +2534,14 @@ static int atmel_serial_suspend(struct platform_device *pdev, | |||
| 2513 | 2534 | ||
| 2514 | /* we can not wake up if we're running on slow clock */ | 2535 | /* we can not wake up if we're running on slow clock */ |
| 2515 | atmel_port->may_wakeup = device_may_wakeup(&pdev->dev); | 2536 | atmel_port->may_wakeup = device_may_wakeup(&pdev->dev); |
| 2516 | if (atmel_serial_clk_will_stop()) | 2537 | if (atmel_serial_clk_will_stop()) { |
| 2538 | unsigned long flags; | ||
| 2539 | |||
| 2540 | spin_lock_irqsave(&atmel_port->lock_suspended, flags); | ||
| 2541 | atmel_port->suspended = true; | ||
| 2542 | spin_unlock_irqrestore(&atmel_port->lock_suspended, flags); | ||
| 2517 | device_set_wakeup_enable(&pdev->dev, 0); | 2543 | device_set_wakeup_enable(&pdev->dev, 0); |
| 2544 | } | ||
| 2518 | 2545 | ||
| 2519 | uart_suspend_port(&atmel_uart, port); | 2546 | uart_suspend_port(&atmel_uart, port); |
| 2520 | 2547 | ||
| @@ -2525,6 +2552,18 @@ static int atmel_serial_resume(struct platform_device *pdev) | |||
| 2525 | { | 2552 | { |
| 2526 | struct uart_port *port = platform_get_drvdata(pdev); | 2553 | struct uart_port *port = platform_get_drvdata(pdev); |
| 2527 | struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); | 2554 | struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); |
| 2555 | unsigned long flags; | ||
| 2556 | |||
| 2557 | spin_lock_irqsave(&atmel_port->lock_suspended, flags); | ||
| 2558 | if (atmel_port->pending) { | ||
| 2559 | atmel_handle_receive(port, atmel_port->pending); | ||
| 2560 | atmel_handle_status(port, atmel_port->pending, | ||
| 2561 | atmel_port->pending_status); | ||
| 2562 | atmel_handle_transmit(port, atmel_port->pending); | ||
| 2563 | atmel_port->pending = 0; | ||
| 2564 | } | ||
| 2565 | atmel_port->suspended = false; | ||
| 2566 | spin_unlock_irqrestore(&atmel_port->lock_suspended, flags); | ||
| 2528 | 2567 | ||
| 2529 | uart_resume_port(&atmel_uart, port); | 2568 | uart_resume_port(&atmel_uart, port); |
| 2530 | device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup); | 2569 | device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup); |
| @@ -2593,6 +2632,8 @@ static int atmel_serial_probe(struct platform_device *pdev) | |||
| 2593 | port->backup_imr = 0; | 2632 | port->backup_imr = 0; |
| 2594 | port->uart.line = ret; | 2633 | port->uart.line = ret; |
| 2595 | 2634 | ||
| 2635 | spin_lock_init(&port->lock_suspended); | ||
| 2636 | |||
| 2596 | ret = atmel_init_gpios(port, &pdev->dev); | 2637 | ret = atmel_init_gpios(port, &pdev->dev); |
| 2597 | if (ret < 0) | 2638 | if (ret < 0) |
| 2598 | dev_err(&pdev->dev, "%s", | 2639 | dev_err(&pdev->dev, "%s", |
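The atmel_serial changes above follow the IRQF_COND_SUSPEND pattern: an interrupt that fires while the port is suspended is only recorded under lock_suspended and turned into a system wakeup, and the saved state is replayed on resume. A condensed, hedged sketch; the two helpers are hypothetical stand-ins for the driver's receive/status/transmit handling.

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/suspend.h>

struct example_port {
        spinlock_t lock_suspended;
        bool suspended;
        unsigned int pending;
};

static unsigned int example_read_pending(struct example_port *p)
{
        return 0;       /* hypothetical: read and mask the IRQ status */
}

static void example_service(struct example_port *p, unsigned int pending)
{
        /* hypothetical: receive/status/transmit handling */
}

static irqreturn_t example_irq(int irq, void *dev_id)
{
        struct example_port *p = dev_id;
        unsigned int pending;

        spin_lock(&p->lock_suspended);
        pending = example_read_pending(p);
        if (pending) {
                if (p->suspended) {
                        p->pending |= pending;  /* replayed on resume */
                        pm_system_wakeup();
                } else {
                        example_service(p, pending);
                }
        }
        spin_unlock(&p->lock_suspended);

        return pending ? IRQ_HANDLED : IRQ_NONE;
}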
diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c index 7ff61e24a195..33fb94f78967 100644 --- a/drivers/tty/serial/of_serial.c +++ b/drivers/tty/serial/of_serial.c | |||
| @@ -133,10 +133,6 @@ static int of_platform_serial_setup(struct platform_device *ofdev, | |||
| 133 | if (of_find_property(np, "no-loopback-test", NULL)) | 133 | if (of_find_property(np, "no-loopback-test", NULL)) |
| 134 | port->flags |= UPF_SKIP_TEST; | 134 | port->flags |= UPF_SKIP_TEST; |
| 135 | 135 | ||
| 136 | ret = of_alias_get_id(np, "serial"); | ||
| 137 | if (ret >= 0) | ||
| 138 | port->line = ret; | ||
| 139 | |||
| 140 | port->dev = &ofdev->dev; | 136 | port->dev = &ofdev->dev; |
| 141 | 137 | ||
| 142 | switch (type) { | 138 | switch (type) { |
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c index 594b63331ef4..bca975f5093b 100644 --- a/drivers/tty/serial/sprd_serial.c +++ b/drivers/tty/serial/sprd_serial.c | |||
| @@ -293,8 +293,10 @@ static irqreturn_t sprd_handle_irq(int irq, void *dev_id) | |||
| 293 | 293 | ||
| 294 | ims = serial_in(port, SPRD_IMSR); | 294 | ims = serial_in(port, SPRD_IMSR); |
| 295 | 295 | ||
| 296 | if (!ims) | 296 | if (!ims) { |
| 297 | spin_unlock(&port->lock); | ||
| 297 | return IRQ_NONE; | 298 | return IRQ_NONE; |
| 299 | } | ||
| 298 | 300 | ||
| 299 | serial_out(port, SPRD_ICLR, ~0); | 301 | serial_out(port, SPRD_ICLR, ~0); |
| 300 | 302 | ||
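The sprd_serial fix above is a lock-balance correction: the early IRQ_NONE return now drops port->lock like every other exit path. A minimal sketch of the balanced shape; the status read is a hypothetical placeholder.

#include <linux/interrupt.h>
#include <linux/serial_core.h>

static irqreturn_t example_handle_irq(int irq, void *dev_id)
{
        struct uart_port *port = dev_id;
        unsigned int status;

        spin_lock(&port->lock);

        status = 0;     /* hypothetical: read the masked IRQ status */
        if (!status) {
                spin_unlock(&port->lock);       /* keep the lock balanced */
                return IRQ_NONE;
        }

        /* ... acknowledge and service the interrupt ... */

        spin_unlock(&port->lock);
        return IRQ_HANDLED;
}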
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 51f066aa375e..2bb4dfc02873 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c | |||
| @@ -1028,8 +1028,8 @@ EXPORT_SYMBOL(start_tty); | |||
| 1028 | /* We limit tty time update visibility to every 8 seconds or so. */ | 1028 | /* We limit tty time update visibility to every 8 seconds or so. */ |
| 1029 | static void tty_update_time(struct timespec *time) | 1029 | static void tty_update_time(struct timespec *time) |
| 1030 | { | 1030 | { |
| 1031 | unsigned long sec = get_seconds() & ~7; | 1031 | unsigned long sec = get_seconds(); |
| 1032 | if ((long)(sec - time->tv_sec) > 0) | 1032 | if (abs(sec - time->tv_sec) & ~7) |
| 1033 | time->tv_sec = sec; | 1033 | time->tv_sec = sec; |
| 1034 | } | 1034 | } |
| 1035 | 1035 | ||
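The tty_io change above makes the cached timestamp follow the clock in both directions: instead of only moving forward, it is refreshed once it differs from the current time by at least eight seconds, so a clock stepped backwards is also reflected. A tiny equivalent sketch; masking the absolute difference with ~7 is the same eight-second test.

#include <linux/kernel.h>       /* abs() */

static void example_update_time(long *cached_sec, long now_sec)
{
        if (abs(now_sec - *cached_sec) & ~7)
                *cached_sec = now_sec;          /* at least 8 s of drift */
}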
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c index a5cf253b2544..632fc8152061 100644 --- a/drivers/tty/tty_ioctl.c +++ b/drivers/tty/tty_ioctl.c | |||
| @@ -217,11 +217,17 @@ void tty_wait_until_sent(struct tty_struct *tty, long timeout) | |||
| 217 | #endif | 217 | #endif |
| 218 | if (!timeout) | 218 | if (!timeout) |
| 219 | timeout = MAX_SCHEDULE_TIMEOUT; | 219 | timeout = MAX_SCHEDULE_TIMEOUT; |
| 220 | if (wait_event_interruptible_timeout(tty->write_wait, | 220 | |
| 221 | !tty_chars_in_buffer(tty), timeout) >= 0) { | 221 | timeout = wait_event_interruptible_timeout(tty->write_wait, |
| 222 | if (tty->ops->wait_until_sent) | 222 | !tty_chars_in_buffer(tty), timeout); |
| 223 | tty->ops->wait_until_sent(tty, timeout); | 223 | if (timeout <= 0) |
| 224 | } | 224 | return; |
| 225 | |||
| 226 | if (timeout == MAX_SCHEDULE_TIMEOUT) | ||
| 227 | timeout = 0; | ||
| 228 | |||
| 229 | if (tty->ops->wait_until_sent) | ||
| 230 | tty->ops->wait_until_sent(tty, timeout); | ||
| 225 | } | 231 | } |
| 226 | EXPORT_SYMBOL(tty_wait_until_sent); | 232 | EXPORT_SYMBOL(tty_wait_until_sent); |
| 227 | 233 | ||
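The tty_ioctl rework above hinges on wait_event_interruptible_timeout() returning the number of jiffies left (or 0 / -ERESTARTSYS), so the remaining budget, not the original one, is what gets handed to ->wait_until_sent(), and a result of MAX_SCHEDULE_TIMEOUT is mapped back to the tty convention of 0 meaning no limit. A small sketch of that bookkeeping, assuming a caller-provided wait queue and condition flag.

#include <linux/wait.h>
#include <linux/sched.h>

/*
 * Mirrors the tty_wait_until_sent() bookkeeping: the value returned by
 * wait_event_interruptible_timeout() is the budget left for the next
 * stage; MAX_SCHEDULE_TIMEOUT is mapped to 0, which the tty layer
 * reads as "no limit".
 */
static long example_wait_stage(wait_queue_head_t *wq, bool *done,
                               long timeout)
{
        if (!timeout)
                timeout = MAX_SCHEDULE_TIMEOUT;

        timeout = wait_event_interruptible_timeout(*wq, *done, timeout);
        if (timeout <= 0)
                return timeout;         /* expired or interrupted */

        if (timeout == MAX_SCHEDULE_TIMEOUT)
                timeout = 0;            /* unbounded for the next stage */

        return timeout;
}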
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index e78720b59d67..683617714e7c 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
| @@ -1650,6 +1650,8 @@ static int acm_reset_resume(struct usb_interface *intf) | |||
| 1650 | 1650 | ||
| 1651 | static const struct usb_device_id acm_ids[] = { | 1651 | static const struct usb_device_id acm_ids[] = { |
| 1652 | /* quirky and broken devices */ | 1652 | /* quirky and broken devices */ |
| 1653 | { USB_DEVICE(0x076d, 0x0006), /* Denso Cradle CU-321 */ | ||
| 1654 | .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */ | ||
| 1653 | { USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */ | 1655 | { USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */ |
| 1654 | .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */ | 1656 | .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */ |
| 1655 | { USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */ | 1657 | { USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */ |
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 66abdbcfbfa5..11635537c052 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c | |||
| @@ -501,6 +501,7 @@ static void async_completed(struct urb *urb) | |||
| 501 | as->status = urb->status; | 501 | as->status = urb->status; |
| 502 | signr = as->signr; | 502 | signr = as->signr; |
| 503 | if (signr) { | 503 | if (signr) { |
| 504 | memset(&sinfo, 0, sizeof(sinfo)); | ||
| 504 | sinfo.si_signo = as->signr; | 505 | sinfo.si_signo = as->signr; |
| 505 | sinfo.si_errno = as->status; | 506 | sinfo.si_errno = as->status; |
| 506 | sinfo.si_code = SI_ASYNCIO; | 507 | sinfo.si_code = SI_ASYNCIO; |
| @@ -2382,6 +2383,7 @@ static void usbdev_remove(struct usb_device *udev) | |||
| 2382 | wake_up_all(&ps->wait); | 2383 | wake_up_all(&ps->wait); |
| 2383 | list_del_init(&ps->list); | 2384 | list_del_init(&ps->list); |
| 2384 | if (ps->discsignr) { | 2385 | if (ps->discsignr) { |
| 2386 | memset(&sinfo, 0, sizeof(sinfo)); | ||
| 2385 | sinfo.si_signo = ps->discsignr; | 2387 | sinfo.si_signo = ps->discsignr; |
| 2386 | sinfo.si_errno = EPIPE; | 2388 | sinfo.si_errno = EPIPE; |
| 2387 | sinfo.si_code = SI_ASYNCIO; | 2389 | sinfo.si_code = SI_ASYNCIO; |
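The devio.c hunks above zero the siginfo before filling it because the structure is mostly a union with padding, and anything left uninitialized would leak kernel stack contents to user space when the signal is delivered. A minimal sketch of the safe initialization order, with the same fields the driver uses.

#include <linux/signal.h>
#include <linux/string.h>

static void example_fill_siginfo(struct siginfo *info, int signo,
                                 int err, void __user *addr)
{
        memset(info, 0, sizeof(*info));         /* no stack garbage */
        info->si_signo = signo;
        info->si_errno = err;
        info->si_code = SI_ASYNCIO;
        info->si_addr = addr;
}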
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c index 172d64e585b6..52e0c4e5e48e 100644 --- a/drivers/usb/dwc3/dwc3-omap.c +++ b/drivers/usb/dwc3/dwc3-omap.c | |||
| @@ -205,6 +205,18 @@ static void dwc3_omap_write_irq0_set(struct dwc3_omap *omap, u32 value) | |||
| 205 | omap->irq0_offset, value); | 205 | omap->irq0_offset, value); |
| 206 | } | 206 | } |
| 207 | 207 | ||
| 208 | static void dwc3_omap_write_irqmisc_clr(struct dwc3_omap *omap, u32 value) | ||
| 209 | { | ||
| 210 | dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_CLR_MISC + | ||
| 211 | omap->irqmisc_offset, value); | ||
| 212 | } | ||
| 213 | |||
| 214 | static void dwc3_omap_write_irq0_clr(struct dwc3_omap *omap, u32 value) | ||
| 215 | { | ||
| 216 | dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_CLR_0 - | ||
| 217 | omap->irq0_offset, value); | ||
| 218 | } | ||
| 219 | |||
| 208 | static void dwc3_omap_set_mailbox(struct dwc3_omap *omap, | 220 | static void dwc3_omap_set_mailbox(struct dwc3_omap *omap, |
| 209 | enum omap_dwc3_vbus_id_status status) | 221 | enum omap_dwc3_vbus_id_status status) |
| 210 | { | 222 | { |
| @@ -345,9 +357,23 @@ static void dwc3_omap_enable_irqs(struct dwc3_omap *omap) | |||
| 345 | 357 | ||
| 346 | static void dwc3_omap_disable_irqs(struct dwc3_omap *omap) | 358 | static void dwc3_omap_disable_irqs(struct dwc3_omap *omap) |
| 347 | { | 359 | { |
| 360 | u32 reg; | ||
| 361 | |||
| 348 | /* disable all IRQs */ | 362 | /* disable all IRQs */ |
| 349 | dwc3_omap_write_irqmisc_set(omap, 0x00); | 363 | reg = USBOTGSS_IRQO_COREIRQ_ST; |
| 350 | dwc3_omap_write_irq0_set(omap, 0x00); | 364 | dwc3_omap_write_irq0_clr(omap, reg); |
| 365 | |||
| 366 | reg = (USBOTGSS_IRQMISC_OEVT | | ||
| 367 | USBOTGSS_IRQMISC_DRVVBUS_RISE | | ||
| 368 | USBOTGSS_IRQMISC_CHRGVBUS_RISE | | ||
| 369 | USBOTGSS_IRQMISC_DISCHRGVBUS_RISE | | ||
| 370 | USBOTGSS_IRQMISC_IDPULLUP_RISE | | ||
| 371 | USBOTGSS_IRQMISC_DRVVBUS_FALL | | ||
| 372 | USBOTGSS_IRQMISC_CHRGVBUS_FALL | | ||
| 373 | USBOTGSS_IRQMISC_DISCHRGVBUS_FALL | | ||
| 374 | USBOTGSS_IRQMISC_IDPULLUP_FALL); | ||
| 375 | |||
| 376 | dwc3_omap_write_irqmisc_clr(omap, reg); | ||
| 351 | } | 377 | } |
| 352 | 378 | ||
| 353 | static u64 dwc3_omap_dma_mask = DMA_BIT_MASK(32); | 379 | static u64 dwc3_omap_dma_mask = DMA_BIT_MASK(32); |
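The dwc3-omap change above reflects that the wrapper's interrupt-enable bits live in write-one-to-set / write-one-to-clear register pairs: writing 0 to the ...ENABLE_SET register is a no-op, so masking has to go through the matching ...ENABLE_CLR register with the bits to be cleared. A generic sketch of the set/clear idiom; the register offsets are illustrative.

#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_IRQENABLE_SET   0x10    /* write 1s to enable  */
#define EXAMPLE_IRQENABLE_CLR   0x14    /* write 1s to disable */

static void example_irq_enable(void __iomem *base, u32 bits)
{
        writel(bits, base + EXAMPLE_IRQENABLE_SET);
}

static void example_irq_disable(void __iomem *base, u32 bits)
{
        /* writel(0, base + EXAMPLE_IRQENABLE_SET) would change nothing */
        writel(bits, base + EXAMPLE_IRQENABLE_CLR);
}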
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 75648145dc1b..c42765b3a060 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c | |||
| @@ -1161,7 +1161,6 @@ static ssize_t interf_grp_compatible_id_store(struct usb_os_desc *desc, | |||
| 1161 | if (desc->opts_mutex) | 1161 | if (desc->opts_mutex) |
| 1162 | mutex_lock(desc->opts_mutex); | 1162 | mutex_lock(desc->opts_mutex); |
| 1163 | memcpy(desc->ext_compat_id, page, l); | 1163 | memcpy(desc->ext_compat_id, page, l); |
| 1164 | desc->ext_compat_id[l] = '\0'; | ||
| 1165 | 1164 | ||
| 1166 | if (desc->opts_mutex) | 1165 | if (desc->opts_mutex) |
| 1167 | mutex_unlock(desc->opts_mutex); | 1166 | mutex_unlock(desc->opts_mutex); |
| @@ -1192,7 +1191,6 @@ static ssize_t interf_grp_sub_compatible_id_store(struct usb_os_desc *desc, | |||
| 1192 | if (desc->opts_mutex) | 1191 | if (desc->opts_mutex) |
| 1193 | mutex_lock(desc->opts_mutex); | 1192 | mutex_lock(desc->opts_mutex); |
| 1194 | memcpy(desc->ext_compat_id + 8, page, l); | 1193 | memcpy(desc->ext_compat_id + 8, page, l); |
| 1195 | desc->ext_compat_id[l + 8] = '\0'; | ||
| 1196 | 1194 | ||
| 1197 | if (desc->opts_mutex) | 1195 | if (desc->opts_mutex) |
| 1198 | mutex_unlock(desc->opts_mutex); | 1196 | mutex_unlock(desc->opts_mutex); |
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index af98b096af2f..175c9956cbe3 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c | |||
| @@ -144,10 +144,9 @@ struct ffs_io_data { | |||
| 144 | bool read; | 144 | bool read; |
| 145 | 145 | ||
| 146 | struct kiocb *kiocb; | 146 | struct kiocb *kiocb; |
| 147 | const struct iovec *iovec; | 147 | struct iov_iter data; |
| 148 | unsigned long nr_segs; | 148 | const void *to_free; |
| 149 | char __user *buf; | 149 | char *buf; |
| 150 | size_t len; | ||
| 151 | 150 | ||
| 152 | struct mm_struct *mm; | 151 | struct mm_struct *mm; |
| 153 | struct work_struct work; | 152 | struct work_struct work; |
| @@ -649,29 +648,10 @@ static void ffs_user_copy_worker(struct work_struct *work) | |||
| 649 | io_data->req->actual; | 648 | io_data->req->actual; |
| 650 | 649 | ||
| 651 | if (io_data->read && ret > 0) { | 650 | if (io_data->read && ret > 0) { |
| 652 | int i; | ||
| 653 | size_t pos = 0; | ||
| 654 | |||
| 655 | /* | ||
| 656 | * Since req->length may be bigger than io_data->len (after | ||
| 657 | * being rounded up to maxpacketsize), we may end up with more | ||
| 658 | * data then user space has space for. | ||
| 659 | */ | ||
| 660 | ret = min_t(int, ret, io_data->len); | ||
| 661 | |||
| 662 | use_mm(io_data->mm); | 651 | use_mm(io_data->mm); |
| 663 | for (i = 0; i < io_data->nr_segs; i++) { | 652 | ret = copy_to_iter(io_data->buf, ret, &io_data->data); |
| 664 | size_t len = min_t(size_t, ret - pos, | 653 | if (iov_iter_count(&io_data->data)) |
| 665 | io_data->iovec[i].iov_len); | 654 | ret = -EFAULT; |
| 666 | if (!len) | ||
| 667 | break; | ||
| 668 | if (unlikely(copy_to_user(io_data->iovec[i].iov_base, | ||
| 669 | &io_data->buf[pos], len))) { | ||
| 670 | ret = -EFAULT; | ||
| 671 | break; | ||
| 672 | } | ||
| 673 | pos += len; | ||
| 674 | } | ||
| 675 | unuse_mm(io_data->mm); | 655 | unuse_mm(io_data->mm); |
| 676 | } | 656 | } |
| 677 | 657 | ||
| @@ -684,7 +664,7 @@ static void ffs_user_copy_worker(struct work_struct *work) | |||
| 684 | 664 | ||
| 685 | io_data->kiocb->private = NULL; | 665 | io_data->kiocb->private = NULL; |
| 686 | if (io_data->read) | 666 | if (io_data->read) |
| 687 | kfree(io_data->iovec); | 667 | kfree(io_data->to_free); |
| 688 | kfree(io_data->buf); | 668 | kfree(io_data->buf); |
| 689 | kfree(io_data); | 669 | kfree(io_data); |
| 690 | } | 670 | } |
| @@ -743,6 +723,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) | |||
| 743 | * before the waiting completes, so do not assign to 'gadget' earlier | 723 | * before the waiting completes, so do not assign to 'gadget' earlier |
| 744 | */ | 724 | */ |
| 745 | struct usb_gadget *gadget = epfile->ffs->gadget; | 725 | struct usb_gadget *gadget = epfile->ffs->gadget; |
| 726 | size_t copied; | ||
| 746 | 727 | ||
| 747 | spin_lock_irq(&epfile->ffs->eps_lock); | 728 | spin_lock_irq(&epfile->ffs->eps_lock); |
| 748 | /* In the meantime, endpoint got disabled or changed. */ | 729 | /* In the meantime, endpoint got disabled or changed. */ |
| @@ -750,34 +731,21 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) | |||
| 750 | spin_unlock_irq(&epfile->ffs->eps_lock); | 731 | spin_unlock_irq(&epfile->ffs->eps_lock); |
| 751 | return -ESHUTDOWN; | 732 | return -ESHUTDOWN; |
| 752 | } | 733 | } |
| 734 | data_len = iov_iter_count(&io_data->data); | ||
| 753 | /* | 735 | /* |
| 754 | * Controller may require buffer size to be aligned to | 736 | * Controller may require buffer size to be aligned to |
| 755 | * maxpacketsize of an out endpoint. | 737 | * maxpacketsize of an out endpoint. |
| 756 | */ | 738 | */ |
| 757 | data_len = io_data->read ? | 739 | if (io_data->read) |
| 758 | usb_ep_align_maybe(gadget, ep->ep, io_data->len) : | 740 | data_len = usb_ep_align_maybe(gadget, ep->ep, data_len); |
| 759 | io_data->len; | ||
| 760 | spin_unlock_irq(&epfile->ffs->eps_lock); | 741 | spin_unlock_irq(&epfile->ffs->eps_lock); |
| 761 | 742 | ||
| 762 | data = kmalloc(data_len, GFP_KERNEL); | 743 | data = kmalloc(data_len, GFP_KERNEL); |
| 763 | if (unlikely(!data)) | 744 | if (unlikely(!data)) |
| 764 | return -ENOMEM; | 745 | return -ENOMEM; |
| 765 | if (io_data->aio && !io_data->read) { | 746 | if (!io_data->read) { |
| 766 | int i; | 747 | copied = copy_from_iter(data, data_len, &io_data->data); |
| 767 | size_t pos = 0; | 748 | if (copied != data_len) { |
| 768 | for (i = 0; i < io_data->nr_segs; i++) { | ||
| 769 | if (unlikely(copy_from_user(&data[pos], | ||
| 770 | io_data->iovec[i].iov_base, | ||
| 771 | io_data->iovec[i].iov_len))) { | ||
| 772 | ret = -EFAULT; | ||
| 773 | goto error; | ||
| 774 | } | ||
| 775 | pos += io_data->iovec[i].iov_len; | ||
| 776 | } | ||
| 777 | } else { | ||
| 778 | if (!io_data->read && | ||
| 779 | unlikely(__copy_from_user(data, io_data->buf, | ||
| 780 | io_data->len))) { | ||
| 781 | ret = -EFAULT; | 749 | ret = -EFAULT; |
| 782 | goto error; | 750 | goto error; |
| 783 | } | 751 | } |
| @@ -876,10 +844,8 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) | |||
| 876 | */ | 844 | */ |
| 877 | ret = ep->status; | 845 | ret = ep->status; |
| 878 | if (io_data->read && ret > 0) { | 846 | if (io_data->read && ret > 0) { |
| 879 | ret = min_t(size_t, ret, io_data->len); | 847 | ret = copy_to_iter(data, ret, &io_data->data); |
| 880 | 848 | if (unlikely(iov_iter_count(&io_data->data))) | |
| 881 | if (unlikely(copy_to_user(io_data->buf, | ||
| 882 | data, ret))) | ||
| 883 | ret = -EFAULT; | 849 | ret = -EFAULT; |
| 884 | } | 850 | } |
| 885 | } | 851 | } |
| @@ -898,37 +864,6 @@ error: | |||
| 898 | return ret; | 864 | return ret; |
| 899 | } | 865 | } |
| 900 | 866 | ||
| 901 | static ssize_t | ||
| 902 | ffs_epfile_write(struct file *file, const char __user *buf, size_t len, | ||
| 903 | loff_t *ptr) | ||
| 904 | { | ||
| 905 | struct ffs_io_data io_data; | ||
| 906 | |||
| 907 | ENTER(); | ||
| 908 | |||
| 909 | io_data.aio = false; | ||
| 910 | io_data.read = false; | ||
| 911 | io_data.buf = (char * __user)buf; | ||
| 912 | io_data.len = len; | ||
| 913 | |||
| 914 | return ffs_epfile_io(file, &io_data); | ||
| 915 | } | ||
| 916 | |||
| 917 | static ssize_t | ||
| 918 | ffs_epfile_read(struct file *file, char __user *buf, size_t len, loff_t *ptr) | ||
| 919 | { | ||
| 920 | struct ffs_io_data io_data; | ||
| 921 | |||
| 922 | ENTER(); | ||
| 923 | |||
| 924 | io_data.aio = false; | ||
| 925 | io_data.read = true; | ||
| 926 | io_data.buf = buf; | ||
| 927 | io_data.len = len; | ||
| 928 | |||
| 929 | return ffs_epfile_io(file, &io_data); | ||
| 930 | } | ||
| 931 | |||
| 932 | static int | 867 | static int |
| 933 | ffs_epfile_open(struct inode *inode, struct file *file) | 868 | ffs_epfile_open(struct inode *inode, struct file *file) |
| 934 | { | 869 | { |
| @@ -965,67 +900,86 @@ static int ffs_aio_cancel(struct kiocb *kiocb) | |||
| 965 | return value; | 900 | return value; |
| 966 | } | 901 | } |
| 967 | 902 | ||
| 968 | static ssize_t ffs_epfile_aio_write(struct kiocb *kiocb, | 903 | static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from) |
| 969 | const struct iovec *iovec, | ||
| 970 | unsigned long nr_segs, loff_t loff) | ||
| 971 | { | 904 | { |
| 972 | struct ffs_io_data *io_data; | 905 | struct ffs_io_data io_data, *p = &io_data; |
| 906 | ssize_t res; | ||
| 973 | 907 | ||
| 974 | ENTER(); | 908 | ENTER(); |
| 975 | 909 | ||
| 976 | io_data = kmalloc(sizeof(*io_data), GFP_KERNEL); | 910 | if (!is_sync_kiocb(kiocb)) { |
| 977 | if (unlikely(!io_data)) | 911 | p = kmalloc(sizeof(io_data), GFP_KERNEL); |
| 978 | return -ENOMEM; | 912 | if (unlikely(!p)) |
| 913 | return -ENOMEM; | ||
| 914 | p->aio = true; | ||
| 915 | } else { | ||
| 916 | p->aio = false; | ||
| 917 | } | ||
| 979 | 918 | ||
| 980 | io_data->aio = true; | 919 | p->read = false; |
| 981 | io_data->read = false; | 920 | p->kiocb = kiocb; |
| 982 | io_data->kiocb = kiocb; | 921 | p->data = *from; |
| 983 | io_data->iovec = iovec; | 922 | p->mm = current->mm; |
| 984 | io_data->nr_segs = nr_segs; | ||
| 985 | io_data->len = kiocb->ki_nbytes; | ||
| 986 | io_data->mm = current->mm; | ||
| 987 | 923 | ||
| 988 | kiocb->private = io_data; | 924 | kiocb->private = p; |
| 989 | 925 | ||
| 990 | kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); | 926 | kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); |
| 991 | 927 | ||
| 992 | return ffs_epfile_io(kiocb->ki_filp, io_data); | 928 | res = ffs_epfile_io(kiocb->ki_filp, p); |
| 929 | if (res == -EIOCBQUEUED) | ||
| 930 | return res; | ||
| 931 | if (p->aio) | ||
| 932 | kfree(p); | ||
| 933 | else | ||
| 934 | *from = p->data; | ||
| 935 | return res; | ||
| 993 | } | 936 | } |
| 994 | 937 | ||
| 995 | static ssize_t ffs_epfile_aio_read(struct kiocb *kiocb, | 938 | static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to) |
| 996 | const struct iovec *iovec, | ||
| 997 | unsigned long nr_segs, loff_t loff) | ||
| 998 | { | 939 | { |
| 999 | struct ffs_io_data *io_data; | 940 | struct ffs_io_data io_data, *p = &io_data; |
| 1000 | struct iovec *iovec_copy; | 941 | ssize_t res; |
| 1001 | 942 | ||
| 1002 | ENTER(); | 943 | ENTER(); |
| 1003 | 944 | ||
| 1004 | iovec_copy = kmalloc_array(nr_segs, sizeof(*iovec_copy), GFP_KERNEL); | 945 | if (!is_sync_kiocb(kiocb)) { |
| 1005 | if (unlikely(!iovec_copy)) | 946 | p = kmalloc(sizeof(io_data), GFP_KERNEL); |
| 1006 | return -ENOMEM; | 947 | if (unlikely(!p)) |
| 1007 | 948 | return -ENOMEM; | |
| 1008 | memcpy(iovec_copy, iovec, sizeof(struct iovec)*nr_segs); | 949 | p->aio = true; |
| 1009 | 950 | } else { | |
| 1010 | io_data = kmalloc(sizeof(*io_data), GFP_KERNEL); | 951 | p->aio = false; |
| 1011 | if (unlikely(!io_data)) { | ||
| 1012 | kfree(iovec_copy); | ||
| 1013 | return -ENOMEM; | ||
| 1014 | } | 952 | } |
| 1015 | 953 | ||
| 1016 | io_data->aio = true; | 954 | p->read = true; |
| 1017 | io_data->read = true; | 955 | p->kiocb = kiocb; |
| 1018 | io_data->kiocb = kiocb; | 956 | if (p->aio) { |
| 1019 | io_data->iovec = iovec_copy; | 957 | p->to_free = dup_iter(&p->data, to, GFP_KERNEL); |
| 1020 | io_data->nr_segs = nr_segs; | 958 | if (!p->to_free) { |
| 1021 | io_data->len = kiocb->ki_nbytes; | 959 | kfree(p); |
| 1022 | io_data->mm = current->mm; | 960 | return -ENOMEM; |
| 961 | } | ||
| 962 | } else { | ||
| 963 | p->data = *to; | ||
| 964 | p->to_free = NULL; | ||
| 965 | } | ||
| 966 | p->mm = current->mm; | ||
| 1023 | 967 | ||
| 1024 | kiocb->private = io_data; | 968 | kiocb->private = p; |
| 1025 | 969 | ||
| 1026 | kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); | 970 | kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); |
| 1027 | 971 | ||
| 1028 | return ffs_epfile_io(kiocb->ki_filp, io_data); | 972 | res = ffs_epfile_io(kiocb->ki_filp, p); |
| 973 | if (res == -EIOCBQUEUED) | ||
| 974 | return res; | ||
| 975 | |||
| 976 | if (p->aio) { | ||
| 977 | kfree(p->to_free); | ||
| 978 | kfree(p); | ||
| 979 | } else { | ||
| 980 | *to = p->data; | ||
| 981 | } | ||
| 982 | return res; | ||
| 1029 | } | 983 | } |
| 1030 | 984 | ||
| 1031 | static int | 985 | static int |
| @@ -1105,10 +1059,10 @@ static const struct file_operations ffs_epfile_operations = { | |||
| 1105 | .llseek = no_llseek, | 1059 | .llseek = no_llseek, |
| 1106 | 1060 | ||
| 1107 | .open = ffs_epfile_open, | 1061 | .open = ffs_epfile_open, |
| 1108 | .write = ffs_epfile_write, | 1062 | .write = new_sync_write, |
| 1109 | .read = ffs_epfile_read, | 1063 | .read = new_sync_read, |
| 1110 | .aio_write = ffs_epfile_aio_write, | 1064 | .write_iter = ffs_epfile_write_iter, |
| 1111 | .aio_read = ffs_epfile_aio_read, | 1065 | .read_iter = ffs_epfile_read_iter, |
| 1112 | .release = ffs_epfile_release, | 1066 | .release = ffs_epfile_release, |
| 1113 | .unlocked_ioctl = ffs_epfile_ioctl, | 1067 | .unlocked_ioctl = ffs_epfile_ioctl, |
| 1114 | }; | 1068 | }; |
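The f_fs.c conversion above replaces hand-rolled iovec walking with the iov_iter helpers: copy_to_iter()/copy_from_iter() advance the iterator and return how many bytes were actually copied, and iov_iter_count() reports what is still outstanding, so a short copy turns into -EFAULT. A small sketch of the read-side call, with an illustrative helper name.

#include <linux/uio.h>
#include <linux/types.h>
#include <linux/errno.h>

static ssize_t example_push_to_iter(struct iov_iter *to,
                                    const void *buf, size_t len)
{
        size_t copied = copy_to_iter(buf, len, to);

        if (copied != len)
                return -EFAULT;         /* user buffer too small or bad */
        return copied;
}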
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c index 426d69a9c018..a2612fb79eff 100644 --- a/drivers/usb/gadget/function/f_hid.c +++ b/drivers/usb/gadget/function/f_hid.c | |||
| @@ -569,7 +569,7 @@ fail: | |||
| 569 | return status; | 569 | return status; |
| 570 | } | 570 | } |
| 571 | 571 | ||
| 572 | const struct file_operations f_hidg_fops = { | 572 | static const struct file_operations f_hidg_fops = { |
| 573 | .owner = THIS_MODULE, | 573 | .owner = THIS_MODULE, |
| 574 | .open = f_hidg_open, | 574 | .open = f_hidg_open, |
| 575 | .release = f_hidg_release, | 575 | .release = f_hidg_release, |
diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c index c89e96cfa3e4..c0c3ef272714 100644 --- a/drivers/usb/gadget/function/f_phonet.c +++ b/drivers/usb/gadget/function/f_phonet.c | |||
| @@ -417,7 +417,10 @@ static int pn_set_alt(struct usb_function *f, unsigned intf, unsigned alt) | |||
| 417 | return -EINVAL; | 417 | return -EINVAL; |
| 418 | 418 | ||
| 419 | spin_lock(&port->lock); | 419 | spin_lock(&port->lock); |
| 420 | __pn_reset(f); | 420 | |
| 421 | if (fp->in_ep->driver_data) | ||
| 422 | __pn_reset(f); | ||
| 423 | |||
| 421 | if (alt == 1) { | 424 | if (alt == 1) { |
| 422 | int i; | 425 | int i; |
| 423 | 426 | ||
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c index e07c50ced64d..e3dae47baef3 100644 --- a/drivers/usb/gadget/function/f_sourcesink.c +++ b/drivers/usb/gadget/function/f_sourcesink.c | |||
| @@ -344,7 +344,7 @@ static struct usb_endpoint_descriptor ss_int_source_desc = { | |||
| 344 | .bInterval = USB_MS_TO_SS_INTERVAL(GZERO_INT_INTERVAL), | 344 | .bInterval = USB_MS_TO_SS_INTERVAL(GZERO_INT_INTERVAL), |
| 345 | }; | 345 | }; |
| 346 | 346 | ||
| 347 | struct usb_ss_ep_comp_descriptor ss_int_source_comp_desc = { | 347 | static struct usb_ss_ep_comp_descriptor ss_int_source_comp_desc = { |
| 348 | .bLength = USB_DT_SS_EP_COMP_SIZE, | 348 | .bLength = USB_DT_SS_EP_COMP_SIZE, |
| 349 | .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, | 349 | .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, |
| 350 | 350 | ||
| @@ -362,7 +362,7 @@ static struct usb_endpoint_descriptor ss_int_sink_desc = { | |||
| 362 | .bInterval = USB_MS_TO_SS_INTERVAL(GZERO_INT_INTERVAL), | 362 | .bInterval = USB_MS_TO_SS_INTERVAL(GZERO_INT_INTERVAL), |
| 363 | }; | 363 | }; |
| 364 | 364 | ||
| 365 | struct usb_ss_ep_comp_descriptor ss_int_sink_comp_desc = { | 365 | static struct usb_ss_ep_comp_descriptor ss_int_sink_comp_desc = { |
| 366 | .bLength = USB_DT_SS_EP_COMP_SIZE, | 366 | .bLength = USB_DT_SS_EP_COMP_SIZE, |
| 367 | .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, | 367 | .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, |
| 368 | 368 | ||
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c index 33e16658e5cf..6d3eb8b00a48 100644 --- a/drivers/usb/gadget/function/f_uac2.c +++ b/drivers/usb/gadget/function/f_uac2.c | |||
| @@ -54,7 +54,7 @@ | |||
| 54 | #define UNFLW_CTRL 8 | 54 | #define UNFLW_CTRL 8 |
| 55 | #define OVFLW_CTRL 10 | 55 | #define OVFLW_CTRL 10 |
| 56 | 56 | ||
| 57 | const char *uac2_name = "snd_uac2"; | 57 | static const char *uac2_name = "snd_uac2"; |
| 58 | 58 | ||
| 59 | struct uac2_req { | 59 | struct uac2_req { |
| 60 | struct uac2_rtd_params *pp; /* parent param */ | 60 | struct uac2_rtd_params *pp; /* parent param */ |
| @@ -634,7 +634,7 @@ static struct usb_interface_descriptor std_ac_if_desc = { | |||
| 634 | }; | 634 | }; |
| 635 | 635 | ||
| 636 | /* Clock source for IN traffic */ | 636 | /* Clock source for IN traffic */ |
| 637 | struct uac_clock_source_descriptor in_clk_src_desc = { | 637 | static struct uac_clock_source_descriptor in_clk_src_desc = { |
| 638 | .bLength = sizeof in_clk_src_desc, | 638 | .bLength = sizeof in_clk_src_desc, |
| 639 | .bDescriptorType = USB_DT_CS_INTERFACE, | 639 | .bDescriptorType = USB_DT_CS_INTERFACE, |
| 640 | 640 | ||
| @@ -646,7 +646,7 @@ struct uac_clock_source_descriptor in_clk_src_desc = { | |||
| 646 | }; | 646 | }; |
| 647 | 647 | ||
| 648 | /* Clock source for OUT traffic */ | 648 | /* Clock source for OUT traffic */ |
| 649 | struct uac_clock_source_descriptor out_clk_src_desc = { | 649 | static struct uac_clock_source_descriptor out_clk_src_desc = { |
| 650 | .bLength = sizeof out_clk_src_desc, | 650 | .bLength = sizeof out_clk_src_desc, |
| 651 | .bDescriptorType = USB_DT_CS_INTERFACE, | 651 | .bDescriptorType = USB_DT_CS_INTERFACE, |
| 652 | 652 | ||
| @@ -658,7 +658,7 @@ struct uac_clock_source_descriptor out_clk_src_desc = { | |||
| 658 | }; | 658 | }; |
| 659 | 659 | ||
| 660 | /* Input Terminal for USB_OUT */ | 660 | /* Input Terminal for USB_OUT */ |
| 661 | struct uac2_input_terminal_descriptor usb_out_it_desc = { | 661 | static struct uac2_input_terminal_descriptor usb_out_it_desc = { |
| 662 | .bLength = sizeof usb_out_it_desc, | 662 | .bLength = sizeof usb_out_it_desc, |
| 663 | .bDescriptorType = USB_DT_CS_INTERFACE, | 663 | .bDescriptorType = USB_DT_CS_INTERFACE, |
| 664 | 664 | ||
| @@ -672,7 +672,7 @@ struct uac2_input_terminal_descriptor usb_out_it_desc = { | |||
| 672 | }; | 672 | }; |
| 673 | 673 | ||
| 674 | /* Input Terminal for I/O-In */ | 674 | /* Input Terminal for I/O-In */ |
| 675 | struct uac2_input_terminal_descriptor io_in_it_desc = { | 675 | static struct uac2_input_terminal_descriptor io_in_it_desc = { |
| 676 | .bLength = sizeof io_in_it_desc, | 676 | .bLength = sizeof io_in_it_desc, |
| 677 | .bDescriptorType = USB_DT_CS_INTERFACE, | 677 | .bDescriptorType = USB_DT_CS_INTERFACE, |
| 678 | 678 | ||
| @@ -686,7 +686,7 @@ struct uac2_input_terminal_descriptor io_in_it_desc = { | |||
| 686 | }; | 686 | }; |
| 687 | 687 | ||
| 688 | /* Output Terminal for USB_IN */ | 688 | /* Output Terminal for USB_IN */ |
| 689 | struct uac2_output_terminal_descriptor usb_in_ot_desc = { | 689 | static struct uac2_output_terminal_descriptor usb_in_ot_desc = { |
| 690 | .bLength = sizeof usb_in_ot_desc, | 690 | .bLength = sizeof usb_in_ot_desc, |
| 691 | .bDescriptorType = USB_DT_CS_INTERFACE, | 691 | .bDescriptorType = USB_DT_CS_INTERFACE, |
| 692 | 692 | ||
| @@ -700,7 +700,7 @@ struct uac2_output_terminal_descriptor usb_in_ot_desc = { | |||
| 700 | }; | 700 | }; |
| 701 | 701 | ||
| 702 | /* Output Terminal for I/O-Out */ | 702 | /* Output Terminal for I/O-Out */ |
| 703 | struct uac2_output_terminal_descriptor io_out_ot_desc = { | 703 | static struct uac2_output_terminal_descriptor io_out_ot_desc = { |
| 704 | .bLength = sizeof io_out_ot_desc, | 704 | .bLength = sizeof io_out_ot_desc, |
| 705 | .bDescriptorType = USB_DT_CS_INTERFACE, | 705 | .bDescriptorType = USB_DT_CS_INTERFACE, |
| 706 | 706 | ||
| @@ -713,7 +713,7 @@ struct uac2_output_terminal_descriptor io_out_ot_desc = { | |||
| 713 | .bmControls = (CONTROL_RDWR << COPY_CTRL), | 713 | .bmControls = (CONTROL_RDWR << COPY_CTRL), |
| 714 | }; | 714 | }; |
| 715 | 715 | ||
| 716 | struct uac2_ac_header_descriptor ac_hdr_desc = { | 716 | static struct uac2_ac_header_descriptor ac_hdr_desc = { |
| 717 | .bLength = sizeof ac_hdr_desc, | 717 | .bLength = sizeof ac_hdr_desc, |
| 718 | .bDescriptorType = USB_DT_CS_INTERFACE, | 718 | .bDescriptorType = USB_DT_CS_INTERFACE, |
| 719 | 719 | ||
| @@ -751,7 +751,7 @@ static struct usb_interface_descriptor std_as_out_if1_desc = { | |||
| 751 | }; | 751 | }; |
| 752 | 752 | ||
| 753 | /* Audio Stream OUT Intface Desc */ | 753 | /* Audio Stream OUT Intface Desc */ |
| 754 | struct uac2_as_header_descriptor as_out_hdr_desc = { | 754 | static struct uac2_as_header_descriptor as_out_hdr_desc = { |
| 755 | .bLength = sizeof as_out_hdr_desc, | 755 | .bLength = sizeof as_out_hdr_desc, |
| 756 | .bDescriptorType = USB_DT_CS_INTERFACE, | 756 | .bDescriptorType = USB_DT_CS_INTERFACE, |
| 757 | 757 | ||
| @@ -764,7 +764,7 @@ struct uac2_as_header_descriptor as_out_hdr_desc = { | |||
| 764 | }; | 764 | }; |
| 765 | 765 | ||
| 766 | /* Audio USB_OUT Format */ | 766 | /* Audio USB_OUT Format */ |
| 767 | struct uac2_format_type_i_descriptor as_out_fmt1_desc = { | 767 | static struct uac2_format_type_i_descriptor as_out_fmt1_desc = { |
| 768 | .bLength = sizeof as_out_fmt1_desc, | 768 | .bLength = sizeof as_out_fmt1_desc, |
| 769 | .bDescriptorType = USB_DT_CS_INTERFACE, | 769 | .bDescriptorType = USB_DT_CS_INTERFACE, |
| 770 | .bDescriptorSubtype = UAC_FORMAT_TYPE, | 770 | .bDescriptorSubtype = UAC_FORMAT_TYPE, |
| @@ -772,7 +772,7 @@ struct uac2_format_type_i_descriptor as_out_fmt1_desc = { | |||
| 772 | }; | 772 | }; |
| 773 | 773 | ||
| 774 | /* STD AS ISO OUT Endpoint */ | 774 | /* STD AS ISO OUT Endpoint */ |
| 775 | struct usb_endpoint_descriptor fs_epout_desc = { | 775 | static struct usb_endpoint_descriptor fs_epout_desc = { |
| 776 | .bLength = USB_DT_ENDPOINT_SIZE, | 776 | .bLength = USB_DT_ENDPOINT_SIZE, |
| 777 | .bDescriptorType = USB_DT_ENDPOINT, | 777 | .bDescriptorType = USB_DT_ENDPOINT, |
| 778 | 778 | ||
| @@ -782,7 +782,7 @@ struct usb_endpoint_descriptor fs_epout_desc = { | |||
| 782 | .bInterval = 1, | 782 | .bInterval = 1, |
| 783 | }; | 783 | }; |
| 784 | 784 | ||
| 785 | struct usb_endpoint_descriptor hs_epout_desc = { | 785 | static struct usb_endpoint_descriptor hs_epout_desc = { |
| 786 | .bLength = USB_DT_ENDPOINT_SIZE, | 786 | .bLength = USB_DT_ENDPOINT_SIZE, |
| 787 | .bDescriptorType = USB_DT_ENDPOINT, | 787 | .bDescriptorType = USB_DT_ENDPOINT, |
| 788 | 788 | ||
| @@ -828,7 +828,7 @@ static struct usb_interface_descriptor std_as_in_if1_desc = { | |||
| 828 | }; | 828 | }; |
| 829 | 829 | ||
| 830 | /* Audio Stream IN Interface Desc */ | 830 | /* Audio Stream IN Interface Desc */ |
| 831 | struct uac2_as_header_descriptor as_in_hdr_desc = { | 831 | static struct uac2_as_header_descriptor as_in_hdr_desc = { |
| 832 | .bLength = sizeof as_in_hdr_desc, | 832 | .bLength = sizeof as_in_hdr_desc, |
| 833 | .bDescriptorType = USB_DT_CS_INTERFACE, | 833 | .bDescriptorType = USB_DT_CS_INTERFACE, |
| 834 | 834 | ||
| @@ -841,7 +841,7 @@ struct uac2_as_header_descriptor as_in_hdr_desc = { | |||
| 841 | }; | 841 | }; |
| 842 | 842 | ||
| 843 | /* Audio USB_IN Format */ | 843 | /* Audio USB_IN Format */ |
| 844 | struct uac2_format_type_i_descriptor as_in_fmt1_desc = { | 844 | static struct uac2_format_type_i_descriptor as_in_fmt1_desc = { |
| 845 | .bLength = sizeof as_in_fmt1_desc, | 845 | .bLength = sizeof as_in_fmt1_desc, |
| 846 | .bDescriptorType = USB_DT_CS_INTERFACE, | 846 | .bDescriptorType = USB_DT_CS_INTERFACE, |
| 847 | .bDescriptorSubtype = UAC_FORMAT_TYPE, | 847 | .bDescriptorSubtype = UAC_FORMAT_TYPE, |
| @@ -849,7 +849,7 @@ struct uac2_format_type_i_descriptor as_in_fmt1_desc = { | |||
| 849 | }; | 849 | }; |
| 850 | 850 | ||
| 851 | /* STD AS ISO IN Endpoint */ | 851 | /* STD AS ISO IN Endpoint */ |
| 852 | struct usb_endpoint_descriptor fs_epin_desc = { | 852 | static struct usb_endpoint_descriptor fs_epin_desc = { |
| 853 | .bLength = USB_DT_ENDPOINT_SIZE, | 853 | .bLength = USB_DT_ENDPOINT_SIZE, |
| 854 | .bDescriptorType = USB_DT_ENDPOINT, | 854 | .bDescriptorType = USB_DT_ENDPOINT, |
| 855 | 855 | ||
| @@ -859,7 +859,7 @@ struct usb_endpoint_descriptor fs_epin_desc = { | |||
| 859 | .bInterval = 1, | 859 | .bInterval = 1, |
| 860 | }; | 860 | }; |
| 861 | 861 | ||
| 862 | struct usb_endpoint_descriptor hs_epin_desc = { | 862 | static struct usb_endpoint_descriptor hs_epin_desc = { |
| 863 | .bLength = USB_DT_ENDPOINT_SIZE, | 863 | .bLength = USB_DT_ENDPOINT_SIZE, |
| 864 | .bDescriptorType = USB_DT_ENDPOINT, | 864 | .bDescriptorType = USB_DT_ENDPOINT, |
| 865 | 865 | ||
| @@ -1563,7 +1563,7 @@ static void afunc_unbind(struct usb_configuration *c, struct usb_function *f) | |||
| 1563 | agdev->out_ep->driver_data = NULL; | 1563 | agdev->out_ep->driver_data = NULL; |
| 1564 | } | 1564 | } |
| 1565 | 1565 | ||
| 1566 | struct usb_function *afunc_alloc(struct usb_function_instance *fi) | 1566 | static struct usb_function *afunc_alloc(struct usb_function_instance *fi) |
| 1567 | { | 1567 | { |
| 1568 | struct audio_dev *agdev; | 1568 | struct audio_dev *agdev; |
| 1569 | struct f_uac2_opts *opts; | 1569 | struct f_uac2_opts *opts; |
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c index 5aad7fededa5..8b818fd027b3 100644 --- a/drivers/usb/gadget/function/uvc_v4l2.c +++ b/drivers/usb/gadget/function/uvc_v4l2.c | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | #include "uvc.h" | 27 | #include "uvc.h" |
| 28 | #include "uvc_queue.h" | 28 | #include "uvc_queue.h" |
| 29 | #include "uvc_video.h" | 29 | #include "uvc_video.h" |
| 30 | #include "uvc_v4l2.h" | ||
| 30 | 31 | ||
| 31 | /* -------------------------------------------------------------------------- | 32 | /* -------------------------------------------------------------------------- |
| 32 | * Requests handling | 33 | * Requests handling |
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c index 9cb86bc1a9a5..50a5e637ca35 100644 --- a/drivers/usb/gadget/function/uvc_video.c +++ b/drivers/usb/gadget/function/uvc_video.c | |||
| @@ -21,6 +21,7 @@ | |||
| 21 | 21 | ||
| 22 | #include "uvc.h" | 22 | #include "uvc.h" |
| 23 | #include "uvc_queue.h" | 23 | #include "uvc_queue.h" |
| 24 | #include "uvc_video.h" | ||
| 24 | 25 | ||
| 25 | /* -------------------------------------------------------------------------- | 26 | /* -------------------------------------------------------------------------- |
| 26 | * Video codecs | 27 | * Video codecs |
diff --git a/drivers/usb/gadget/legacy/g_ffs.c b/drivers/usb/gadget/legacy/g_ffs.c index 06acfa55864a..b01b88e1b716 100644 --- a/drivers/usb/gadget/legacy/g_ffs.c +++ b/drivers/usb/gadget/legacy/g_ffs.c | |||
| @@ -133,7 +133,9 @@ struct gfs_configuration { | |||
| 133 | struct usb_configuration c; | 133 | struct usb_configuration c; |
| 134 | int (*eth)(struct usb_configuration *c); | 134 | int (*eth)(struct usb_configuration *c); |
| 135 | int num; | 135 | int num; |
| 136 | } gfs_configurations[] = { | 136 | }; |
| 137 | |||
| 138 | static struct gfs_configuration gfs_configurations[] = { | ||
| 137 | #ifdef CONFIG_USB_FUNCTIONFS_RNDIS | 139 | #ifdef CONFIG_USB_FUNCTIONFS_RNDIS |
| 138 | { | 140 | { |
| 139 | .eth = bind_rndis_config, | 141 | .eth = bind_rndis_config, |
| @@ -278,7 +280,7 @@ static void *functionfs_acquire_dev(struct ffs_dev *dev) | |||
| 278 | if (!try_module_get(THIS_MODULE)) | 280 | if (!try_module_get(THIS_MODULE)) |
| 279 | return ERR_PTR(-ENOENT); | 281 | return ERR_PTR(-ENOENT); |
| 280 | 282 | ||
| 281 | return 0; | 283 | return NULL; |
| 282 | } | 284 | } |
| 283 | 285 | ||
| 284 | static void functionfs_release_dev(struct ffs_dev *dev) | 286 | static void functionfs_release_dev(struct ffs_dev *dev) |
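The g_ffs.c hunk above changes functionfs_acquire_dev() to return NULL instead of a literal 0: the function returns void *, and its failure path already uses ERR_PTR(-ENOENT), so the "success, nothing to attach" case should be spelled as a null pointer rather than an integer constant. A minimal sketch of that convention, with a hypothetical callback name:

#include <linux/err.h>
#include <linux/module.h>

/* Hypothetical acquire callback following the same convention as
 * functionfs_acquire_dev() above: an ERR_PTR() for hard failures,
 * NULL when there is simply no private state to hand back. */
static void *example_acquire_dev(void)
{
	if (!try_module_get(THIS_MODULE))
		return ERR_PTR(-ENOENT);	/* module is going away */

	return NULL;				/* success, nothing to attach */
}

Callers of an interface like this typically test IS_ERR() first and treat NULL as plain success without private data.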
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index db49ec4c748e..200f9a584064 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c | |||
| @@ -74,6 +74,8 @@ MODULE_DESCRIPTION (DRIVER_DESC); | |||
| 74 | MODULE_AUTHOR ("David Brownell"); | 74 | MODULE_AUTHOR ("David Brownell"); |
| 75 | MODULE_LICENSE ("GPL"); | 75 | MODULE_LICENSE ("GPL"); |
| 76 | 76 | ||
| 77 | static int ep_open(struct inode *, struct file *); | ||
| 78 | |||
| 77 | 79 | ||
| 78 | /*----------------------------------------------------------------------*/ | 80 | /*----------------------------------------------------------------------*/ |
| 79 | 81 | ||
| @@ -283,14 +285,15 @@ static void epio_complete (struct usb_ep *ep, struct usb_request *req) | |||
| 283 | * still need dev->lock to use epdata->ep. | 285 | * still need dev->lock to use epdata->ep. |
| 284 | */ | 286 | */ |
| 285 | static int | 287 | static int |
| 286 | get_ready_ep (unsigned f_flags, struct ep_data *epdata) | 288 | get_ready_ep (unsigned f_flags, struct ep_data *epdata, bool is_write) |
| 287 | { | 289 | { |
| 288 | int val; | 290 | int val; |
| 289 | 291 | ||
| 290 | if (f_flags & O_NONBLOCK) { | 292 | if (f_flags & O_NONBLOCK) { |
| 291 | if (!mutex_trylock(&epdata->lock)) | 293 | if (!mutex_trylock(&epdata->lock)) |
| 292 | goto nonblock; | 294 | goto nonblock; |
| 293 | if (epdata->state != STATE_EP_ENABLED) { | 295 | if (epdata->state != STATE_EP_ENABLED && |
| 296 | (!is_write || epdata->state != STATE_EP_READY)) { | ||
| 294 | mutex_unlock(&epdata->lock); | 297 | mutex_unlock(&epdata->lock); |
| 295 | nonblock: | 298 | nonblock: |
| 296 | val = -EAGAIN; | 299 | val = -EAGAIN; |
| @@ -305,18 +308,20 @@ nonblock: | |||
| 305 | 308 | ||
| 306 | switch (epdata->state) { | 309 | switch (epdata->state) { |
| 307 | case STATE_EP_ENABLED: | 310 | case STATE_EP_ENABLED: |
| 311 | return 0; | ||
| 312 | case STATE_EP_READY: /* not configured yet */ | ||
| 313 | if (is_write) | ||
| 314 | return 0; | ||
| 315 | // FALLTHRU | ||
| 316 | case STATE_EP_UNBOUND: /* clean disconnect */ | ||
| 308 | break; | 317 | break; |
| 309 | // case STATE_EP_DISABLED: /* "can't happen" */ | 318 | // case STATE_EP_DISABLED: /* "can't happen" */ |
| 310 | // case STATE_EP_READY: /* "can't happen" */ | ||
| 311 | default: /* error! */ | 319 | default: /* error! */ |
| 312 | pr_debug ("%s: ep %p not available, state %d\n", | 320 | pr_debug ("%s: ep %p not available, state %d\n", |
| 313 | shortname, epdata, epdata->state); | 321 | shortname, epdata, epdata->state); |
| 314 | // FALLTHROUGH | ||
| 315 | case STATE_EP_UNBOUND: /* clean disconnect */ | ||
| 316 | val = -ENODEV; | ||
| 317 | mutex_unlock(&epdata->lock); | ||
| 318 | } | 322 | } |
| 319 | return val; | 323 | mutex_unlock(&epdata->lock); |
| 324 | return -ENODEV; | ||
| 320 | } | 325 | } |
| 321 | 326 | ||
| 322 | static ssize_t | 327 | static ssize_t |
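The get_ready_ep() rework above threads an is_write flag through so that a write on a not-yet-configured endpoint file is also accepted while it sits in STATE_EP_READY (the first write carries the endpoint descriptors), whereas reads still require STATE_EP_ENABLED; every other state drops the mutex and returns -ENODEV. Condensed into a stand-alone predicate (illustrative only; the STATE_EP_* constants are the driver's own):

#include <linux/types.h>

/* Illustrative predicate, not part of the patch: the acceptance rule
 * the reworked get_ready_ep() enforces before letting I/O proceed. */
static bool ep_state_allows_io(int state, bool is_write)
{
	return state == STATE_EP_ENABLED ||
	       (is_write && state == STATE_EP_READY);
}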
| @@ -363,97 +368,6 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len) | |||
| 363 | return value; | 368 | return value; |
| 364 | } | 369 | } |
| 365 | 370 | ||
| 366 | |||
| 367 | /* handle a synchronous OUT bulk/intr/iso transfer */ | ||
| 368 | static ssize_t | ||
| 369 | ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr) | ||
| 370 | { | ||
| 371 | struct ep_data *data = fd->private_data; | ||
| 372 | void *kbuf; | ||
| 373 | ssize_t value; | ||
| 374 | |||
| 375 | if ((value = get_ready_ep (fd->f_flags, data)) < 0) | ||
| 376 | return value; | ||
| 377 | |||
| 378 | /* halt any endpoint by doing a "wrong direction" i/o call */ | ||
| 379 | if (usb_endpoint_dir_in(&data->desc)) { | ||
| 380 | if (usb_endpoint_xfer_isoc(&data->desc)) { | ||
| 381 | mutex_unlock(&data->lock); | ||
| 382 | return -EINVAL; | ||
| 383 | } | ||
| 384 | DBG (data->dev, "%s halt\n", data->name); | ||
| 385 | spin_lock_irq (&data->dev->lock); | ||
| 386 | if (likely (data->ep != NULL)) | ||
| 387 | usb_ep_set_halt (data->ep); | ||
| 388 | spin_unlock_irq (&data->dev->lock); | ||
| 389 | mutex_unlock(&data->lock); | ||
| 390 | return -EBADMSG; | ||
| 391 | } | ||
| 392 | |||
| 393 | /* FIXME readahead for O_NONBLOCK and poll(); careful with ZLPs */ | ||
| 394 | |||
| 395 | value = -ENOMEM; | ||
| 396 | kbuf = kmalloc (len, GFP_KERNEL); | ||
| 397 | if (unlikely (!kbuf)) | ||
| 398 | goto free1; | ||
| 399 | |||
| 400 | value = ep_io (data, kbuf, len); | ||
| 401 | VDEBUG (data->dev, "%s read %zu OUT, status %d\n", | ||
| 402 | data->name, len, (int) value); | ||
| 403 | if (value >= 0 && copy_to_user (buf, kbuf, value)) | ||
| 404 | value = -EFAULT; | ||
| 405 | |||
| 406 | free1: | ||
| 407 | mutex_unlock(&data->lock); | ||
| 408 | kfree (kbuf); | ||
| 409 | return value; | ||
| 410 | } | ||
| 411 | |||
| 412 | /* handle a synchronous IN bulk/intr/iso transfer */ | ||
| 413 | static ssize_t | ||
| 414 | ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | ||
| 415 | { | ||
| 416 | struct ep_data *data = fd->private_data; | ||
| 417 | void *kbuf; | ||
| 418 | ssize_t value; | ||
| 419 | |||
| 420 | if ((value = get_ready_ep (fd->f_flags, data)) < 0) | ||
| 421 | return value; | ||
| 422 | |||
| 423 | /* halt any endpoint by doing a "wrong direction" i/o call */ | ||
| 424 | if (!usb_endpoint_dir_in(&data->desc)) { | ||
| 425 | if (usb_endpoint_xfer_isoc(&data->desc)) { | ||
| 426 | mutex_unlock(&data->lock); | ||
| 427 | return -EINVAL; | ||
| 428 | } | ||
| 429 | DBG (data->dev, "%s halt\n", data->name); | ||
| 430 | spin_lock_irq (&data->dev->lock); | ||
| 431 | if (likely (data->ep != NULL)) | ||
| 432 | usb_ep_set_halt (data->ep); | ||
| 433 | spin_unlock_irq (&data->dev->lock); | ||
| 434 | mutex_unlock(&data->lock); | ||
| 435 | return -EBADMSG; | ||
| 436 | } | ||
| 437 | |||
| 438 | /* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */ | ||
| 439 | |||
| 440 | value = -ENOMEM; | ||
| 441 | kbuf = memdup_user(buf, len); | ||
| 442 | if (IS_ERR(kbuf)) { | ||
| 443 | value = PTR_ERR(kbuf); | ||
| 444 | kbuf = NULL; | ||
| 445 | goto free1; | ||
| 446 | } | ||
| 447 | |||
| 448 | value = ep_io (data, kbuf, len); | ||
| 449 | VDEBUG (data->dev, "%s write %zu IN, status %d\n", | ||
| 450 | data->name, len, (int) value); | ||
| 451 | free1: | ||
| 452 | mutex_unlock(&data->lock); | ||
| 453 | kfree (kbuf); | ||
| 454 | return value; | ||
| 455 | } | ||
| 456 | |||
| 457 | static int | 371 | static int |
| 458 | ep_release (struct inode *inode, struct file *fd) | 372 | ep_release (struct inode *inode, struct file *fd) |
| 459 | { | 373 | { |
| @@ -481,7 +395,7 @@ static long ep_ioctl(struct file *fd, unsigned code, unsigned long value) | |||
| 481 | struct ep_data *data = fd->private_data; | 395 | struct ep_data *data = fd->private_data; |
| 482 | int status; | 396 | int status; |
| 483 | 397 | ||
| 484 | if ((status = get_ready_ep (fd->f_flags, data)) < 0) | 398 | if ((status = get_ready_ep (fd->f_flags, data, false)) < 0) |
| 485 | return status; | 399 | return status; |
| 486 | 400 | ||
| 487 | spin_lock_irq (&data->dev->lock); | 401 | spin_lock_irq (&data->dev->lock); |
| @@ -517,8 +431,8 @@ struct kiocb_priv { | |||
| 517 | struct mm_struct *mm; | 431 | struct mm_struct *mm; |
| 518 | struct work_struct work; | 432 | struct work_struct work; |
| 519 | void *buf; | 433 | void *buf; |
| 520 | const struct iovec *iv; | 434 | struct iov_iter to; |
| 521 | unsigned long nr_segs; | 435 | const void *to_free; |
| 522 | unsigned actual; | 436 | unsigned actual; |
| 523 | }; | 437 | }; |
| 524 | 438 | ||
| @@ -541,35 +455,6 @@ static int ep_aio_cancel(struct kiocb *iocb) | |||
| 541 | return value; | 455 | return value; |
| 542 | } | 456 | } |
| 543 | 457 | ||
| 544 | static ssize_t ep_copy_to_user(struct kiocb_priv *priv) | ||
| 545 | { | ||
| 546 | ssize_t len, total; | ||
| 547 | void *to_copy; | ||
| 548 | int i; | ||
| 549 | |||
| 550 | /* copy stuff into user buffers */ | ||
| 551 | total = priv->actual; | ||
| 552 | len = 0; | ||
| 553 | to_copy = priv->buf; | ||
| 554 | for (i=0; i < priv->nr_segs; i++) { | ||
| 555 | ssize_t this = min((ssize_t)(priv->iv[i].iov_len), total); | ||
| 556 | |||
| 557 | if (copy_to_user(priv->iv[i].iov_base, to_copy, this)) { | ||
| 558 | if (len == 0) | ||
| 559 | len = -EFAULT; | ||
| 560 | break; | ||
| 561 | } | ||
| 562 | |||
| 563 | total -= this; | ||
| 564 | len += this; | ||
| 565 | to_copy += this; | ||
| 566 | if (total == 0) | ||
| 567 | break; | ||
| 568 | } | ||
| 569 | |||
| 570 | return len; | ||
| 571 | } | ||
| 572 | |||
| 573 | static void ep_user_copy_worker(struct work_struct *work) | 458 | static void ep_user_copy_worker(struct work_struct *work) |
| 574 | { | 459 | { |
| 575 | struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work); | 460 | struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work); |
| @@ -578,13 +463,16 @@ static void ep_user_copy_worker(struct work_struct *work) | |||
| 578 | size_t ret; | 463 | size_t ret; |
| 579 | 464 | ||
| 580 | use_mm(mm); | 465 | use_mm(mm); |
| 581 | ret = ep_copy_to_user(priv); | 466 | ret = copy_to_iter(priv->buf, priv->actual, &priv->to); |
| 582 | unuse_mm(mm); | 467 | unuse_mm(mm); |
| 468 | if (!ret) | ||
| 469 | ret = -EFAULT; | ||
| 583 | 470 | ||
| 584 | /* completing the iocb can drop the ctx and mm, don't touch mm after */ | 471 | /* completing the iocb can drop the ctx and mm, don't touch mm after */ |
| 585 | aio_complete(iocb, ret, ret); | 472 | aio_complete(iocb, ret, ret); |
| 586 | 473 | ||
| 587 | kfree(priv->buf); | 474 | kfree(priv->buf); |
| 475 | kfree(priv->to_free); | ||
| 588 | kfree(priv); | 476 | kfree(priv); |
| 589 | } | 477 | } |
| 590 | 478 | ||
| @@ -603,8 +491,9 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req) | |||
| 603 | * don't need to copy anything to userspace, so we can | 491 | * don't need to copy anything to userspace, so we can |
| 604 | * complete the aio request immediately. | 492 | * complete the aio request immediately. |
| 605 | */ | 493 | */ |
| 606 | if (priv->iv == NULL || unlikely(req->actual == 0)) { | 494 | if (priv->to_free == NULL || unlikely(req->actual == 0)) { |
| 607 | kfree(req->buf); | 495 | kfree(req->buf); |
| 496 | kfree(priv->to_free); | ||
| 608 | kfree(priv); | 497 | kfree(priv); |
| 609 | iocb->private = NULL; | 498 | iocb->private = NULL; |
| 610 | /* aio_complete() reports bytes-transferred _and_ faults */ | 499 | /* aio_complete() reports bytes-transferred _and_ faults */ |
| @@ -618,6 +507,7 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req) | |||
| 618 | 507 | ||
| 619 | priv->buf = req->buf; | 508 | priv->buf = req->buf; |
| 620 | priv->actual = req->actual; | 509 | priv->actual = req->actual; |
| 510 | INIT_WORK(&priv->work, ep_user_copy_worker); | ||
| 621 | schedule_work(&priv->work); | 511 | schedule_work(&priv->work); |
| 622 | } | 512 | } |
| 623 | spin_unlock(&epdata->dev->lock); | 513 | spin_unlock(&epdata->dev->lock); |
| @@ -626,38 +516,17 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req) | |||
| 626 | put_ep(epdata); | 516 | put_ep(epdata); |
| 627 | } | 517 | } |
| 628 | 518 | ||
| 629 | static ssize_t | 519 | static ssize_t ep_aio(struct kiocb *iocb, |
| 630 | ep_aio_rwtail( | 520 | struct kiocb_priv *priv, |
| 631 | struct kiocb *iocb, | 521 | struct ep_data *epdata, |
| 632 | char *buf, | 522 | char *buf, |
| 633 | size_t len, | 523 | size_t len) |
| 634 | struct ep_data *epdata, | ||
| 635 | const struct iovec *iv, | ||
| 636 | unsigned long nr_segs | ||
| 637 | ) | ||
| 638 | { | 524 | { |
| 639 | struct kiocb_priv *priv; | 525 | struct usb_request *req; |
| 640 | struct usb_request *req; | 526 | ssize_t value; |
| 641 | ssize_t value; | ||
| 642 | 527 | ||
| 643 | priv = kmalloc(sizeof *priv, GFP_KERNEL); | ||
| 644 | if (!priv) { | ||
| 645 | value = -ENOMEM; | ||
| 646 | fail: | ||
| 647 | kfree(buf); | ||
| 648 | return value; | ||
| 649 | } | ||
| 650 | iocb->private = priv; | 528 | iocb->private = priv; |
| 651 | priv->iocb = iocb; | 529 | priv->iocb = iocb; |
| 652 | priv->iv = iv; | ||
| 653 | priv->nr_segs = nr_segs; | ||
| 654 | INIT_WORK(&priv->work, ep_user_copy_worker); | ||
| 655 | |||
| 656 | value = get_ready_ep(iocb->ki_filp->f_flags, epdata); | ||
| 657 | if (unlikely(value < 0)) { | ||
| 658 | kfree(priv); | ||
| 659 | goto fail; | ||
| 660 | } | ||
| 661 | 530 | ||
| 662 | kiocb_set_cancel_fn(iocb, ep_aio_cancel); | 531 | kiocb_set_cancel_fn(iocb, ep_aio_cancel); |
| 663 | get_ep(epdata); | 532 | get_ep(epdata); |
| @@ -669,75 +538,154 @@ fail: | |||
| 669 | * allocate or submit those if the host disconnected. | 538 | * allocate or submit those if the host disconnected. |
| 670 | */ | 539 | */ |
| 671 | spin_lock_irq(&epdata->dev->lock); | 540 | spin_lock_irq(&epdata->dev->lock); |
| 672 | if (likely(epdata->ep)) { | 541 | value = -ENODEV; |
| 673 | req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC); | 542 | if (unlikely(epdata->ep)) |
| 674 | if (likely(req)) { | 543 | goto fail; |
| 675 | priv->req = req; | ||
| 676 | req->buf = buf; | ||
| 677 | req->length = len; | ||
| 678 | req->complete = ep_aio_complete; | ||
| 679 | req->context = iocb; | ||
| 680 | value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC); | ||
| 681 | if (unlikely(0 != value)) | ||
| 682 | usb_ep_free_request(epdata->ep, req); | ||
| 683 | } else | ||
| 684 | value = -EAGAIN; | ||
| 685 | } else | ||
| 686 | value = -ENODEV; | ||
| 687 | spin_unlock_irq(&epdata->dev->lock); | ||
| 688 | 544 | ||
| 689 | mutex_unlock(&epdata->lock); | 545 | req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC); |
| 546 | value = -ENOMEM; | ||
| 547 | if (unlikely(!req)) | ||
| 548 | goto fail; | ||
| 690 | 549 | ||
| 691 | if (unlikely(value)) { | 550 | priv->req = req; |
| 692 | kfree(priv); | 551 | req->buf = buf; |
| 693 | put_ep(epdata); | 552 | req->length = len; |
| 694 | } else | 553 | req->complete = ep_aio_complete; |
| 695 | value = -EIOCBQUEUED; | 554 | req->context = iocb; |
| 555 | value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC); | ||
| 556 | if (unlikely(0 != value)) { | ||
| 557 | usb_ep_free_request(epdata->ep, req); | ||
| 558 | goto fail; | ||
| 559 | } | ||
| 560 | spin_unlock_irq(&epdata->dev->lock); | ||
| 561 | return -EIOCBQUEUED; | ||
| 562 | |||
| 563 | fail: | ||
| 564 | spin_unlock_irq(&epdata->dev->lock); | ||
| 565 | kfree(priv->to_free); | ||
| 566 | kfree(priv); | ||
| 567 | put_ep(epdata); | ||
| 696 | return value; | 568 | return value; |
| 697 | } | 569 | } |
| 698 | 570 | ||
| 699 | static ssize_t | 571 | static ssize_t |
| 700 | ep_aio_read(struct kiocb *iocb, const struct iovec *iov, | 572 | ep_read_iter(struct kiocb *iocb, struct iov_iter *to) |
| 701 | unsigned long nr_segs, loff_t o) | ||
| 702 | { | 573 | { |
| 703 | struct ep_data *epdata = iocb->ki_filp->private_data; | 574 | struct file *file = iocb->ki_filp; |
| 704 | char *buf; | 575 | struct ep_data *epdata = file->private_data; |
| 576 | size_t len = iov_iter_count(to); | ||
| 577 | ssize_t value; | ||
| 578 | char *buf; | ||
| 705 | 579 | ||
| 706 | if (unlikely(usb_endpoint_dir_in(&epdata->desc))) | 580 | if ((value = get_ready_ep(file->f_flags, epdata, false)) < 0) |
| 707 | return -EINVAL; | 581 | return value; |
| 708 | 582 | ||
| 709 | buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL); | 583 | /* halt any endpoint by doing a "wrong direction" i/o call */ |
| 710 | if (unlikely(!buf)) | 584 | if (usb_endpoint_dir_in(&epdata->desc)) { |
| 711 | return -ENOMEM; | 585 | if (usb_endpoint_xfer_isoc(&epdata->desc) || |
| 586 | !is_sync_kiocb(iocb)) { | ||
| 587 | mutex_unlock(&epdata->lock); | ||
| 588 | return -EINVAL; | ||
| 589 | } | ||
| 590 | DBG (epdata->dev, "%s halt\n", epdata->name); | ||
| 591 | spin_lock_irq(&epdata->dev->lock); | ||
| 592 | if (likely(epdata->ep != NULL)) | ||
| 593 | usb_ep_set_halt(epdata->ep); | ||
| 594 | spin_unlock_irq(&epdata->dev->lock); | ||
| 595 | mutex_unlock(&epdata->lock); | ||
| 596 | return -EBADMSG; | ||
| 597 | } | ||
| 712 | 598 | ||
| 713 | return ep_aio_rwtail(iocb, buf, iocb->ki_nbytes, epdata, iov, nr_segs); | 599 | buf = kmalloc(len, GFP_KERNEL); |
| 600 | if (unlikely(!buf)) { | ||
| 601 | mutex_unlock(&epdata->lock); | ||
| 602 | return -ENOMEM; | ||
| 603 | } | ||
| 604 | if (is_sync_kiocb(iocb)) { | ||
| 605 | value = ep_io(epdata, buf, len); | ||
| 606 | if (value >= 0 && copy_to_iter(buf, value, to)) | ||
| 607 | value = -EFAULT; | ||
| 608 | } else { | ||
| 609 | struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL); | ||
| 610 | value = -ENOMEM; | ||
| 611 | if (!priv) | ||
| 612 | goto fail; | ||
| 613 | priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL); | ||
| 614 | if (!priv->to_free) { | ||
| 615 | kfree(priv); | ||
| 616 | goto fail; | ||
| 617 | } | ||
| 618 | value = ep_aio(iocb, priv, epdata, buf, len); | ||
| 619 | if (value == -EIOCBQUEUED) | ||
| 620 | buf = NULL; | ||
| 621 | } | ||
| 622 | fail: | ||
| 623 | kfree(buf); | ||
| 624 | mutex_unlock(&epdata->lock); | ||
| 625 | return value; | ||
| 714 | } | 626 | } |
| 715 | 627 | ||
| 628 | static ssize_t ep_config(struct ep_data *, const char *, size_t); | ||
| 629 | |||
| 716 | static ssize_t | 630 | static ssize_t |
| 717 | ep_aio_write(struct kiocb *iocb, const struct iovec *iov, | 631 | ep_write_iter(struct kiocb *iocb, struct iov_iter *from) |
| 718 | unsigned long nr_segs, loff_t o) | ||
| 719 | { | 632 | { |
| 720 | struct ep_data *epdata = iocb->ki_filp->private_data; | 633 | struct file *file = iocb->ki_filp; |
| 721 | char *buf; | 634 | struct ep_data *epdata = file->private_data; |
| 722 | size_t len = 0; | 635 | size_t len = iov_iter_count(from); |
| 723 | int i = 0; | 636 | bool configured; |
| 637 | ssize_t value; | ||
| 638 | char *buf; | ||
| 639 | |||
| 640 | if ((value = get_ready_ep(file->f_flags, epdata, true)) < 0) | ||
| 641 | return value; | ||
| 724 | 642 | ||
| 725 | if (unlikely(!usb_endpoint_dir_in(&epdata->desc))) | 643 | configured = epdata->state == STATE_EP_ENABLED; |
| 726 | return -EINVAL; | ||
| 727 | 644 | ||
| 728 | buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL); | 645 | /* halt any endpoint by doing a "wrong direction" i/o call */ |
| 729 | if (unlikely(!buf)) | 646 | if (configured && !usb_endpoint_dir_in(&epdata->desc)) { |
| 647 | if (usb_endpoint_xfer_isoc(&epdata->desc) || | ||
| 648 | !is_sync_kiocb(iocb)) { | ||
| 649 | mutex_unlock(&epdata->lock); | ||
| 650 | return -EINVAL; | ||
| 651 | } | ||
| 652 | DBG (epdata->dev, "%s halt\n", epdata->name); | ||
| 653 | spin_lock_irq(&epdata->dev->lock); | ||
| 654 | if (likely(epdata->ep != NULL)) | ||
| 655 | usb_ep_set_halt(epdata->ep); | ||
| 656 | spin_unlock_irq(&epdata->dev->lock); | ||
| 657 | mutex_unlock(&epdata->lock); | ||
| 658 | return -EBADMSG; | ||
| 659 | } | ||
| 660 | |||
| 661 | buf = kmalloc(len, GFP_KERNEL); | ||
| 662 | if (unlikely(!buf)) { | ||
| 663 | mutex_unlock(&epdata->lock); | ||
| 730 | return -ENOMEM; | 664 | return -ENOMEM; |
| 665 | } | ||
| 731 | 666 | ||
| 732 | for (i=0; i < nr_segs; i++) { | 667 | if (unlikely(copy_from_iter(buf, len, from) != len)) { |
| 733 | if (unlikely(copy_from_user(&buf[len], iov[i].iov_base, | 668 | value = -EFAULT; |
| 734 | iov[i].iov_len) != 0)) { | 669 | goto out; |
| 735 | kfree(buf); | 670 | } |
| 736 | return -EFAULT; | 671 | |
| 672 | if (unlikely(!configured)) { | ||
| 673 | value = ep_config(epdata, buf, len); | ||
| 674 | } else if (is_sync_kiocb(iocb)) { | ||
| 675 | value = ep_io(epdata, buf, len); | ||
| 676 | } else { | ||
| 677 | struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL); | ||
| 678 | value = -ENOMEM; | ||
| 679 | if (priv) { | ||
| 680 | value = ep_aio(iocb, priv, epdata, buf, len); | ||
| 681 | if (value == -EIOCBQUEUED) | ||
| 682 | buf = NULL; | ||
| 737 | } | 683 | } |
| 738 | len += iov[i].iov_len; | ||
| 739 | } | 684 | } |
| 740 | return ep_aio_rwtail(iocb, buf, len, epdata, NULL, 0); | 685 | out: |
| 686 | kfree(buf); | ||
| 687 | mutex_unlock(&epdata->lock); | ||
| 688 | return value; | ||
| 741 | } | 689 | } |
| 742 | 690 | ||
| 743 | /*----------------------------------------------------------------------*/ | 691 | /*----------------------------------------------------------------------*/ |
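The hunk above moves the gadgetfs endpoint I/O onto the iov_iter API: iov_iter_count() yields the total request size, copy_to_iter()/copy_from_iter() walk the user segments themselves and return the number of bytes actually copied, and dup_iter() clones the iterator so the AIO completion worker can finish the copy later. A minimal sketch of the read-side flush, assuming a kernel buffer of len bytes has just been filled (the helper name is made up):

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uio.h>

/* Sketch only: push a just-filled kernel buffer out through an
 * iov_iter, following ep_user_copy_worker() above in treating a
 * zero-byte copy of a non-empty buffer as a fault. */
static ssize_t sketch_flush_to_user(struct iov_iter *to, void *kbuf,
				    size_t len)
{
	size_t copied = copy_to_iter(kbuf, len, to);

	if (len && !copied)
		return -EFAULT;
	return copied;		/* bytes delivered to the caller */
}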
| @@ -745,15 +693,15 @@ ep_aio_write(struct kiocb *iocb, const struct iovec *iov, | |||
| 745 | /* used after endpoint configuration */ | 693 | /* used after endpoint configuration */ |
| 746 | static const struct file_operations ep_io_operations = { | 694 | static const struct file_operations ep_io_operations = { |
| 747 | .owner = THIS_MODULE, | 695 | .owner = THIS_MODULE, |
| 748 | .llseek = no_llseek, | ||
| 749 | 696 | ||
| 750 | .read = ep_read, | 697 | .open = ep_open, |
| 751 | .write = ep_write, | ||
| 752 | .unlocked_ioctl = ep_ioctl, | ||
| 753 | .release = ep_release, | 698 | .release = ep_release, |
| 754 | 699 | .llseek = no_llseek, | |
| 755 | .aio_read = ep_aio_read, | 700 | .read = new_sync_read, |
| 756 | .aio_write = ep_aio_write, | 701 | .write = new_sync_write, |
| 702 | .unlocked_ioctl = ep_ioctl, | ||
| 703 | .read_iter = ep_read_iter, | ||
| 704 | .write_iter = ep_write_iter, | ||
| 757 | }; | 705 | }; |
| 758 | 706 | ||
| 759 | /* ENDPOINT INITIALIZATION | 707 | /* ENDPOINT INITIALIZATION |
| @@ -770,17 +718,12 @@ static const struct file_operations ep_io_operations = { | |||
| 770 | * speed descriptor, then optional high speed descriptor. | 718 | * speed descriptor, then optional high speed descriptor. |
| 771 | */ | 719 | */ |
| 772 | static ssize_t | 720 | static ssize_t |
| 773 | ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | 721 | ep_config (struct ep_data *data, const char *buf, size_t len) |
| 774 | { | 722 | { |
| 775 | struct ep_data *data = fd->private_data; | ||
| 776 | struct usb_ep *ep; | 723 | struct usb_ep *ep; |
| 777 | u32 tag; | 724 | u32 tag; |
| 778 | int value, length = len; | 725 | int value, length = len; |
| 779 | 726 | ||
| 780 | value = mutex_lock_interruptible(&data->lock); | ||
| 781 | if (value < 0) | ||
| 782 | return value; | ||
| 783 | |||
| 784 | if (data->state != STATE_EP_READY) { | 727 | if (data->state != STATE_EP_READY) { |
| 785 | value = -EL2HLT; | 728 | value = -EL2HLT; |
| 786 | goto fail; | 729 | goto fail; |
| @@ -791,9 +734,7 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
| 791 | goto fail0; | 734 | goto fail0; |
| 792 | 735 | ||
| 793 | /* we might need to change message format someday */ | 736 | /* we might need to change message format someday */ |
| 794 | if (copy_from_user (&tag, buf, 4)) { | 737 | memcpy(&tag, buf, 4); |
| 795 | goto fail1; | ||
| 796 | } | ||
| 797 | if (tag != 1) { | 738 | if (tag != 1) { |
| 798 | DBG(data->dev, "config %s, bad tag %d\n", data->name, tag); | 739 | DBG(data->dev, "config %s, bad tag %d\n", data->name, tag); |
| 799 | goto fail0; | 740 | goto fail0; |
| @@ -806,19 +747,15 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
| 806 | */ | 747 | */ |
| 807 | 748 | ||
| 808 | /* full/low speed descriptor, then high speed */ | 749 | /* full/low speed descriptor, then high speed */ |
| 809 | if (copy_from_user (&data->desc, buf, USB_DT_ENDPOINT_SIZE)) { | 750 | memcpy(&data->desc, buf, USB_DT_ENDPOINT_SIZE); |
| 810 | goto fail1; | ||
| 811 | } | ||
| 812 | if (data->desc.bLength != USB_DT_ENDPOINT_SIZE | 751 | if (data->desc.bLength != USB_DT_ENDPOINT_SIZE |
| 813 | || data->desc.bDescriptorType != USB_DT_ENDPOINT) | 752 | || data->desc.bDescriptorType != USB_DT_ENDPOINT) |
| 814 | goto fail0; | 753 | goto fail0; |
| 815 | if (len != USB_DT_ENDPOINT_SIZE) { | 754 | if (len != USB_DT_ENDPOINT_SIZE) { |
| 816 | if (len != 2 * USB_DT_ENDPOINT_SIZE) | 755 | if (len != 2 * USB_DT_ENDPOINT_SIZE) |
| 817 | goto fail0; | 756 | goto fail0; |
| 818 | if (copy_from_user (&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE, | 757 | memcpy(&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE, |
| 819 | USB_DT_ENDPOINT_SIZE)) { | 758 | USB_DT_ENDPOINT_SIZE); |
| 820 | goto fail1; | ||
| 821 | } | ||
| 822 | if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE | 759 | if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE |
| 823 | || data->hs_desc.bDescriptorType | 760 | || data->hs_desc.bDescriptorType |
| 824 | != USB_DT_ENDPOINT) { | 761 | != USB_DT_ENDPOINT) { |
| @@ -840,24 +777,20 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
| 840 | case USB_SPEED_LOW: | 777 | case USB_SPEED_LOW: |
| 841 | case USB_SPEED_FULL: | 778 | case USB_SPEED_FULL: |
| 842 | ep->desc = &data->desc; | 779 | ep->desc = &data->desc; |
| 843 | value = usb_ep_enable(ep); | ||
| 844 | if (value == 0) | ||
| 845 | data->state = STATE_EP_ENABLED; | ||
| 846 | break; | 780 | break; |
| 847 | case USB_SPEED_HIGH: | 781 | case USB_SPEED_HIGH: |
| 848 | /* fails if caller didn't provide that descriptor... */ | 782 | /* fails if caller didn't provide that descriptor... */ |
| 849 | ep->desc = &data->hs_desc; | 783 | ep->desc = &data->hs_desc; |
| 850 | value = usb_ep_enable(ep); | ||
| 851 | if (value == 0) | ||
| 852 | data->state = STATE_EP_ENABLED; | ||
| 853 | break; | 784 | break; |
| 854 | default: | 785 | default: |
| 855 | DBG(data->dev, "unconnected, %s init abandoned\n", | 786 | DBG(data->dev, "unconnected, %s init abandoned\n", |
| 856 | data->name); | 787 | data->name); |
| 857 | value = -EINVAL; | 788 | value = -EINVAL; |
| 789 | goto gone; | ||
| 858 | } | 790 | } |
| 791 | value = usb_ep_enable(ep); | ||
| 859 | if (value == 0) { | 792 | if (value == 0) { |
| 860 | fd->f_op = &ep_io_operations; | 793 | data->state = STATE_EP_ENABLED; |
| 861 | value = length; | 794 | value = length; |
| 862 | } | 795 | } |
| 863 | gone: | 796 | gone: |
| @@ -867,14 +800,10 @@ fail: | |||
| 867 | data->desc.bDescriptorType = 0; | 800 | data->desc.bDescriptorType = 0; |
| 868 | data->hs_desc.bDescriptorType = 0; | 801 | data->hs_desc.bDescriptorType = 0; |
| 869 | } | 802 | } |
| 870 | mutex_unlock(&data->lock); | ||
| 871 | return value; | 803 | return value; |
| 872 | fail0: | 804 | fail0: |
| 873 | value = -EINVAL; | 805 | value = -EINVAL; |
| 874 | goto fail; | 806 | goto fail; |
| 875 | fail1: | ||
| 876 | value = -EFAULT; | ||
| 877 | goto fail; | ||
| 878 | } | 807 | } |
| 879 | 808 | ||
| 880 | static int | 809 | static int |
| @@ -902,15 +831,6 @@ ep_open (struct inode *inode, struct file *fd) | |||
| 902 | return value; | 831 | return value; |
| 903 | } | 832 | } |
| 904 | 833 | ||
| 905 | /* used before endpoint configuration */ | ||
| 906 | static const struct file_operations ep_config_operations = { | ||
| 907 | .llseek = no_llseek, | ||
| 908 | |||
| 909 | .open = ep_open, | ||
| 910 | .write = ep_config, | ||
| 911 | .release = ep_release, | ||
| 912 | }; | ||
| 913 | |||
| 914 | /*----------------------------------------------------------------------*/ | 834 | /*----------------------------------------------------------------------*/ |
| 915 | 835 | ||
| 916 | /* EP0 IMPLEMENTATION can be partly in userspace. | 836 | /* EP0 IMPLEMENTATION can be partly in userspace. |
| @@ -989,6 +909,10 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr) | |||
| 989 | enum ep0_state state; | 909 | enum ep0_state state; |
| 990 | 910 | ||
| 991 | spin_lock_irq (&dev->lock); | 911 | spin_lock_irq (&dev->lock); |
| 912 | if (dev->state <= STATE_DEV_OPENED) { | ||
| 913 | retval = -EINVAL; | ||
| 914 | goto done; | ||
| 915 | } | ||
| 992 | 916 | ||
| 993 | /* report fd mode change before acting on it */ | 917 | /* report fd mode change before acting on it */ |
| 994 | if (dev->setup_abort) { | 918 | if (dev->setup_abort) { |
| @@ -1187,8 +1111,6 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
| 1187 | struct dev_data *dev = fd->private_data; | 1111 | struct dev_data *dev = fd->private_data; |
| 1188 | ssize_t retval = -ESRCH; | 1112 | ssize_t retval = -ESRCH; |
| 1189 | 1113 | ||
| 1190 | spin_lock_irq (&dev->lock); | ||
| 1191 | |||
| 1192 | /* report fd mode change before acting on it */ | 1114 | /* report fd mode change before acting on it */ |
| 1193 | if (dev->setup_abort) { | 1115 | if (dev->setup_abort) { |
| 1194 | dev->setup_abort = 0; | 1116 | dev->setup_abort = 0; |
| @@ -1234,7 +1156,6 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
| 1234 | } else | 1156 | } else |
| 1235 | DBG (dev, "fail %s, state %d\n", __func__, dev->state); | 1157 | DBG (dev, "fail %s, state %d\n", __func__, dev->state); |
| 1236 | 1158 | ||
| 1237 | spin_unlock_irq (&dev->lock); | ||
| 1238 | return retval; | 1159 | return retval; |
| 1239 | } | 1160 | } |
| 1240 | 1161 | ||
| @@ -1281,6 +1202,9 @@ ep0_poll (struct file *fd, poll_table *wait) | |||
| 1281 | struct dev_data *dev = fd->private_data; | 1202 | struct dev_data *dev = fd->private_data; |
| 1282 | int mask = 0; | 1203 | int mask = 0; |
| 1283 | 1204 | ||
| 1205 | if (dev->state <= STATE_DEV_OPENED) | ||
| 1206 | return DEFAULT_POLLMASK; | ||
| 1207 | |||
| 1284 | poll_wait(fd, &dev->wait, wait); | 1208 | poll_wait(fd, &dev->wait, wait); |
| 1285 | 1209 | ||
| 1286 | spin_lock_irq (&dev->lock); | 1210 | spin_lock_irq (&dev->lock); |
| @@ -1316,19 +1240,6 @@ static long dev_ioctl (struct file *fd, unsigned code, unsigned long value) | |||
| 1316 | return ret; | 1240 | return ret; |
| 1317 | } | 1241 | } |
| 1318 | 1242 | ||
| 1319 | /* used after device configuration */ | ||
| 1320 | static const struct file_operations ep0_io_operations = { | ||
| 1321 | .owner = THIS_MODULE, | ||
| 1322 | .llseek = no_llseek, | ||
| 1323 | |||
| 1324 | .read = ep0_read, | ||
| 1325 | .write = ep0_write, | ||
| 1326 | .fasync = ep0_fasync, | ||
| 1327 | .poll = ep0_poll, | ||
| 1328 | .unlocked_ioctl = dev_ioctl, | ||
| 1329 | .release = dev_release, | ||
| 1330 | }; | ||
| 1331 | |||
| 1332 | /*----------------------------------------------------------------------*/ | 1243 | /*----------------------------------------------------------------------*/ |
| 1333 | 1244 | ||
| 1334 | /* The in-kernel gadget driver handles most ep0 issues, in particular | 1245 | /* The in-kernel gadget driver handles most ep0 issues, in particular |
| @@ -1650,7 +1561,7 @@ static int activate_ep_files (struct dev_data *dev) | |||
| 1650 | goto enomem1; | 1561 | goto enomem1; |
| 1651 | 1562 | ||
| 1652 | data->dentry = gadgetfs_create_file (dev->sb, data->name, | 1563 | data->dentry = gadgetfs_create_file (dev->sb, data->name, |
| 1653 | data, &ep_config_operations); | 1564 | data, &ep_io_operations); |
| 1654 | if (!data->dentry) | 1565 | if (!data->dentry) |
| 1655 | goto enomem2; | 1566 | goto enomem2; |
| 1656 | list_add_tail (&data->epfiles, &dev->epfiles); | 1567 | list_add_tail (&data->epfiles, &dev->epfiles); |
| @@ -1852,6 +1763,14 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
| 1852 | u32 tag; | 1763 | u32 tag; |
| 1853 | char *kbuf; | 1764 | char *kbuf; |
| 1854 | 1765 | ||
| 1766 | spin_lock_irq(&dev->lock); | ||
| 1767 | if (dev->state > STATE_DEV_OPENED) { | ||
| 1768 | value = ep0_write(fd, buf, len, ptr); | ||
| 1769 | spin_unlock_irq(&dev->lock); | ||
| 1770 | return value; | ||
| 1771 | } | ||
| 1772 | spin_unlock_irq(&dev->lock); | ||
| 1773 | |||
| 1855 | if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) | 1774 | if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) |
| 1856 | return -EINVAL; | 1775 | return -EINVAL; |
| 1857 | 1776 | ||
| @@ -1925,7 +1844,6 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
| 1925 | * on, they can work ... except in cleanup paths that | 1844 | * on, they can work ... except in cleanup paths that |
| 1926 | * kick in after the ep0 descriptor is closed. | 1845 | * kick in after the ep0 descriptor is closed. |
| 1927 | */ | 1846 | */ |
| 1928 | fd->f_op = &ep0_io_operations; | ||
| 1929 | value = len; | 1847 | value = len; |
| 1930 | } | 1848 | } |
| 1931 | return value; | 1849 | return value; |
| @@ -1956,12 +1874,14 @@ dev_open (struct inode *inode, struct file *fd) | |||
| 1956 | return value; | 1874 | return value; |
| 1957 | } | 1875 | } |
| 1958 | 1876 | ||
| 1959 | static const struct file_operations dev_init_operations = { | 1877 | static const struct file_operations ep0_operations = { |
| 1960 | .llseek = no_llseek, | 1878 | .llseek = no_llseek, |
| 1961 | 1879 | ||
| 1962 | .open = dev_open, | 1880 | .open = dev_open, |
| 1881 | .read = ep0_read, | ||
| 1963 | .write = dev_config, | 1882 | .write = dev_config, |
| 1964 | .fasync = ep0_fasync, | 1883 | .fasync = ep0_fasync, |
| 1884 | .poll = ep0_poll, | ||
| 1965 | .unlocked_ioctl = dev_ioctl, | 1885 | .unlocked_ioctl = dev_ioctl, |
| 1966 | .release = dev_release, | 1886 | .release = dev_release, |
| 1967 | }; | 1887 | }; |
| @@ -2077,7 +1997,7 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent) | |||
| 2077 | goto Enomem; | 1997 | goto Enomem; |
| 2078 | 1998 | ||
| 2079 | dev->sb = sb; | 1999 | dev->sb = sb; |
| 2080 | dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &dev_init_operations); | 2000 | dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &ep0_operations); |
| 2081 | if (!dev->dentry) { | 2001 | if (!dev->dentry) { |
| 2082 | put_dev(dev); | 2002 | put_dev(dev); |
| 2083 | goto Enomem; | 2003 | goto Enomem; |
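Taken together, the inode.c hunks drop the separate "before configuration" file_operations tables (ep_config_operations, ep0_io_operations, dev_init_operations) and stop swapping fd->f_op once descriptors arrive; a single operations table stays installed and the handlers branch on the current state, as dev_config() now does by forwarding to ep0_write() once dev->state has moved past STATE_DEV_OPENED. A condensed sketch of that dispatch (illustrative only; the real dev_config() samples dev->state under dev->lock):

#include <linux/fs.h>

/* Illustrative only: one .write handler that dispatches on device
 * state instead of replacing fd->f_op at runtime. */
static ssize_t sketch_ep0_write(struct file *fd, const char __user *buf,
				size_t len, loff_t *ptr)
{
	struct dev_data *dev = fd->private_data;

	if (dev->state > STATE_DEV_OPENED)	/* descriptors already written */
		return ep0_write(fd, buf, len, ptr);

	return dev_config(fd, buf, len, ptr);	/* first write: config data */
}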
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 7f76c8a12f89..fd53c9ebd662 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
| @@ -37,6 +37,9 @@ | |||
| 37 | 37 | ||
| 38 | #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31 | 38 | #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31 |
| 39 | #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 | 39 | #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 |
| 40 | #define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5 | ||
| 41 | #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f | ||
| 42 | #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f | ||
| 40 | 43 | ||
| 41 | static const char hcd_name[] = "xhci_hcd"; | 44 | static const char hcd_name[] = "xhci_hcd"; |
| 42 | 45 | ||
| @@ -133,6 +136,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) | |||
| 133 | pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) { | 136 | pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) { |
| 134 | xhci->quirks |= XHCI_SPURIOUS_REBOOT; | 137 | xhci->quirks |= XHCI_SPURIOUS_REBOOT; |
| 135 | } | 138 | } |
| 139 | if (pdev->vendor == PCI_VENDOR_ID_INTEL && | ||
| 140 | (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI || | ||
| 141 | pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI || | ||
| 142 | pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) { | ||
| 143 | xhci->quirks |= XHCI_PME_STUCK_QUIRK; | ||
| 144 | } | ||
| 136 | if (pdev->vendor == PCI_VENDOR_ID_ETRON && | 145 | if (pdev->vendor == PCI_VENDOR_ID_ETRON && |
| 137 | pdev->device == PCI_DEVICE_ID_EJ168) { | 146 | pdev->device == PCI_DEVICE_ID_EJ168) { |
| 138 | xhci->quirks |= XHCI_RESET_ON_RESUME; | 147 | xhci->quirks |= XHCI_RESET_ON_RESUME; |
| @@ -159,6 +168,21 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) | |||
| 159 | "QUIRK: Resetting on resume"); | 168 | "QUIRK: Resetting on resume"); |
| 160 | } | 169 | } |
| 161 | 170 | ||
| 171 | /* | ||
| 172 | * Make sure PME works on some Intel xHCI controllers by writing 1 to clear | ||
| 173 | * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4 | ||
| 174 | */ | ||
| 175 | static void xhci_pme_quirk(struct xhci_hcd *xhci) | ||
| 176 | { | ||
| 177 | u32 val; | ||
| 178 | void __iomem *reg; | ||
| 179 | |||
| 180 | reg = (void __iomem *) xhci->cap_regs + 0x80a4; | ||
| 181 | val = readl(reg); | ||
| 182 | writel(val | BIT(28), reg); | ||
| 183 | readl(reg); | ||
| 184 | } | ||
| 185 | |||
| 162 | /* called during probe() after chip reset completes */ | 186 | /* called during probe() after chip reset completes */ |
| 163 | static int xhci_pci_setup(struct usb_hcd *hcd) | 187 | static int xhci_pci_setup(struct usb_hcd *hcd) |
| 164 | { | 188 | { |
| @@ -283,6 +307,9 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) | |||
| 283 | if (xhci->quirks & XHCI_COMP_MODE_QUIRK) | 307 | if (xhci->quirks & XHCI_COMP_MODE_QUIRK) |
| 284 | pdev->no_d3cold = true; | 308 | pdev->no_d3cold = true; |
| 285 | 309 | ||
| 310 | if (xhci->quirks & XHCI_PME_STUCK_QUIRK) | ||
| 311 | xhci_pme_quirk(xhci); | ||
| 312 | |||
| 286 | return xhci_suspend(xhci, do_wakeup); | 313 | return xhci_suspend(xhci, do_wakeup); |
| 287 | } | 314 | } |
| 288 | 315 | ||
| @@ -313,6 +340,9 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated) | |||
| 313 | if (pdev->vendor == PCI_VENDOR_ID_INTEL) | 340 | if (pdev->vendor == PCI_VENDOR_ID_INTEL) |
| 314 | usb_enable_intel_xhci_ports(pdev); | 341 | usb_enable_intel_xhci_ports(pdev); |
| 315 | 342 | ||
| 343 | if (xhci->quirks & XHCI_PME_STUCK_QUIRK) | ||
| 344 | xhci_pme_quirk(xhci); | ||
| 345 | |||
| 316 | retval = xhci_resume(xhci, hibernated); | 346 | retval = xhci_resume(xhci, hibernated); |
| 317 | return retval; | 347 | return retval; |
| 318 | } | 348 | } |
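xhci_pme_quirk() above is a plain read-modify-write with a read-back: setting BIT(28) in the vendor-specific PMCTRL register at offset 0x80a4 clears the stuck internal PME flag, and the trailing readl() forces the posted MMIO write out to the controller before suspend or resume continues. The same pattern in isolation (the helper name is made up):

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative helper, not from the patch: set a flag bit in an MMIO
 * register and read it back so the posted write reaches the device
 * before execution continues. */
static void mmio_set_bit_and_flush(void __iomem *reg, u32 bit)
{
	u32 val = readl(reg);

	writel(val | bit, reg);
	readl(reg);		/* flush the posted write */
}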
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 08d402b15482..0e11d61408ff 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c | |||
| @@ -83,16 +83,6 @@ static int xhci_plat_probe(struct platform_device *pdev) | |||
| 83 | if (irq < 0) | 83 | if (irq < 0) |
| 84 | return -ENODEV; | 84 | return -ENODEV; |
| 85 | 85 | ||
| 86 | |||
| 87 | if (of_device_is_compatible(pdev->dev.of_node, | ||
| 88 | "marvell,armada-375-xhci") || | ||
| 89 | of_device_is_compatible(pdev->dev.of_node, | ||
| 90 | "marvell,armada-380-xhci")) { | ||
| 91 | ret = xhci_mvebu_mbus_init_quirk(pdev); | ||
| 92 | if (ret) | ||
| 93 | return ret; | ||
| 94 | } | ||
| 95 | |||
| 96 | /* Initialize dma_mask and coherent_dma_mask to 32-bits */ | 86 | /* Initialize dma_mask and coherent_dma_mask to 32-bits */ |
| 97 | ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); | 87 | ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); |
| 98 | if (ret) | 88 | if (ret) |
| @@ -127,6 +117,15 @@ static int xhci_plat_probe(struct platform_device *pdev) | |||
| 127 | goto put_hcd; | 117 | goto put_hcd; |
| 128 | } | 118 | } |
| 129 | 119 | ||
| 120 | if (of_device_is_compatible(pdev->dev.of_node, | ||
| 121 | "marvell,armada-375-xhci") || | ||
| 122 | of_device_is_compatible(pdev->dev.of_node, | ||
| 123 | "marvell,armada-380-xhci")) { | ||
| 124 | ret = xhci_mvebu_mbus_init_quirk(pdev); | ||
| 125 | if (ret) | ||
| 126 | goto disable_clk; | ||
| 127 | } | ||
| 128 | |||
| 130 | ret = usb_add_hcd(hcd, irq, IRQF_SHARED); | 129 | ret = usb_add_hcd(hcd, irq, IRQF_SHARED); |
| 131 | if (ret) | 130 | if (ret) |
| 132 | goto disable_clk; | 131 | goto disable_clk; |
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 88da8d629820..5fb66db89e05 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
| @@ -1729,7 +1729,7 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci, | |||
| 1729 | if (!command) | 1729 | if (!command) |
| 1730 | return; | 1730 | return; |
| 1731 | 1731 | ||
| 1732 | ep->ep_state |= EP_HALTED; | 1732 | ep->ep_state |= EP_HALTED | EP_RECENTLY_HALTED; |
| 1733 | ep->stopped_stream = stream_id; | 1733 | ep->stopped_stream = stream_id; |
| 1734 | 1734 | ||
| 1735 | xhci_queue_reset_ep(xhci, command, slot_id, ep_index); | 1735 | xhci_queue_reset_ep(xhci, command, slot_id, ep_index); |
| @@ -1946,7 +1946,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, | |||
| 1946 | if (event_trb != ep_ring->dequeue) { | 1946 | if (event_trb != ep_ring->dequeue) { |
| 1947 | /* The event was for the status stage */ | 1947 | /* The event was for the status stage */ |
| 1948 | if (event_trb == td->last_trb) { | 1948 | if (event_trb == td->last_trb) { |
| 1949 | if (td->urb->actual_length != 0) { | 1949 | if (td->urb_length_set) { |
| 1950 | /* Don't overwrite a previously set error code | 1950 | /* Don't overwrite a previously set error code |
| 1951 | */ | 1951 | */ |
| 1952 | if ((*status == -EINPROGRESS || *status == 0) && | 1952 | if ((*status == -EINPROGRESS || *status == 0) && |
| @@ -1960,7 +1960,13 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, | |||
| 1960 | td->urb->transfer_buffer_length; | 1960 | td->urb->transfer_buffer_length; |
| 1961 | } | 1961 | } |
| 1962 | } else { | 1962 | } else { |
| 1963 | /* Maybe the event was for the data stage? */ | 1963 | /* |
| 1963 | /* Maybe the event was for the data stage? */ | 1963 | /* |
| 1964 | * Maybe the event was for the data stage? If so, update | ||
| 1965 | * the actual_length of the URB now and flag it as set, | ||
| 1966 | * so that it is not overwritten by the event for the | ||
| 1967 | * last TRB. | ||
| 1968 | */ | ||
| 1969 | td->urb_length_set = true; | ||
| 1964 | td->urb->actual_length = | 1970 | td->urb->actual_length = |
| 1965 | td->urb->transfer_buffer_length - | 1971 | td->urb->transfer_buffer_length - |
| 1966 | EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); | 1972 | EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); |
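The xhci-ring.c change replaces the old "was actual_length already set?" test, which compared actual_length against 0, with an explicit urb_length_set flag in struct xhci_td. The distinction matters when the data stage of a control transfer moves zero bytes: 0 is then a legitimate actual_length, and without the flag the status-stage event would overwrite it with the full transfer_buffer_length. A stand-alone model of the two schemes (toy types only, not driver code):

#include <stdbool.h>
#include <stdio.h>

struct urb_model { unsigned req, actual; bool length_set; };

/* Data-stage event: record how much was really transferred. */
static void data_stage(struct urb_model *u, unsigned remaining)
{
	u->actual = u->req - remaining;
	u->length_set = true;
}

/* Status-stage event, old scheme: actual == 0 is taken to mean
 * "not set yet", so a genuine 0-byte data stage gets overwritten. */
static void status_stage_old(struct urb_model *u)
{
	if (u->actual == 0)
		u->actual = u->req;
}

/* Status-stage event, new scheme: consult the explicit flag. */
static void status_stage_new(struct urb_model *u)
{
	if (!u->length_set)
		u->actual = u->req;
}

int main(void)
{
	struct urb_model a = { .req = 512 }, b = { .req = 512 };

	data_stage(&a, 512);		/* device sent nothing */
	data_stage(&b, 512);
	status_stage_old(&a);
	status_stage_new(&b);
	printf("old: %u bytes, new: %u bytes\n", a.actual, b.actual);
	return 0;			/* prints "old: 512 bytes, new: 0 bytes" */
}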
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index ec8ac1674854..b06d1a53652d 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
| @@ -1338,6 +1338,12 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) | |||
| 1338 | goto exit; | 1338 | goto exit; |
| 1339 | } | 1339 | } |
| 1340 | 1340 | ||
| 1341 | /* Reject urb if endpoint is in soft reset, queue must stay empty */ | ||
| 1342 | if (xhci->devs[slot_id]->eps[ep_index].ep_state & EP_CONFIG_PENDING) { | ||
| 1343 | xhci_warn(xhci, "Can't enqueue URB while ep is in soft reset\n"); | ||
| 1344 | ret = -EINVAL; | ||
| 1345 | } | ||
| 1346 | |||
| 1341 | if (usb_endpoint_xfer_isoc(&urb->ep->desc)) | 1347 | if (usb_endpoint_xfer_isoc(&urb->ep->desc)) |
| 1342 | size = urb->number_of_packets; | 1348 | size = urb->number_of_packets; |
| 1343 | else | 1349 | else |
| @@ -2948,23 +2954,36 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, | |||
| 2948 | } | 2954 | } |
| 2949 | } | 2955 | } |
| 2950 | 2956 | ||
| 2951 | /* Called when clearing halted device. The core should have sent the control | 2957 | /* Called after clearing a halted device. USB core should have sent the control |
| 2952 | * message to clear the device halt condition. The host side of the halt should | 2958 | * message to clear the device halt condition. The host side of the halt should |
| 2953 | * already be cleared with a reset endpoint command issued when the STALL tx | 2959 | * already be cleared with a reset endpoint command issued immediately when the |
| 2954 | * event was received. | 2960 | * STALL tx event was received. |
| 2955 | * | ||
| 2956 | * Context: in_interrupt | ||
| 2957 | */ | 2961 | */ |
| 2958 | 2962 | ||
| 2959 | void xhci_endpoint_reset(struct usb_hcd *hcd, | 2963 | void xhci_endpoint_reset(struct usb_hcd *hcd, |
| 2960 | struct usb_host_endpoint *ep) | 2964 | struct usb_host_endpoint *ep) |
| 2961 | { | 2965 | { |
| 2962 | struct xhci_hcd *xhci; | 2966 | struct xhci_hcd *xhci; |
| 2967 | struct usb_device *udev; | ||
| 2968 | struct xhci_virt_device *virt_dev; | ||
| 2969 | struct xhci_virt_ep *virt_ep; | ||
| 2970 | struct xhci_input_control_ctx *ctrl_ctx; | ||
| 2971 | struct xhci_command *command; | ||
| 2972 | unsigned int ep_index, ep_state; | ||
| 2973 | unsigned long flags; | ||
| 2974 | u32 ep_flag; | ||
| 2963 | 2975 | ||
| 2964 | xhci = hcd_to_xhci(hcd); | 2976 | xhci = hcd_to_xhci(hcd); |
| 2977 | udev = (struct usb_device *) ep->hcpriv; | ||
| 2978 | if (!ep->hcpriv) | ||
| 2979 | return; | ||
| 2980 | virt_dev = xhci->devs[udev->slot_id]; | ||
| 2981 | ep_index = xhci_get_endpoint_index(&ep->desc); | ||
| 2982 | virt_ep = &virt_dev->eps[ep_index]; | ||
| 2983 | ep_state = virt_ep->ep_state; | ||
| 2965 | 2984 | ||
| 2966 | /* | 2985 | /* |
| 2967 | * We might need to implement the config ep cmd in xhci 4.8.1 note: | 2986 | * Implement the config ep command in xhci 4.6.8 additional note: |
| 2968 | * The Reset Endpoint Command may only be issued to endpoints in the | 2987 | * The Reset Endpoint Command may only be issued to endpoints in the |
| 2969 | * Halted state. If software wishes reset the Data Toggle or Sequence | 2988 | * Halted state. If software wishes reset the Data Toggle or Sequence |
| 2970 | * Number of an endpoint that isn't in the Halted state, then software | 2989 | * Number of an endpoint that isn't in the Halted state, then software |
| @@ -2972,9 +2991,72 @@ void xhci_endpoint_reset(struct usb_hcd *hcd, | |||
| 2972 | * for the target endpoint. that is in the Stopped state. | 2991 | * for the target endpoint. that is in the Stopped state. |
| 2973 | */ | 2992 | */ |
| 2974 | 2993 | ||
| 2975 | /* For now just print debug to follow the situation */ | 2994 | if (ep_state & SET_DEQ_PENDING || ep_state & EP_RECENTLY_HALTED) { |
| 2976 | xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n", | 2995 | virt_ep->ep_state &= ~EP_RECENTLY_HALTED; |
| 2977 | ep->desc.bEndpointAddress); | 2996 | xhci_dbg(xhci, "ep recently halted, no toggle reset needed\n"); |
| 2997 | return; | ||
| 2998 | } | ||
| 2999 | |||
| 3000 | /* Only interrupt and bulk ep's use Data toggle, USB2 spec 5.5.4-> */ | ||
| 3001 | if (usb_endpoint_xfer_control(&ep->desc) || | ||
| 3002 | usb_endpoint_xfer_isoc(&ep->desc)) | ||
| 3003 | return; | ||
| 3004 | |||
| 3005 | ep_flag = xhci_get_endpoint_flag(&ep->desc); | ||
| 3006 | |||
| 3007 | if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG) | ||
| 3008 | return; | ||
| 3009 | |||
| 3010 | command = xhci_alloc_command(xhci, true, true, GFP_NOWAIT); | ||
| 3011 | if (!command) { | ||
| 3012 | xhci_err(xhci, "Could not allocate xHCI command structure.\n"); | ||
| 3013 | return; | ||
| 3014 | } | ||
| 3015 | |||
| 3016 | spin_lock_irqsave(&xhci->lock, flags); | ||
| 3017 | |||
| 3018 | /* block ringing ep doorbell */ | ||
| 3019 | virt_ep->ep_state |= EP_CONFIG_PENDING; | ||
| 3020 | |||
| 3021 | /* | ||
| 3022 | * Make sure endpoint ring is empty before resetting the toggle/seq. | ||
| 3023 | * Driver is required to synchronously cancel all transfer requests. | ||
| 3024 | * | ||
| 3025 | * xhci 4.6.6 says we can issue a configure endpoint command on a | ||
| 3026 | * running endpoint ring as long as it's idle (queue empty) | ||
| 3027 | */ | ||
| 3028 | |||
| 3029 | if (!list_empty(&virt_ep->ring->td_list)) { | ||
| 3030 | dev_err(&udev->dev, "EP not empty, refuse reset\n"); | ||
| 3031 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
| 3032 | goto cleanup; | ||
| 3033 | } | ||
| 3034 | |||
| 3035 | xhci_dbg(xhci, "Reset toggle/seq for slot %d, ep_index: %d\n", | ||
| 3036 | udev->slot_id, ep_index); | ||
| 3037 | |||
| 3038 | ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); | ||
| 3039 | if (!ctrl_ctx) { | ||
| 3040 | xhci_err(xhci, "Could not get input context, bad type. virt_dev: %p, in_ctx %p\n", | ||
| 3041 | virt_dev, virt_dev->in_ctx); | ||
| 3042 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
| 3043 | goto cleanup; | ||
| 3044 | } | ||
| 3045 | xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx, | ||
| 3046 | virt_dev->out_ctx, ctrl_ctx, | ||
| 3047 | ep_flag, ep_flag); | ||
| 3048 | xhci_endpoint_copy(xhci, command->in_ctx, virt_dev->out_ctx, ep_index); | ||
| 3049 | |||
| 3050 | xhci_queue_configure_endpoint(xhci, command, command->in_ctx->dma, | ||
| 3051 | udev->slot_id, false); | ||
| 3052 | xhci_ring_cmd_db(xhci); | ||
| 3053 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
| 3054 | |||
| 3055 | wait_for_completion(command->completion); | ||
| 3056 | |||
| 3057 | cleanup: | ||
| 3058 | virt_ep->ep_state &= ~EP_CONFIG_PENDING; | ||
| 3059 | xhci_free_command(xhci, command); | ||
| 2978 | } | 3060 | } |
| 2979 | 3061 | ||
| 2980 | static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, | 3062 | static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, |
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 974514762a14..265ab1771d24 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h | |||
| @@ -1,3 +1,4 @@ | |||
| 1 | |||
| 1 | /* | 2 | /* |
| 2 | * xHCI host controller driver | 3 | * xHCI host controller driver |
| 3 | * | 4 | * |
| @@ -88,9 +89,10 @@ struct xhci_cap_regs { | |||
| 88 | #define HCS_IST(p) (((p) >> 0) & 0xf) | 89 | #define HCS_IST(p) (((p) >> 0) & 0xf) |
| 89 | /* bits 4:7, max number of Event Ring segments */ | 90 | /* bits 4:7, max number of Event Ring segments */ |
| 90 | #define HCS_ERST_MAX(p) (((p) >> 4) & 0xf) | 91 | #define HCS_ERST_MAX(p) (((p) >> 4) & 0xf) |
| 92 | /* bits 21:25 Hi 5 bits of Scratchpad buffers SW must allocate for the HW */ | ||
| 91 | /* bit 26 Scratchpad restore - for save/restore HW state - not used yet */ | 93 | /* bit 26 Scratchpad restore - for save/restore HW state - not used yet */ |
| 92 | /* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */ | 94 | /* bits 27:31 Lo 5 bits of Scratchpad buffers SW must allocate for the HW */ |
| 93 | #define HCS_MAX_SCRATCHPAD(p) (((p) >> 27) & 0x1f) | 95 | #define HCS_MAX_SCRATCHPAD(p) ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f)) |
| 94 | 96 | ||
| 95 | /* HCSPARAMS3 - hcs_params3 - bitmasks */ | 97 | /* HCSPARAMS3 - hcs_params3 - bitmasks */ |
| 96 | /* bits 0:7, Max U1 to U0 latency for the roothub ports */ | 98 | /* bits 0:7, Max U1 to U0 latency for the roothub ports */ |
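The widened HCS_MAX_SCRATCHPAD() above splices two 5-bit fields of HCSPARAMS2 together: per the adjusted comments, bits 21:25 hold the high five bits of the scratchpad-buffer count and bits 27:31 the low five, so a controller can now report up to 1023 scratchpad buffers instead of 31. A stand-alone check of the bit arithmetic (the register value is made up):

#include <stdint.h>
#include <stdio.h>

#define HCS_MAX_SCRATCHPAD(p) ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f))

int main(void)
{
	/* example value: hi field = 0x12 (bits 21:25), lo field = 0x1f (bits 27:31) */
	uint32_t hi = 0x12, lo = 0x1f;
	uint32_t hcsparams2 = (hi << 21) | (lo << 27);

	/* expect (0x12 << 5) | 0x1f = 0x25f = 607 */
	printf("max scratchpad = %u\n", HCS_MAX_SCRATCHPAD(hcsparams2));
	return 0;
}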
| @@ -863,6 +865,8 @@ struct xhci_virt_ep { | |||
| 863 | #define EP_HAS_STREAMS (1 << 4) | 865 | #define EP_HAS_STREAMS (1 << 4) |
| 864 | /* Transitioning the endpoint to not using streams, don't enqueue URBs */ | 866 | /* Transitioning the endpoint to not using streams, don't enqueue URBs */ |
| 865 | #define EP_GETTING_NO_STREAMS (1 << 5) | 867 | #define EP_GETTING_NO_STREAMS (1 << 5) |
| 868 | #define EP_RECENTLY_HALTED (1 << 6) | ||
| 869 | #define EP_CONFIG_PENDING (1 << 7) | ||
| 866 | /* ---- Related to URB cancellation ---- */ | 870 | /* ---- Related to URB cancellation ---- */ |
| 867 | struct list_head cancelled_td_list; | 871 | struct list_head cancelled_td_list; |
| 868 | struct xhci_td *stopped_td; | 872 | struct xhci_td *stopped_td; |
| @@ -1288,6 +1292,8 @@ struct xhci_td { | |||
| 1288 | struct xhci_segment *start_seg; | 1292 | struct xhci_segment *start_seg; |
| 1289 | union xhci_trb *first_trb; | 1293 | union xhci_trb *first_trb; |
| 1290 | union xhci_trb *last_trb; | 1294 | union xhci_trb *last_trb; |
| 1295 | /* actual_length of the URB has already been set */ | ||
| 1296 | bool urb_length_set; | ||
| 1291 | }; | 1297 | }; |
| 1292 | 1298 | ||
| 1293 | /* xHCI command default timeout value */ | 1299 | /* xHCI command default timeout value */ |
| @@ -1560,6 +1566,7 @@ struct xhci_hcd { | |||
| 1560 | #define XHCI_SPURIOUS_WAKEUP (1 << 18) | 1566 | #define XHCI_SPURIOUS_WAKEUP (1 << 18) |
| 1561 | /* For controllers with a broken beyond repair streams implementation */ | 1567 | /* For controllers with a broken beyond repair streams implementation */ |
| 1562 | #define XHCI_BROKEN_STREAMS (1 << 19) | 1568 | #define XHCI_BROKEN_STREAMS (1 << 19) |
| 1569 | #define XHCI_PME_STUCK_QUIRK (1 << 20) | ||
| 1563 | unsigned int num_active_eps; | 1570 | unsigned int num_active_eps; |
| 1564 | unsigned int limit_active_eps; | 1571 | unsigned int limit_active_eps; |
| 1565 | /* There are two roothubs to keep track of bus suspend info for */ | 1572 | /* There are two roothubs to keep track of bus suspend info for */ |
diff --git a/drivers/usb/isp1760/isp1760-hcd.c b/drivers/usb/isp1760/isp1760-hcd.c index eba9b82e2d70..3cb98b1d5d29 100644 --- a/drivers/usb/isp1760/isp1760-hcd.c +++ b/drivers/usb/isp1760/isp1760-hcd.c | |||
| @@ -1274,7 +1274,7 @@ static void errata2_function(unsigned long data) | |||
| 1274 | for (slot = 0; slot < 32; slot++) | 1274 | for (slot = 0; slot < 32; slot++) |
| 1275 | if (priv->atl_slots[slot].qh && time_after(jiffies, | 1275 | if (priv->atl_slots[slot].qh && time_after(jiffies, |
| 1276 | priv->atl_slots[slot].timestamp + | 1276 | priv->atl_slots[slot].timestamp + |
| 1277 | SLOT_TIMEOUT * HZ / 1000)) { | 1277 | msecs_to_jiffies(SLOT_TIMEOUT))) { |
| 1278 | ptd_read(hcd->regs, ATL_PTD_OFFSET, slot, &ptd); | 1278 | ptd_read(hcd->regs, ATL_PTD_OFFSET, slot, &ptd); |
| 1279 | if (!FROM_DW0_VALID(ptd.dw0) && | 1279 | if (!FROM_DW0_VALID(ptd.dw0) && |
| 1280 | !FROM_DW3_ACTIVE(ptd.dw3)) | 1280 | !FROM_DW3_ACTIVE(ptd.dw3)) |
| @@ -1286,7 +1286,7 @@ static void errata2_function(unsigned long data) | |||
| 1286 | 1286 | ||
| 1287 | spin_unlock_irqrestore(&priv->lock, spinflags); | 1287 | spin_unlock_irqrestore(&priv->lock, spinflags); |
| 1288 | 1288 | ||
| 1289 | errata2_timer.expires = jiffies + SLOT_CHECK_PERIOD * HZ / 1000; | 1289 | errata2_timer.expires = jiffies + msecs_to_jiffies(SLOT_CHECK_PERIOD); |
| 1290 | add_timer(&errata2_timer); | 1290 | add_timer(&errata2_timer); |
| 1291 | } | 1291 | } |
| 1292 | 1292 | ||
| @@ -1336,7 +1336,7 @@ static int isp1760_run(struct usb_hcd *hcd) | |||
| 1336 | return retval; | 1336 | return retval; |
| 1337 | 1337 | ||
| 1338 | setup_timer(&errata2_timer, errata2_function, (unsigned long)hcd); | 1338 | setup_timer(&errata2_timer, errata2_function, (unsigned long)hcd); |
| 1339 | errata2_timer.expires = jiffies + SLOT_CHECK_PERIOD * HZ / 1000; | 1339 | errata2_timer.expires = jiffies + msecs_to_jiffies(SLOT_CHECK_PERIOD); |
| 1340 | add_timer(&errata2_timer); | 1340 | add_timer(&errata2_timer); |
| 1341 | 1341 | ||
| 1342 | chipid = reg_read32(hcd->regs, HC_CHIP_ID_REG); | 1342 | chipid = reg_read32(hcd->regs, HC_CHIP_ID_REG); |
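The isp1760 hunks replace the open-coded "msecs * HZ / 1000" with msecs_to_jiffies(). The difference is rounding: integer division truncates, so the computed timeout can fire earlier than requested, while msecs_to_jiffies() rounds up (and also copes with HZ values that do not divide 1000 and with very large arguments). A stand-alone illustration with made-up HZ and timeout values:

#include <stdio.h>

#define HZ 250	/* assumed tick rate for the demo */

static unsigned long naive(unsigned int ms)   { return ms * HZ / 1000; }
/* rough model of the round-up behaviour of msecs_to_jiffies() */
static unsigned long rounded(unsigned int ms) { return (ms * HZ + 999) / 1000; }

int main(void)
{
	printf("30 ms -> naive %lu jiffies (%lu ms), rounded up %lu jiffies\n",
	       naive(30), naive(30) * 1000 / HZ, rounded(30));
	/* prints: 30 ms -> naive 7 jiffies (28 ms), rounded up 8 jiffies */
	return 0;
}

With the helper, the slot timeout and check period used above are guaranteed to be at least as long as the millisecond values name.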
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index e6f4cbfeed97..067920f2d570 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c | |||
| @@ -1969,10 +1969,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) | |||
| 1969 | goto fail0; | 1969 | goto fail0; |
| 1970 | } | 1970 | } |
| 1971 | 1971 | ||
| 1972 | pm_runtime_use_autosuspend(musb->controller); | ||
| 1973 | pm_runtime_set_autosuspend_delay(musb->controller, 200); | ||
| 1974 | pm_runtime_enable(musb->controller); | ||
| 1975 | |||
| 1976 | spin_lock_init(&musb->lock); | 1972 | spin_lock_init(&musb->lock); |
| 1977 | musb->board_set_power = plat->set_power; | 1973 | musb->board_set_power = plat->set_power; |
| 1978 | musb->min_power = plat->min_power; | 1974 | musb->min_power = plat->min_power; |
| @@ -1991,6 +1987,12 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) | |||
| 1991 | musb_readl = musb_default_readl; | 1987 | musb_readl = musb_default_readl; |
| 1992 | musb_writel = musb_default_writel; | 1988 | musb_writel = musb_default_writel; |
| 1993 | 1989 | ||
| 1990 | /* We need musb_read/write functions initialized for PM */ | ||
| 1991 | pm_runtime_use_autosuspend(musb->controller); | ||
| 1992 | pm_runtime_set_autosuspend_delay(musb->controller, 200); | ||
| 1993 | pm_runtime_irq_safe(musb->controller); | ||
| 1994 | pm_runtime_enable(musb->controller); | ||
| 1995 | |||
| 1994 | /* The musb_platform_init() call: | 1996 | /* The musb_platform_init() call: |
| 1995 | * - adjusts musb->mregs | 1997 | * - adjusts musb->mregs |
| 1996 | * - sets the musb->isr | 1998 | * - sets the musb->isr |
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index 53bd0e71d19f..a900c9877195 100644 --- a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c | |||
| @@ -457,12 +457,27 @@ static int dsps_musb_init(struct musb *musb) | |||
| 457 | if (IS_ERR(musb->xceiv)) | 457 | if (IS_ERR(musb->xceiv)) |
| 458 | return PTR_ERR(musb->xceiv); | 458 | return PTR_ERR(musb->xceiv); |
| 459 | 459 | ||
| 460 | musb->phy = devm_phy_get(dev->parent, "usb2-phy"); | ||
| 461 | |||
| 460 | /* Returns zero if e.g. not clocked */ | 462 | /* Returns zero if e.g. not clocked */ |
| 461 | rev = dsps_readl(reg_base, wrp->revision); | 463 | rev = dsps_readl(reg_base, wrp->revision); |
| 462 | if (!rev) | 464 | if (!rev) |
| 463 | return -ENODEV; | 465 | return -ENODEV; |
| 464 | 466 | ||
| 465 | usb_phy_init(musb->xceiv); | 467 | usb_phy_init(musb->xceiv); |
| 468 | if (IS_ERR(musb->phy)) { | ||
| 469 | musb->phy = NULL; | ||
| 470 | } else { | ||
| 471 | ret = phy_init(musb->phy); | ||
| 472 | if (ret < 0) | ||
| 473 | return ret; | ||
| 474 | ret = phy_power_on(musb->phy); | ||
| 475 | if (ret) { | ||
| 476 | phy_exit(musb->phy); | ||
| 477 | return ret; | ||
| 478 | } | ||
| 479 | } | ||
| 480 | |||
| 466 | setup_timer(&glue->timer, otg_timer, (unsigned long) musb); | 481 | setup_timer(&glue->timer, otg_timer, (unsigned long) musb); |
| 467 | 482 | ||
| 468 | /* Reset the musb */ | 483 | /* Reset the musb */ |
| @@ -502,6 +517,8 @@ static int dsps_musb_exit(struct musb *musb) | |||
| 502 | 517 | ||
| 503 | del_timer_sync(&glue->timer); | 518 | del_timer_sync(&glue->timer); |
| 504 | usb_phy_shutdown(musb->xceiv); | 519 | usb_phy_shutdown(musb->xceiv); |
| 520 | phy_power_off(musb->phy); | ||
| 521 | phy_exit(musb->phy); | ||
| 505 | debugfs_remove_recursive(glue->dbgfs_root); | 522 | debugfs_remove_recursive(glue->dbgfs_root); |
| 506 | 523 | ||
| 507 | return 0; | 524 | return 0; |
| @@ -610,7 +627,7 @@ static int dsps_musb_reset(struct musb *musb) | |||
| 610 | struct device *dev = musb->controller; | 627 | struct device *dev = musb->controller; |
| 611 | struct dsps_glue *glue = dev_get_drvdata(dev->parent); | 628 | struct dsps_glue *glue = dev_get_drvdata(dev->parent); |
| 612 | const struct dsps_musb_wrapper *wrp = glue->wrp; | 629 | const struct dsps_musb_wrapper *wrp = glue->wrp; |
| 613 | int session_restart = 0; | 630 | int session_restart = 0, error; |
| 614 | 631 | ||
| 615 | if (glue->sw_babble_enabled) | 632 | if (glue->sw_babble_enabled) |
| 616 | session_restart = sw_babble_control(musb); | 633 | session_restart = sw_babble_control(musb); |
| @@ -624,8 +641,14 @@ static int dsps_musb_reset(struct musb *musb) | |||
| 624 | dsps_writel(musb->ctrl_base, wrp->control, (1 << wrp->reset)); | 641 | dsps_writel(musb->ctrl_base, wrp->control, (1 << wrp->reset)); |
| 625 | usleep_range(100, 200); | 642 | usleep_range(100, 200); |
| 626 | usb_phy_shutdown(musb->xceiv); | 643 | usb_phy_shutdown(musb->xceiv); |
| 644 | error = phy_power_off(musb->phy); | ||
| 645 | if (error) | ||
| 646 | dev_err(dev, "phy shutdown failed: %i\n", error); | ||
| 627 | usleep_range(100, 200); | 647 | usleep_range(100, 200); |
| 628 | usb_phy_init(musb->xceiv); | 648 | usb_phy_init(musb->xceiv); |
| 649 | error = phy_power_on(musb->phy); | ||
| 650 | if (error) | ||
| 651 | dev_err(dev, "phy powerup failed: %i\n", error); | ||
| 629 | session_restart = 1; | 652 | session_restart = 1; |
| 630 | } | 653 | } |
| 631 | 654 | ||
| @@ -687,7 +710,7 @@ static int dsps_create_musb_pdev(struct dsps_glue *glue, | |||
| 687 | struct musb_hdrc_config *config; | 710 | struct musb_hdrc_config *config; |
| 688 | struct platform_device *musb; | 711 | struct platform_device *musb; |
| 689 | struct device_node *dn = parent->dev.of_node; | 712 | struct device_node *dn = parent->dev.of_node; |
| 690 | int ret; | 713 | int ret, val; |
| 691 | 714 | ||
| 692 | memset(resources, 0, sizeof(resources)); | 715 | memset(resources, 0, sizeof(resources)); |
| 693 | res = platform_get_resource_byname(parent, IORESOURCE_MEM, "mc"); | 716 | res = platform_get_resource_byname(parent, IORESOURCE_MEM, "mc"); |
| @@ -739,7 +762,10 @@ static int dsps_create_musb_pdev(struct dsps_glue *glue, | |||
| 739 | pdata.mode = get_musb_port_mode(dev); | 762 | pdata.mode = get_musb_port_mode(dev); |
| 740 | /* DT keeps this entry in mA, musb expects it as per USB spec */ | 763 | /* DT keeps this entry in mA, musb expects it as per USB spec */ |
| 741 | pdata.power = get_int_prop(dn, "mentor,power") / 2; | 764 | pdata.power = get_int_prop(dn, "mentor,power") / 2; |
| 742 | config->multipoint = of_property_read_bool(dn, "mentor,multipoint"); | 765 | |
| 766 | ret = of_property_read_u32(dn, "mentor,multipoint", &val); | ||
| 767 | if (!ret && val) | ||
| 768 | config->multipoint = true; | ||
| 743 | 769 | ||
| 744 | ret = platform_device_add_data(musb, &pdata, sizeof(pdata)); | 770 | ret = platform_device_add_data(musb, &pdata, sizeof(pdata)); |
| 745 | if (ret) { | 771 | if (ret) { |
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 883a9adfdfff..c3d5fc9dfb5b 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c | |||
| @@ -2613,7 +2613,7 @@ static const struct hc_driver musb_hc_driver = { | |||
| 2613 | .description = "musb-hcd", | 2613 | .description = "musb-hcd", |
| 2614 | .product_desc = "MUSB HDRC host driver", | 2614 | .product_desc = "MUSB HDRC host driver", |
| 2615 | .hcd_priv_size = sizeof(struct musb *), | 2615 | .hcd_priv_size = sizeof(struct musb *), |
| 2616 | .flags = HCD_USB2 | HCD_MEMORY, | 2616 | .flags = HCD_USB2 | HCD_MEMORY | HCD_BH, |
| 2617 | 2617 | ||
| 2618 | /* not using irq handler or reset hooks from usbcore, since | 2618 | /* not using irq handler or reset hooks from usbcore, since |
| 2619 | * those must be shared with peripheral code for OTG configs | 2619 | * those must be shared with peripheral code for OTG configs |
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index 763649eb4987..cc752d8c7773 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c | |||
| @@ -516,7 +516,7 @@ static int omap2430_probe(struct platform_device *pdev) | |||
| 516 | struct omap2430_glue *glue; | 516 | struct omap2430_glue *glue; |
| 517 | struct device_node *np = pdev->dev.of_node; | 517 | struct device_node *np = pdev->dev.of_node; |
| 518 | struct musb_hdrc_config *config; | 518 | struct musb_hdrc_config *config; |
| 519 | int ret = -ENOMEM; | 519 | int ret = -ENOMEM, val; |
| 520 | 520 | ||
| 521 | glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL); | 521 | glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL); |
| 522 | if (!glue) | 522 | if (!glue) |
| @@ -559,7 +559,10 @@ static int omap2430_probe(struct platform_device *pdev) | |||
| 559 | of_property_read_u32(np, "num-eps", (u32 *)&config->num_eps); | 559 | of_property_read_u32(np, "num-eps", (u32 *)&config->num_eps); |
| 560 | of_property_read_u32(np, "ram-bits", (u32 *)&config->ram_bits); | 560 | of_property_read_u32(np, "ram-bits", (u32 *)&config->ram_bits); |
| 561 | of_property_read_u32(np, "power", (u32 *)&pdata->power); | 561 | of_property_read_u32(np, "power", (u32 *)&pdata->power); |
| 562 | config->multipoint = of_property_read_bool(np, "multipoint"); | 562 | |
| 563 | ret = of_property_read_u32(np, "multipoint", &val); | ||
| 564 | if (!ret && val) | ||
| 565 | config->multipoint = true; | ||
| 563 | 566 | ||
| 564 | pdata->board_data = data; | 567 | pdata->board_data = data; |
| 565 | pdata->config = config; | 568 | pdata->config = config; |
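Both musb glue hunks above, dsps and omap2430, stop using of_property_read_bool() for the multipoint property and read it as a u32 instead. The distinction matters because of_property_read_bool() only tests whether the property is present, so a device tree that explicitly sets the value to 0 would still enable multipoint. A short sketch of the two patterns, with an invented property name and assuming np is a valid device_node; illustrative only:

    #include <linux/of.h>
    #include <linux/types.h>

    /* True whenever the property is present, even if it is written as <0>. */
    static bool multipoint_from_presence(struct device_node *np)
    {
            return of_property_read_bool(np, "vendor,multipoint");
    }

    /* False when the property is absent, unreadable, or explicitly set to 0. */
    static bool multipoint_from_value(struct device_node *np)
    {
            u32 val;

            if (of_property_read_u32(np, "vendor,multipoint", &val))
                    return false;

            return val != 0;
    }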
diff --git a/drivers/usb/renesas_usbhs/Kconfig b/drivers/usb/renesas_usbhs/Kconfig index de83b9d0cd5c..ebc99ee076ce 100644 --- a/drivers/usb/renesas_usbhs/Kconfig +++ b/drivers/usb/renesas_usbhs/Kconfig | |||
| @@ -6,6 +6,7 @@ config USB_RENESAS_USBHS | |||
| 6 | tristate 'Renesas USBHS controller' | 6 | tristate 'Renesas USBHS controller' |
| 7 | depends on USB_GADGET | 7 | depends on USB_GADGET |
| 8 | depends on ARCH_SHMOBILE || SUPERH || COMPILE_TEST | 8 | depends on ARCH_SHMOBILE || SUPERH || COMPILE_TEST |
| 9 | depends on EXTCON || !EXTCON # if EXTCON=m, USBHS cannot be built-in | ||
| 9 | default n | 10 | default n |
| 10 | help | 11 | help |
| 11 | Renesas USBHS is a discrete USB host and peripheral controller chip | 12 | Renesas USBHS is a discrete USB host and peripheral controller chip |
diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c index 9374bd2aba20..8936a83c96cd 100644 --- a/drivers/usb/serial/bus.c +++ b/drivers/usb/serial/bus.c | |||
| @@ -38,56 +38,51 @@ static int usb_serial_device_match(struct device *dev, | |||
| 38 | return 0; | 38 | return 0; |
| 39 | } | 39 | } |
| 40 | 40 | ||
| 41 | static ssize_t port_number_show(struct device *dev, | ||
| 42 | struct device_attribute *attr, char *buf) | ||
| 43 | { | ||
| 44 | struct usb_serial_port *port = to_usb_serial_port(dev); | ||
| 45 | |||
| 46 | return sprintf(buf, "%d\n", port->port_number); | ||
| 47 | } | ||
| 48 | static DEVICE_ATTR_RO(port_number); | ||
| 49 | |||
| 50 | static int usb_serial_device_probe(struct device *dev) | 41 | static int usb_serial_device_probe(struct device *dev) |
| 51 | { | 42 | { |
| 52 | struct usb_serial_driver *driver; | 43 | struct usb_serial_driver *driver; |
| 53 | struct usb_serial_port *port; | 44 | struct usb_serial_port *port; |
| 45 | struct device *tty_dev; | ||
| 54 | int retval = 0; | 46 | int retval = 0; |
| 55 | int minor; | 47 | int minor; |
| 56 | 48 | ||
| 57 | port = to_usb_serial_port(dev); | 49 | port = to_usb_serial_port(dev); |
| 58 | if (!port) { | 50 | if (!port) |
| 59 | retval = -ENODEV; | 51 | return -ENODEV; |
| 60 | goto exit; | ||
| 61 | } | ||
| 62 | 52 | ||
| 63 | /* make sure suspend/resume doesn't race against port_probe */ | 53 | /* make sure suspend/resume doesn't race against port_probe */ |
| 64 | retval = usb_autopm_get_interface(port->serial->interface); | 54 | retval = usb_autopm_get_interface(port->serial->interface); |
| 65 | if (retval) | 55 | if (retval) |
| 66 | goto exit; | 56 | return retval; |
| 67 | 57 | ||
| 68 | driver = port->serial->type; | 58 | driver = port->serial->type; |
| 69 | if (driver->port_probe) { | 59 | if (driver->port_probe) { |
| 70 | retval = driver->port_probe(port); | 60 | retval = driver->port_probe(port); |
| 71 | if (retval) | 61 | if (retval) |
| 72 | goto exit_with_autopm; | 62 | goto err_autopm_put; |
| 73 | } | 63 | } |
| 74 | 64 | ||
| 75 | retval = device_create_file(dev, &dev_attr_port_number); | 65 | minor = port->minor; |
| 76 | if (retval) { | 66 | tty_dev = tty_register_device(usb_serial_tty_driver, minor, dev); |
| 77 | if (driver->port_remove) | 67 | if (IS_ERR(tty_dev)) { |
| 78 | retval = driver->port_remove(port); | 68 | retval = PTR_ERR(tty_dev); |
| 79 | goto exit_with_autopm; | 69 | goto err_port_remove; |
| 80 | } | 70 | } |
| 81 | 71 | ||
| 82 | minor = port->minor; | 72 | usb_autopm_put_interface(port->serial->interface); |
| 83 | tty_register_device(usb_serial_tty_driver, minor, dev); | 73 | |
| 84 | dev_info(&port->serial->dev->dev, | 74 | dev_info(&port->serial->dev->dev, |
| 85 | "%s converter now attached to ttyUSB%d\n", | 75 | "%s converter now attached to ttyUSB%d\n", |
| 86 | driver->description, minor); | 76 | driver->description, minor); |
| 87 | 77 | ||
| 88 | exit_with_autopm: | 78 | return 0; |
| 79 | |||
| 80 | err_port_remove: | ||
| 81 | if (driver->port_remove) | ||
| 82 | driver->port_remove(port); | ||
| 83 | err_autopm_put: | ||
| 89 | usb_autopm_put_interface(port->serial->interface); | 84 | usb_autopm_put_interface(port->serial->interface); |
| 90 | exit: | 85 | |
| 91 | return retval; | 86 | return retval; |
| 92 | } | 87 | } |
| 93 | 88 | ||
| @@ -114,8 +109,6 @@ static int usb_serial_device_remove(struct device *dev) | |||
| 114 | minor = port->minor; | 109 | minor = port->minor; |
| 115 | tty_unregister_device(usb_serial_tty_driver, minor); | 110 | tty_unregister_device(usb_serial_tty_driver, minor); |
| 116 | 111 | ||
| 117 | device_remove_file(&port->dev, &dev_attr_port_number); | ||
| 118 | |||
| 119 | driver = port->serial->type; | 112 | driver = port->serial->type; |
| 120 | if (driver->port_remove) | 113 | if (driver->port_remove) |
| 121 | retval = driver->port_remove(port); | 114 | retval = driver->port_remove(port); |
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index 2d72aa3564a3..ede4f5fcfadd 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c | |||
| @@ -84,6 +84,10 @@ struct ch341_private { | |||
| 84 | u8 line_status; /* active status of modem control inputs */ | 84 | u8 line_status; /* active status of modem control inputs */ |
| 85 | }; | 85 | }; |
| 86 | 86 | ||
| 87 | static void ch341_set_termios(struct tty_struct *tty, | ||
| 88 | struct usb_serial_port *port, | ||
| 89 | struct ktermios *old_termios); | ||
| 90 | |||
| 87 | static int ch341_control_out(struct usb_device *dev, u8 request, | 91 | static int ch341_control_out(struct usb_device *dev, u8 request, |
| 88 | u16 value, u16 index) | 92 | u16 value, u16 index) |
| 89 | { | 93 | { |
| @@ -309,19 +313,12 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port) | |||
| 309 | struct ch341_private *priv = usb_get_serial_port_data(port); | 313 | struct ch341_private *priv = usb_get_serial_port_data(port); |
| 310 | int r; | 314 | int r; |
| 311 | 315 | ||
| 312 | priv->baud_rate = DEFAULT_BAUD_RATE; | ||
| 313 | |||
| 314 | r = ch341_configure(serial->dev, priv); | 316 | r = ch341_configure(serial->dev, priv); |
| 315 | if (r) | 317 | if (r) |
| 316 | goto out; | 318 | goto out; |
| 317 | 319 | ||
| 318 | r = ch341_set_handshake(serial->dev, priv->line_control); | 320 | if (tty) |
| 319 | if (r) | 321 | ch341_set_termios(tty, port, NULL); |
| 320 | goto out; | ||
| 321 | |||
| 322 | r = ch341_set_baudrate(serial->dev, priv); | ||
| 323 | if (r) | ||
| 324 | goto out; | ||
| 325 | 322 | ||
| 326 | dev_dbg(&port->dev, "%s - submitting interrupt urb\n", __func__); | 323 | dev_dbg(&port->dev, "%s - submitting interrupt urb\n", __func__); |
| 327 | r = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); | 324 | r = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); |
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c index 29fa1c3d0089..3806e7014199 100644 --- a/drivers/usb/serial/console.c +++ b/drivers/usb/serial/console.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
| 15 | 15 | ||
| 16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
| 17 | #include <linux/module.h> | ||
| 17 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
| 18 | #include <linux/tty.h> | 19 | #include <linux/tty.h> |
| 19 | #include <linux/console.h> | 20 | #include <linux/console.h> |
| @@ -144,6 +145,7 @@ static int usb_console_setup(struct console *co, char *options) | |||
| 144 | init_ldsem(&tty->ldisc_sem); | 145 | init_ldsem(&tty->ldisc_sem); |
| 145 | INIT_LIST_HEAD(&tty->tty_files); | 146 | INIT_LIST_HEAD(&tty->tty_files); |
| 146 | kref_get(&tty->driver->kref); | 147 | kref_get(&tty->driver->kref); |
| 148 | __module_get(tty->driver->owner); | ||
| 147 | tty->ops = &usb_console_fake_tty_ops; | 149 | tty->ops = &usb_console_fake_tty_ops; |
| 148 | if (tty_init_termios(tty)) { | 150 | if (tty_init_termios(tty)) { |
| 149 | retval = -ENOMEM; | 151 | retval = -ENOMEM; |
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index f40c856ff758..84ce2d74894c 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c | |||
| @@ -147,6 +147,8 @@ static const struct usb_device_id id_table[] = { | |||
| 147 | { USB_DEVICE(0x166A, 0x0305) }, /* Clipsal C-5000CT2 C-Bus Spectrum Colour Touchscreen */ | 147 | { USB_DEVICE(0x166A, 0x0305) }, /* Clipsal C-5000CT2 C-Bus Spectrum Colour Touchscreen */ |
| 148 | { USB_DEVICE(0x166A, 0x0401) }, /* Clipsal L51xx C-Bus Architectural Dimmer */ | 148 | { USB_DEVICE(0x166A, 0x0401) }, /* Clipsal L51xx C-Bus Architectural Dimmer */ |
| 149 | { USB_DEVICE(0x166A, 0x0101) }, /* Clipsal 5560884 C-Bus Multi-room Audio Matrix Switcher */ | 149 | { USB_DEVICE(0x166A, 0x0101) }, /* Clipsal 5560884 C-Bus Multi-room Audio Matrix Switcher */ |
| 150 | { USB_DEVICE(0x16C0, 0x09B0) }, /* Lunatico Seletek */ | ||
| 151 | { USB_DEVICE(0x16C0, 0x09B1) }, /* Lunatico Seletek */ | ||
| 150 | { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */ | 152 | { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */ |
| 151 | { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */ | 153 | { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */ |
| 152 | { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */ | 154 | { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */ |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 1ebb351b9e9a..3086dec0ef53 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
| @@ -799,6 +799,8 @@ static const struct usb_device_id id_table_combined[] = { | |||
| 799 | { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) }, | 799 | { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) }, |
| 800 | { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) }, | 800 | { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) }, |
| 801 | { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) }, | 801 | { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) }, |
| 802 | { USB_DEVICE(FTDI_VID, CYBER_CORTEX_AV_PID), | ||
| 803 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | ||
| 802 | { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID), | 804 | { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID), |
| 803 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | 805 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
| 804 | { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID), | 806 | { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID), |
| @@ -978,6 +980,23 @@ static const struct usb_device_id id_table_combined[] = { | |||
| 978 | { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) }, | 980 | { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) }, |
| 979 | /* GE Healthcare devices */ | 981 | /* GE Healthcare devices */ |
| 980 | { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) }, | 982 | { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) }, |
| 983 | /* Active Research (Actisense) devices */ | ||
| 984 | { USB_DEVICE(FTDI_VID, ACTISENSE_NDC_PID) }, | ||
| 985 | { USB_DEVICE(FTDI_VID, ACTISENSE_USG_PID) }, | ||
| 986 | { USB_DEVICE(FTDI_VID, ACTISENSE_NGT_PID) }, | ||
| 987 | { USB_DEVICE(FTDI_VID, ACTISENSE_NGW_PID) }, | ||
| 988 | { USB_DEVICE(FTDI_VID, ACTISENSE_D9AC_PID) }, | ||
| 989 | { USB_DEVICE(FTDI_VID, ACTISENSE_D9AD_PID) }, | ||
| 990 | { USB_DEVICE(FTDI_VID, ACTISENSE_D9AE_PID) }, | ||
| 991 | { USB_DEVICE(FTDI_VID, ACTISENSE_D9AF_PID) }, | ||
| 992 | { USB_DEVICE(FTDI_VID, CHETCO_SEAGAUGE_PID) }, | ||
| 993 | { USB_DEVICE(FTDI_VID, CHETCO_SEASWITCH_PID) }, | ||
| 994 | { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_NMEA2000_PID) }, | ||
| 995 | { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ETHERNET_PID) }, | ||
| 996 | { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_WIFI_PID) }, | ||
| 997 | { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) }, | ||
| 998 | { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) }, | ||
| 999 | { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) }, | ||
| 981 | { } /* Terminating entry */ | 1000 | { } /* Terminating entry */ |
| 982 | }; | 1001 | }; |
| 983 | 1002 | ||
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index e52409c9be99..56b1b55c4751 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h | |||
| @@ -38,6 +38,9 @@ | |||
| 38 | 38 | ||
| 39 | #define FTDI_LUMEL_PD12_PID 0x6002 | 39 | #define FTDI_LUMEL_PD12_PID 0x6002 |
| 40 | 40 | ||
| 41 | /* Cyber Cortex AV by Fabulous Silicon (http://fabuloussilicon.com) */ | ||
| 42 | #define CYBER_CORTEX_AV_PID 0x8698 | ||
| 43 | |||
| 41 | /* | 44 | /* |
| 42 | * Marvell OpenRD Base, Client | 45 | * Marvell OpenRD Base, Client |
| 43 | * http://www.open-rd.org | 46 | * http://www.open-rd.org |
| @@ -1438,3 +1441,23 @@ | |||
| 1438 | */ | 1441 | */ |
| 1439 | #define GE_HEALTHCARE_VID 0x1901 | 1442 | #define GE_HEALTHCARE_VID 0x1901 |
| 1440 | #define GE_HEALTHCARE_NEMO_TRACKER_PID 0x0015 | 1443 | #define GE_HEALTHCARE_NEMO_TRACKER_PID 0x0015 |
| 1444 | |||
| 1445 | /* | ||
| 1446 | * Active Research (Actisense) devices | ||
| 1447 | */ | ||
| 1448 | #define ACTISENSE_NDC_PID 0xD9A8 /* NDC USB Serial Adapter */ | ||
| 1449 | #define ACTISENSE_USG_PID 0xD9A9 /* USG USB Serial Adapter */ | ||
| 1450 | #define ACTISENSE_NGT_PID 0xD9AA /* NGT NMEA2000 Interface */ | ||
| 1451 | #define ACTISENSE_NGW_PID 0xD9AB /* NGW NMEA2000 Gateway */ | ||
| 1452 | #define ACTISENSE_D9AC_PID 0xD9AC /* Actisense Reserved */ | ||
| 1453 | #define ACTISENSE_D9AD_PID 0xD9AD /* Actisense Reserved */ | ||
| 1454 | #define ACTISENSE_D9AE_PID 0xD9AE /* Actisense Reserved */ | ||
| 1455 | #define ACTISENSE_D9AF_PID 0xD9AF /* Actisense Reserved */ | ||
| 1456 | #define CHETCO_SEAGAUGE_PID 0xA548 /* SeaGauge USB Adapter */ | ||
| 1457 | #define CHETCO_SEASWITCH_PID 0xA549 /* SeaSwitch USB Adapter */ | ||
| 1458 | #define CHETCO_SEASMART_NMEA2000_PID 0xA54A /* SeaSmart NMEA2000 Gateway */ | ||
| 1459 | #define CHETCO_SEASMART_ETHERNET_PID 0xA54B /* SeaSmart Ethernet Gateway */ | ||
| 1460 | #define CHETCO_SEASMART_WIFI_PID 0xA5AC /* SeaSmart Wifi Gateway */ | ||
| 1461 | #define CHETCO_SEASMART_DISPLAY_PID 0xA5AD /* SeaSmart NMEA2000 Display */ | ||
| 1462 | #define CHETCO_SEASMART_LITE_PID 0xA5AE /* SeaSmart Lite USB Adapter */ | ||
| 1463 | #define CHETCO_SEASMART_ANALOG_PID 0xA5AF /* SeaSmart Analog Adapter */ | ||
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c index ccf1df7c4b80..54e170dd3dad 100644 --- a/drivers/usb/serial/generic.c +++ b/drivers/usb/serial/generic.c | |||
| @@ -258,7 +258,8 @@ void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout) | |||
| 258 | * character or at least one jiffy. | 258 | * character or at least one jiffy. |
| 259 | */ | 259 | */ |
| 260 | period = max_t(unsigned long, (10 * HZ / bps), 1); | 260 | period = max_t(unsigned long, (10 * HZ / bps), 1); |
| 261 | period = min_t(unsigned long, period, timeout); | 261 | if (timeout) |
| 262 | period = min_t(unsigned long, period, timeout); | ||
| 262 | 263 | ||
| 263 | dev_dbg(&port->dev, "%s - timeout = %u ms, period = %u ms\n", | 264 | dev_dbg(&port->dev, "%s - timeout = %u ms, period = %u ms\n", |
| 264 | __func__, jiffies_to_msecs(timeout), | 265 | __func__, jiffies_to_msecs(timeout), |
| @@ -268,7 +269,7 @@ void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout) | |||
| 268 | schedule_timeout_interruptible(period); | 269 | schedule_timeout_interruptible(period); |
| 269 | if (signal_pending(current)) | 270 | if (signal_pending(current)) |
| 270 | break; | 271 | break; |
| 271 | if (time_after(jiffies, expire)) | 272 | if (timeout && time_after(jiffies, expire)) |
| 272 | break; | 273 | break; |
| 273 | } | 274 | } |
| 274 | } | 275 | } |
diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c index ab1d690274ae..460a40669967 100644 --- a/drivers/usb/serial/mxuport.c +++ b/drivers/usb/serial/mxuport.c | |||
| @@ -1284,7 +1284,8 @@ static int mxuport_open(struct tty_struct *tty, struct usb_serial_port *port) | |||
| 1284 | } | 1284 | } |
| 1285 | 1285 | ||
| 1286 | /* Initial port termios */ | 1286 | /* Initial port termios */ |
| 1287 | mxuport_set_termios(tty, port, NULL); | 1287 | if (tty) |
| 1288 | mxuport_set_termios(tty, port, NULL); | ||
| 1288 | 1289 | ||
| 1289 | /* | 1290 | /* |
| 1290 | * TODO: use RQ_VENDOR_GET_MSR, once we know what it | 1291 | * TODO: use RQ_VENDOR_GET_MSR, once we know what it |
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 0f872e6b2c87..829604d11f3f 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c | |||
| @@ -132,6 +132,7 @@ MODULE_DEVICE_TABLE(usb, id_table); | |||
| 132 | #define UART_OVERRUN_ERROR 0x40 | 132 | #define UART_OVERRUN_ERROR 0x40 |
| 133 | #define UART_CTS 0x80 | 133 | #define UART_CTS 0x80 |
| 134 | 134 | ||
| 135 | static void pl2303_set_break(struct usb_serial_port *port, bool enable); | ||
| 135 | 136 | ||
| 136 | enum pl2303_type { | 137 | enum pl2303_type { |
| 137 | TYPE_01, /* Type 0 and 1 (difference unknown) */ | 138 | TYPE_01, /* Type 0 and 1 (difference unknown) */ |
| @@ -615,6 +616,7 @@ static void pl2303_close(struct usb_serial_port *port) | |||
| 615 | { | 616 | { |
| 616 | usb_serial_generic_close(port); | 617 | usb_serial_generic_close(port); |
| 617 | usb_kill_urb(port->interrupt_in_urb); | 618 | usb_kill_urb(port->interrupt_in_urb); |
| 619 | pl2303_set_break(port, false); | ||
| 618 | } | 620 | } |
| 619 | 621 | ||
| 620 | static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port) | 622 | static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port) |
| @@ -741,17 +743,16 @@ static int pl2303_ioctl(struct tty_struct *tty, | |||
| 741 | return -ENOIOCTLCMD; | 743 | return -ENOIOCTLCMD; |
| 742 | } | 744 | } |
| 743 | 745 | ||
| 744 | static void pl2303_break_ctl(struct tty_struct *tty, int break_state) | 746 | static void pl2303_set_break(struct usb_serial_port *port, bool enable) |
| 745 | { | 747 | { |
| 746 | struct usb_serial_port *port = tty->driver_data; | ||
| 747 | struct usb_serial *serial = port->serial; | 748 | struct usb_serial *serial = port->serial; |
| 748 | u16 state; | 749 | u16 state; |
| 749 | int result; | 750 | int result; |
| 750 | 751 | ||
| 751 | if (break_state == 0) | 752 | if (enable) |
| 752 | state = BREAK_OFF; | ||
| 753 | else | ||
| 754 | state = BREAK_ON; | 753 | state = BREAK_ON; |
| 754 | else | ||
| 755 | state = BREAK_OFF; | ||
| 755 | 756 | ||
| 756 | dev_dbg(&port->dev, "%s - turning break %s\n", __func__, | 757 | dev_dbg(&port->dev, "%s - turning break %s\n", __func__, |
| 757 | state == BREAK_OFF ? "off" : "on"); | 758 | state == BREAK_OFF ? "off" : "on"); |
| @@ -763,6 +764,13 @@ static void pl2303_break_ctl(struct tty_struct *tty, int break_state) | |||
| 763 | dev_err(&port->dev, "error sending break = %d\n", result); | 764 | dev_err(&port->dev, "error sending break = %d\n", result); |
| 764 | } | 765 | } |
| 765 | 766 | ||
| 767 | static void pl2303_break_ctl(struct tty_struct *tty, int state) | ||
| 768 | { | ||
| 769 | struct usb_serial_port *port = tty->driver_data; | ||
| 770 | |||
| 771 | pl2303_set_break(port, state); | ||
| 772 | } | ||
| 773 | |||
| 766 | static void pl2303_update_line_status(struct usb_serial_port *port, | 774 | static void pl2303_update_line_status(struct usb_serial_port *port, |
| 767 | unsigned char *data, | 775 | unsigned char *data, |
| 768 | unsigned int actual_length) | 776 | unsigned int actual_length) |
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 475723c006f9..529066bbc7e8 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c | |||
| @@ -687,6 +687,21 @@ static void serial_port_dtr_rts(struct tty_port *port, int on) | |||
| 687 | drv->dtr_rts(p, on); | 687 | drv->dtr_rts(p, on); |
| 688 | } | 688 | } |
| 689 | 689 | ||
| 690 | static ssize_t port_number_show(struct device *dev, | ||
| 691 | struct device_attribute *attr, char *buf) | ||
| 692 | { | ||
| 693 | struct usb_serial_port *port = to_usb_serial_port(dev); | ||
| 694 | |||
| 695 | return sprintf(buf, "%u\n", port->port_number); | ||
| 696 | } | ||
| 697 | static DEVICE_ATTR_RO(port_number); | ||
| 698 | |||
| 699 | static struct attribute *usb_serial_port_attrs[] = { | ||
| 700 | &dev_attr_port_number.attr, | ||
| 701 | NULL | ||
| 702 | }; | ||
| 703 | ATTRIBUTE_GROUPS(usb_serial_port); | ||
| 704 | |||
| 690 | static const struct tty_port_operations serial_port_ops = { | 705 | static const struct tty_port_operations serial_port_ops = { |
| 691 | .carrier_raised = serial_port_carrier_raised, | 706 | .carrier_raised = serial_port_carrier_raised, |
| 692 | .dtr_rts = serial_port_dtr_rts, | 707 | .dtr_rts = serial_port_dtr_rts, |
| @@ -902,6 +917,7 @@ static int usb_serial_probe(struct usb_interface *interface, | |||
| 902 | port->dev.driver = NULL; | 917 | port->dev.driver = NULL; |
| 903 | port->dev.bus = &usb_serial_bus_type; | 918 | port->dev.bus = &usb_serial_bus_type; |
| 904 | port->dev.release = &usb_serial_port_release; | 919 | port->dev.release = &usb_serial_port_release; |
| 920 | port->dev.groups = usb_serial_port_groups; | ||
| 905 | device_initialize(&port->dev); | 921 | device_initialize(&port->dev); |
| 906 | } | 922 | } |
| 907 | 923 | ||
| @@ -940,8 +956,9 @@ static int usb_serial_probe(struct usb_interface *interface, | |||
| 940 | port = serial->port[i]; | 956 | port = serial->port[i]; |
| 941 | if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL)) | 957 | if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL)) |
| 942 | goto probe_error; | 958 | goto probe_error; |
| 943 | buffer_size = max_t(int, serial->type->bulk_out_size, | 959 | buffer_size = serial->type->bulk_out_size; |
| 944 | usb_endpoint_maxp(endpoint)); | 960 | if (!buffer_size) |
| 961 | buffer_size = usb_endpoint_maxp(endpoint); | ||
| 945 | port->bulk_out_size = buffer_size; | 962 | port->bulk_out_size = buffer_size; |
| 946 | port->bulk_out_endpointAddress = endpoint->bEndpointAddress; | 963 | port->bulk_out_endpointAddress = endpoint->bEndpointAddress; |
| 947 | 964 | ||
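The bus.c and usb-serial.c hunks above move the port_number attribute out of a device_create_file() call in probe and into a default attribute group wired up through dev->groups before the port device is added. With the group installed at registration time, the attribute already exists when the uevent reaches userspace, so there is no window in which the device is visible without it. A stripped-down sketch of that pattern for a generic device; the example_port type and names are invented for illustration and are not the driver's code:

    #include <linux/device.h>
    #include <linux/kernel.h>
    #include <linux/sysfs.h>

    /* Hypothetical device wrapper, stands in for a real driver structure. */
    struct example_port {
            struct device dev;
            unsigned int port_number;
    };
    #define to_example_port(d) container_of(d, struct example_port, dev)

    static ssize_t port_number_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
    {
            struct example_port *port = to_example_port(dev);

            return sprintf(buf, "%u\n", port->port_number);
    }
    static DEVICE_ATTR_RO(port_number);

    static struct attribute *example_port_attrs[] = {
            &dev_attr_port_number.attr,
            NULL
    };
    ATTRIBUTE_GROUPS(example_port);         /* defines example_port_groups */

    /* Set dev->groups before device_add() so the files exist at uevent time. */
    static void example_port_setup(struct example_port *port)
    {
            device_initialize(&port->dev);
            port->dev.groups = example_port_groups;
    }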
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index dbc00e56c7f5..82570425fdfe 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h | |||
| @@ -113,6 +113,13 @@ UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999, | |||
| 113 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | 113 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
| 114 | US_FL_NO_ATA_1X), | 114 | US_FL_NO_ATA_1X), |
| 115 | 115 | ||
| 116 | /* Reported-by: Tom Arild Naess <tanaess@gmail.com> */ | ||
| 117 | UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999, | ||
| 118 | "JMicron", | ||
| 119 | "JMS539", | ||
| 120 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
| 121 | US_FL_NO_REPORT_OPCODES), | ||
| 122 | |||
| 116 | /* Reported-by: Claudio Bizzarri <claudio.bizzarri@gmail.com> */ | 123 | /* Reported-by: Claudio Bizzarri <claudio.bizzarri@gmail.com> */ |
| 117 | UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999, | 124 | UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999, |
| 118 | "JMicron", | 125 | "JMicron", |
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index d468d02179f4..5600c33fcadb 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c | |||
| @@ -889,6 +889,12 @@ static void usb_stor_scan_dwork(struct work_struct *work) | |||
| 889 | !(us->fflags & US_FL_SCM_MULT_TARG)) { | 889 | !(us->fflags & US_FL_SCM_MULT_TARG)) { |
| 890 | mutex_lock(&us->dev_mutex); | 890 | mutex_lock(&us->dev_mutex); |
| 891 | us->max_lun = usb_stor_Bulk_max_lun(us); | 891 | us->max_lun = usb_stor_Bulk_max_lun(us); |
| 892 | /* | ||
| 893 | * Allow proper scanning of devices that present more than 8 LUNs, | ||
| 894 | * while not affecting other devices that may need the previous behavior. | ||
| 895 | */ | ||
| 896 | if (us->max_lun >= 8) | ||
| 897 | us_to_host(us)->max_lun = us->max_lun + 1; | ||
| 892 | mutex_unlock(&us->dev_mutex); | 898 | mutex_unlock(&us->dev_mutex); |
| 893 | } | 899 | } |
| 894 | scsi_scan_host(us_to_host(us)); | 900 | scsi_scan_host(us_to_host(us)); |
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index f88bfdf5b6a0..2027a27546ef 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c | |||
| @@ -868,12 +868,14 @@ int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags, | |||
| 868 | func = vfio_pci_set_err_trigger; | 868 | func = vfio_pci_set_err_trigger; |
| 869 | break; | 869 | break; |
| 870 | } | 870 | } |
| 871 | break; | ||
| 871 | case VFIO_PCI_REQ_IRQ_INDEX: | 872 | case VFIO_PCI_REQ_IRQ_INDEX: |
| 872 | switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { | 873 | switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { |
| 873 | case VFIO_IRQ_SET_ACTION_TRIGGER: | 874 | case VFIO_IRQ_SET_ACTION_TRIGGER: |
| 874 | func = vfio_pci_set_req_trigger; | 875 | func = vfio_pci_set_req_trigger; |
| 875 | break; | 876 | break; |
| 876 | } | 877 | } |
| 878 | break; | ||
| 877 | } | 879 | } |
| 878 | 880 | ||
| 879 | if (!func) | 881 | if (!func) |
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index afa06d28725d..2bbfc25e582c 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c | |||
| @@ -591,11 +591,6 @@ static void handle_rx(struct vhost_net *net) | |||
| 591 | * TODO: support TSO. | 591 | * TODO: support TSO. |
| 592 | */ | 592 | */ |
| 593 | iov_iter_advance(&msg.msg_iter, vhost_hlen); | 593 | iov_iter_advance(&msg.msg_iter, vhost_hlen); |
| 594 | } else { | ||
| 595 | /* It'll come from socket; we'll need to patch | ||
| 596 | * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF | ||
| 597 | */ | ||
| 598 | iov_iter_advance(&fixup, sizeof(hdr)); | ||
| 599 | } | 594 | } |
| 600 | err = sock->ops->recvmsg(NULL, sock, &msg, | 595 | err = sock->ops->recvmsg(NULL, sock, &msg, |
| 601 | sock_len, MSG_DONTWAIT | MSG_TRUNC); | 596 | sock_len, MSG_DONTWAIT | MSG_TRUNC); |
| @@ -609,17 +604,25 @@ static void handle_rx(struct vhost_net *net) | |||
| 609 | continue; | 604 | continue; |
| 610 | } | 605 | } |
| 611 | /* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */ | 606 | /* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */ |
| 612 | if (unlikely(vhost_hlen) && | 607 | if (unlikely(vhost_hlen)) { |
| 613 | copy_to_iter(&hdr, sizeof(hdr), &fixup) != sizeof(hdr)) { | 608 | if (copy_to_iter(&hdr, sizeof(hdr), |
| 614 | vq_err(vq, "Unable to write vnet_hdr at addr %p\n", | 609 | &fixup) != sizeof(hdr)) { |
| 615 | vq->iov->iov_base); | 610 | vq_err(vq, "Unable to write vnet_hdr " |
| 616 | break; | 611 | "at addr %p\n", vq->iov->iov_base); |
| 612 | break; | ||
| 613 | } | ||
| 614 | } else { | ||
| 615 | /* Header came from socket; we'll need to patch | ||
| 616 | * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF | ||
| 617 | */ | ||
| 618 | iov_iter_advance(&fixup, sizeof(hdr)); | ||
| 617 | } | 619 | } |
| 618 | /* TODO: Should check and handle checksum. */ | 620 | /* TODO: Should check and handle checksum. */ |
| 619 | 621 | ||
| 620 | num_buffers = cpu_to_vhost16(vq, headcount); | 622 | num_buffers = cpu_to_vhost16(vq, headcount); |
| 621 | if (likely(mergeable) && | 623 | if (likely(mergeable) && |
| 622 | copy_to_iter(&num_buffers, 2, &fixup) != 2) { | 624 | copy_to_iter(&num_buffers, sizeof num_buffers, |
| 625 | &fixup) != sizeof num_buffers) { | ||
| 623 | vq_err(vq, "Failed num_buffers write"); | 626 | vq_err(vq, "Failed num_buffers write"); |
| 624 | vhost_discard_vq_desc(vq, headcount); | 627 | vhost_discard_vq_desc(vq, headcount); |
| 625 | break; | 628 | break; |
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c index 32c0b6b28097..9362424c2340 100644 --- a/drivers/video/fbdev/amba-clcd.c +++ b/drivers/video/fbdev/amba-clcd.c | |||
| @@ -599,6 +599,9 @@ static int clcdfb_of_get_mode(struct device *dev, struct device_node *endpoint, | |||
| 599 | 599 | ||
| 600 | len = clcdfb_snprintf_mode(NULL, 0, mode); | 600 | len = clcdfb_snprintf_mode(NULL, 0, mode); |
| 601 | name = devm_kzalloc(dev, len + 1, GFP_KERNEL); | 601 | name = devm_kzalloc(dev, len + 1, GFP_KERNEL); |
| 602 | if (!name) | ||
| 603 | return -ENOMEM; | ||
| 604 | |||
| 602 | clcdfb_snprintf_mode(name, len + 1, mode); | 605 | clcdfb_snprintf_mode(name, len + 1, mode); |
| 603 | mode->name = name; | 606 | mode->name = name; |
| 604 | 607 | ||
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c index 95338593ebf4..868facdec638 100644 --- a/drivers/video/fbdev/core/fbmon.c +++ b/drivers/video/fbdev/core/fbmon.c | |||
| @@ -624,9 +624,6 @@ static struct fb_videomode *fb_create_modedb(unsigned char *edid, int *dbsize, | |||
| 624 | int num = 0, i, first = 1; | 624 | int num = 0, i, first = 1; |
| 625 | int ver, rev; | 625 | int ver, rev; |
| 626 | 626 | ||
| 627 | ver = edid[EDID_STRUCT_VERSION]; | ||
| 628 | rev = edid[EDID_STRUCT_REVISION]; | ||
| 629 | |||
| 630 | mode = kzalloc(50 * sizeof(struct fb_videomode), GFP_KERNEL); | 627 | mode = kzalloc(50 * sizeof(struct fb_videomode), GFP_KERNEL); |
| 631 | if (mode == NULL) | 628 | if (mode == NULL) |
| 632 | return NULL; | 629 | return NULL; |
| @@ -637,6 +634,9 @@ static struct fb_videomode *fb_create_modedb(unsigned char *edid, int *dbsize, | |||
| 637 | return NULL; | 634 | return NULL; |
| 638 | } | 635 | } |
| 639 | 636 | ||
| 637 | ver = edid[EDID_STRUCT_VERSION]; | ||
| 638 | rev = edid[EDID_STRUCT_REVISION]; | ||
| 639 | |||
| 640 | *dbsize = 0; | 640 | *dbsize = 0; |
| 641 | 641 | ||
| 642 | DPRINTK(" Detailed Timings\n"); | 642 | DPRINTK(" Detailed Timings\n"); |
diff --git a/drivers/video/fbdev/omap2/dss/display-sysfs.c b/drivers/video/fbdev/omap2/dss/display-sysfs.c index 5a2095a98ed8..12186557a9d4 100644 --- a/drivers/video/fbdev/omap2/dss/display-sysfs.c +++ b/drivers/video/fbdev/omap2/dss/display-sysfs.c | |||
| @@ -28,44 +28,22 @@ | |||
| 28 | #include <video/omapdss.h> | 28 | #include <video/omapdss.h> |
| 29 | #include "dss.h" | 29 | #include "dss.h" |
| 30 | 30 | ||
| 31 | static struct omap_dss_device *to_dss_device_sysfs(struct device *dev) | 31 | static ssize_t display_name_show(struct omap_dss_device *dssdev, char *buf) |
| 32 | { | 32 | { |
| 33 | struct omap_dss_device *dssdev = NULL; | ||
| 34 | |||
| 35 | for_each_dss_dev(dssdev) { | ||
| 36 | if (dssdev->dev == dev) { | ||
| 37 | omap_dss_put_device(dssdev); | ||
| 38 | return dssdev; | ||
| 39 | } | ||
| 40 | } | ||
| 41 | |||
| 42 | return NULL; | ||
| 43 | } | ||
| 44 | |||
| 45 | static ssize_t display_name_show(struct device *dev, | ||
| 46 | struct device_attribute *attr, char *buf) | ||
| 47 | { | ||
| 48 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
| 49 | |||
| 50 | return snprintf(buf, PAGE_SIZE, "%s\n", | 33 | return snprintf(buf, PAGE_SIZE, "%s\n", |
| 51 | dssdev->name ? | 34 | dssdev->name ? |
| 52 | dssdev->name : ""); | 35 | dssdev->name : ""); |
| 53 | } | 36 | } |
| 54 | 37 | ||
| 55 | static ssize_t display_enabled_show(struct device *dev, | 38 | static ssize_t display_enabled_show(struct omap_dss_device *dssdev, char *buf) |
| 56 | struct device_attribute *attr, char *buf) | ||
| 57 | { | 39 | { |
| 58 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
| 59 | |||
| 60 | return snprintf(buf, PAGE_SIZE, "%d\n", | 40 | return snprintf(buf, PAGE_SIZE, "%d\n", |
| 61 | omapdss_device_is_enabled(dssdev)); | 41 | omapdss_device_is_enabled(dssdev)); |
| 62 | } | 42 | } |
| 63 | 43 | ||
| 64 | static ssize_t display_enabled_store(struct device *dev, | 44 | static ssize_t display_enabled_store(struct omap_dss_device *dssdev, |
| 65 | struct device_attribute *attr, | ||
| 66 | const char *buf, size_t size) | 45 | const char *buf, size_t size) |
| 67 | { | 46 | { |
| 68 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
| 69 | int r; | 47 | int r; |
| 70 | bool enable; | 48 | bool enable; |
| 71 | 49 | ||
| @@ -90,19 +68,16 @@ static ssize_t display_enabled_store(struct device *dev, | |||
| 90 | return size; | 68 | return size; |
| 91 | } | 69 | } |
| 92 | 70 | ||
| 93 | static ssize_t display_tear_show(struct device *dev, | 71 | static ssize_t display_tear_show(struct omap_dss_device *dssdev, char *buf) |
| 94 | struct device_attribute *attr, char *buf) | ||
| 95 | { | 72 | { |
| 96 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
| 97 | return snprintf(buf, PAGE_SIZE, "%d\n", | 73 | return snprintf(buf, PAGE_SIZE, "%d\n", |
| 98 | dssdev->driver->get_te ? | 74 | dssdev->driver->get_te ? |
| 99 | dssdev->driver->get_te(dssdev) : 0); | 75 | dssdev->driver->get_te(dssdev) : 0); |
| 100 | } | 76 | } |
| 101 | 77 | ||
| 102 | static ssize_t display_tear_store(struct device *dev, | 78 | static ssize_t display_tear_store(struct omap_dss_device *dssdev, |
| 103 | struct device_attribute *attr, const char *buf, size_t size) | 79 | const char *buf, size_t size) |
| 104 | { | 80 | { |
| 105 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
| 106 | int r; | 81 | int r; |
| 107 | bool te; | 82 | bool te; |
| 108 | 83 | ||
| @@ -120,10 +95,8 @@ static ssize_t display_tear_store(struct device *dev, | |||
| 120 | return size; | 95 | return size; |
| 121 | } | 96 | } |
| 122 | 97 | ||
| 123 | static ssize_t display_timings_show(struct device *dev, | 98 | static ssize_t display_timings_show(struct omap_dss_device *dssdev, char *buf) |
| 124 | struct device_attribute *attr, char *buf) | ||
| 125 | { | 99 | { |
| 126 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
| 127 | struct omap_video_timings t; | 100 | struct omap_video_timings t; |
| 128 | 101 | ||
| 129 | if (!dssdev->driver->get_timings) | 102 | if (!dssdev->driver->get_timings) |
| @@ -137,10 +110,9 @@ static ssize_t display_timings_show(struct device *dev, | |||
| 137 | t.y_res, t.vfp, t.vbp, t.vsw); | 110 | t.y_res, t.vfp, t.vbp, t.vsw); |
| 138 | } | 111 | } |
| 139 | 112 | ||
| 140 | static ssize_t display_timings_store(struct device *dev, | 113 | static ssize_t display_timings_store(struct omap_dss_device *dssdev, |
| 141 | struct device_attribute *attr, const char *buf, size_t size) | 114 | const char *buf, size_t size) |
| 142 | { | 115 | { |
| 143 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
| 144 | struct omap_video_timings t = dssdev->panel.timings; | 116 | struct omap_video_timings t = dssdev->panel.timings; |
| 145 | int r, found; | 117 | int r, found; |
| 146 | 118 | ||
| @@ -176,10 +148,8 @@ static ssize_t display_timings_store(struct device *dev, | |||
| 176 | return size; | 148 | return size; |
| 177 | } | 149 | } |
| 178 | 150 | ||
| 179 | static ssize_t display_rotate_show(struct device *dev, | 151 | static ssize_t display_rotate_show(struct omap_dss_device *dssdev, char *buf) |
| 180 | struct device_attribute *attr, char *buf) | ||
| 181 | { | 152 | { |
| 182 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
| 183 | int rotate; | 153 | int rotate; |
| 184 | if (!dssdev->driver->get_rotate) | 154 | if (!dssdev->driver->get_rotate) |
| 185 | return -ENOENT; | 155 | return -ENOENT; |
| @@ -187,10 +157,9 @@ static ssize_t display_rotate_show(struct device *dev, | |||
| 187 | return snprintf(buf, PAGE_SIZE, "%u\n", rotate); | 157 | return snprintf(buf, PAGE_SIZE, "%u\n", rotate); |
| 188 | } | 158 | } |
| 189 | 159 | ||
| 190 | static ssize_t display_rotate_store(struct device *dev, | 160 | static ssize_t display_rotate_store(struct omap_dss_device *dssdev, |
| 191 | struct device_attribute *attr, const char *buf, size_t size) | 161 | const char *buf, size_t size) |
| 192 | { | 162 | { |
| 193 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
| 194 | int rot, r; | 163 | int rot, r; |
| 195 | 164 | ||
| 196 | if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate) | 165 | if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate) |
| @@ -207,10 +176,8 @@ static ssize_t display_rotate_store(struct device *dev, | |||
| 207 | return size; | 176 | return size; |
| 208 | } | 177 | } |
| 209 | 178 | ||
| 210 | static ssize_t display_mirror_show(struct device *dev, | 179 | static ssize_t display_mirror_show(struct omap_dss_device *dssdev, char *buf) |
| 211 | struct device_attribute *attr, char *buf) | ||
| 212 | { | 180 | { |
| 213 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
| 214 | int mirror; | 181 | int mirror; |
| 215 | if (!dssdev->driver->get_mirror) | 182 | if (!dssdev->driver->get_mirror) |
| 216 | return -ENOENT; | 183 | return -ENOENT; |
| @@ -218,10 +185,9 @@ static ssize_t display_mirror_show(struct device *dev, | |||
| 218 | return snprintf(buf, PAGE_SIZE, "%u\n", mirror); | 185 | return snprintf(buf, PAGE_SIZE, "%u\n", mirror); |
| 219 | } | 186 | } |
| 220 | 187 | ||
| 221 | static ssize_t display_mirror_store(struct device *dev, | 188 | static ssize_t display_mirror_store(struct omap_dss_device *dssdev, |
| 222 | struct device_attribute *attr, const char *buf, size_t size) | 189 | const char *buf, size_t size) |
| 223 | { | 190 | { |
| 224 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
| 225 | int r; | 191 | int r; |
| 226 | bool mirror; | 192 | bool mirror; |
| 227 | 193 | ||
| @@ -239,10 +205,8 @@ static ssize_t display_mirror_store(struct device *dev, | |||
| 239 | return size; | 205 | return size; |
| 240 | } | 206 | } |
| 241 | 207 | ||
| 242 | static ssize_t display_wss_show(struct device *dev, | 208 | static ssize_t display_wss_show(struct omap_dss_device *dssdev, char *buf) |
| 243 | struct device_attribute *attr, char *buf) | ||
| 244 | { | 209 | { |
| 245 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
| 246 | unsigned int wss; | 210 | unsigned int wss; |
| 247 | 211 | ||
| 248 | if (!dssdev->driver->get_wss) | 212 | if (!dssdev->driver->get_wss) |
| @@ -253,10 +217,9 @@ static ssize_t display_wss_show(struct device *dev, | |||
| 253 | return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss); | 217 | return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss); |
| 254 | } | 218 | } |
| 255 | 219 | ||
| 256 | static ssize_t display_wss_store(struct device *dev, | 220 | static ssize_t display_wss_store(struct omap_dss_device *dssdev, |
| 257 | struct device_attribute *attr, const char *buf, size_t size) | 221 | const char *buf, size_t size) |
| 258 | { | 222 | { |
| 259 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
| 260 | u32 wss; | 223 | u32 wss; |
| 261 | int r; | 224 | int r; |
| 262 | 225 | ||
| @@ -277,50 +240,94 @@ static ssize_t display_wss_store(struct device *dev, | |||
| 277 | return size; | 240 | return size; |
| 278 | } | 241 | } |
| 279 | 242 | ||
| 280 | static DEVICE_ATTR(display_name, S_IRUGO, display_name_show, NULL); | 243 | struct display_attribute { |
| 281 | static DEVICE_ATTR(enabled, S_IRUGO|S_IWUSR, | 244 | struct attribute attr; |
| 245 | ssize_t (*show)(struct omap_dss_device *, char *); | ||
| 246 | ssize_t (*store)(struct omap_dss_device *, const char *, size_t); | ||
| 247 | }; | ||
| 248 | |||
| 249 | #define DISPLAY_ATTR(_name, _mode, _show, _store) \ | ||
| 250 | struct display_attribute display_attr_##_name = \ | ||
| 251 | __ATTR(_name, _mode, _show, _store) | ||
| 252 | |||
| 253 | static DISPLAY_ATTR(name, S_IRUGO, display_name_show, NULL); | ||
| 254 | static DISPLAY_ATTR(display_name, S_IRUGO, display_name_show, NULL); | ||
| 255 | static DISPLAY_ATTR(enabled, S_IRUGO|S_IWUSR, | ||
| 282 | display_enabled_show, display_enabled_store); | 256 | display_enabled_show, display_enabled_store); |
| 283 | static DEVICE_ATTR(tear_elim, S_IRUGO|S_IWUSR, | 257 | static DISPLAY_ATTR(tear_elim, S_IRUGO|S_IWUSR, |
| 284 | display_tear_show, display_tear_store); | 258 | display_tear_show, display_tear_store); |
| 285 | static DEVICE_ATTR(timings, S_IRUGO|S_IWUSR, | 259 | static DISPLAY_ATTR(timings, S_IRUGO|S_IWUSR, |
| 286 | display_timings_show, display_timings_store); | 260 | display_timings_show, display_timings_store); |
| 287 | static DEVICE_ATTR(rotate, S_IRUGO|S_IWUSR, | 261 | static DISPLAY_ATTR(rotate, S_IRUGO|S_IWUSR, |
| 288 | display_rotate_show, display_rotate_store); | 262 | display_rotate_show, display_rotate_store); |
| 289 | static DEVICE_ATTR(mirror, S_IRUGO|S_IWUSR, | 263 | static DISPLAY_ATTR(mirror, S_IRUGO|S_IWUSR, |
| 290 | display_mirror_show, display_mirror_store); | 264 | display_mirror_show, display_mirror_store); |
| 291 | static DEVICE_ATTR(wss, S_IRUGO|S_IWUSR, | 265 | static DISPLAY_ATTR(wss, S_IRUGO|S_IWUSR, |
| 292 | display_wss_show, display_wss_store); | 266 | display_wss_show, display_wss_store); |
| 293 | 267 | ||
| 294 | static const struct attribute *display_sysfs_attrs[] = { | 268 | static struct attribute *display_sysfs_attrs[] = { |
| 295 | &dev_attr_display_name.attr, | 269 | &display_attr_name.attr, |
| 296 | &dev_attr_enabled.attr, | 270 | &display_attr_display_name.attr, |
| 297 | &dev_attr_tear_elim.attr, | 271 | &display_attr_enabled.attr, |
| 298 | &dev_attr_timings.attr, | 272 | &display_attr_tear_elim.attr, |
| 299 | &dev_attr_rotate.attr, | 273 | &display_attr_timings.attr, |
| 300 | &dev_attr_mirror.attr, | 274 | &display_attr_rotate.attr, |
| 301 | &dev_attr_wss.attr, | 275 | &display_attr_mirror.attr, |
| 276 | &display_attr_wss.attr, | ||
| 302 | NULL | 277 | NULL |
| 303 | }; | 278 | }; |
| 304 | 279 | ||
| 280 | static ssize_t display_attr_show(struct kobject *kobj, struct attribute *attr, | ||
| 281 | char *buf) | ||
| 282 | { | ||
| 283 | struct omap_dss_device *dssdev; | ||
| 284 | struct display_attribute *display_attr; | ||
| 285 | |||
| 286 | dssdev = container_of(kobj, struct omap_dss_device, kobj); | ||
| 287 | display_attr = container_of(attr, struct display_attribute, attr); | ||
| 288 | |||
| 289 | if (!display_attr->show) | ||
| 290 | return -ENOENT; | ||
| 291 | |||
| 292 | return display_attr->show(dssdev, buf); | ||
| 293 | } | ||
| 294 | |||
| 295 | static ssize_t display_attr_store(struct kobject *kobj, struct attribute *attr, | ||
| 296 | const char *buf, size_t size) | ||
| 297 | { | ||
| 298 | struct omap_dss_device *dssdev; | ||
| 299 | struct display_attribute *display_attr; | ||
| 300 | |||
| 301 | dssdev = container_of(kobj, struct omap_dss_device, kobj); | ||
| 302 | display_attr = container_of(attr, struct display_attribute, attr); | ||
| 303 | |||
| 304 | if (!display_attr->store) | ||
| 305 | return -ENOENT; | ||
| 306 | |||
| 307 | return display_attr->store(dssdev, buf, size); | ||
| 308 | } | ||
| 309 | |||
| 310 | static const struct sysfs_ops display_sysfs_ops = { | ||
| 311 | .show = display_attr_show, | ||
| 312 | .store = display_attr_store, | ||
| 313 | }; | ||
| 314 | |||
| 315 | static struct kobj_type display_ktype = { | ||
| 316 | .sysfs_ops = &display_sysfs_ops, | ||
| 317 | .default_attrs = display_sysfs_attrs, | ||
| 318 | }; | ||
| 319 | |||
| 305 | int display_init_sysfs(struct platform_device *pdev) | 320 | int display_init_sysfs(struct platform_device *pdev) |
| 306 | { | 321 | { |
| 307 | struct omap_dss_device *dssdev = NULL; | 322 | struct omap_dss_device *dssdev = NULL; |
| 308 | int r; | 323 | int r; |
| 309 | 324 | ||
| 310 | for_each_dss_dev(dssdev) { | 325 | for_each_dss_dev(dssdev) { |
| 311 | struct kobject *kobj = &dssdev->dev->kobj; | 326 | r = kobject_init_and_add(&dssdev->kobj, &display_ktype, |
| 312 | 327 | &pdev->dev.kobj, dssdev->alias); | |
| 313 | r = sysfs_create_files(kobj, display_sysfs_attrs); | ||
| 314 | if (r) { | 328 | if (r) { |
| 315 | DSSERR("failed to create sysfs files\n"); | 329 | DSSERR("failed to create sysfs files\n"); |
| 316 | goto err; | 330 | omap_dss_put_device(dssdev); |
| 317 | } | ||
| 318 | |||
| 319 | r = sysfs_create_link(&pdev->dev.kobj, kobj, dssdev->alias); | ||
| 320 | if (r) { | ||
| 321 | sysfs_remove_files(kobj, display_sysfs_attrs); | ||
| 322 | |||
| 323 | DSSERR("failed to create sysfs display link\n"); | ||
| 324 | goto err; | 331 | goto err; |
| 325 | } | 332 | } |
| 326 | } | 333 | } |
| @@ -338,8 +345,12 @@ void display_uninit_sysfs(struct platform_device *pdev) | |||
| 338 | struct omap_dss_device *dssdev = NULL; | 345 | struct omap_dss_device *dssdev = NULL; |
| 339 | 346 | ||
| 340 | for_each_dss_dev(dssdev) { | 347 | for_each_dss_dev(dssdev) { |
| 341 | sysfs_remove_link(&pdev->dev.kobj, dssdev->alias); | 348 | if (kobject_name(&dssdev->kobj) == NULL) |
| 342 | sysfs_remove_files(&dssdev->dev->kobj, | 349 | continue; |
| 343 | display_sysfs_attrs); | 350 | |
| 351 | kobject_del(&dssdev->kobj); | ||
| 352 | kobject_put(&dssdev->kobj); | ||
| 353 | |||
| 354 | memset(&dssdev->kobj, 0, sizeof(dssdev->kobj)); | ||
| 344 | } | 355 | } |
| 345 | } | 356 | } |
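The display-sysfs.c rework above drops the per-device DEVICE_ATTR files and the sysfs link, and instead embeds a kobject in omap_dss_device, registered under the platform device with a private ktype whose sysfs_ops dispatch to show/store callbacks that take the omap_dss_device directly. A condensed sketch of that kobject plus custom attribute type wiring, with invented demo_* names and only a show path; it assumes the containing object embeds the kobject and is not the driver's actual code:

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/kobject.h>
    #include <linux/stat.h>
    #include <linux/sysfs.h>

    /* Hypothetical object that embeds its own kobject. */
    struct demo_obj {
            struct kobject kobj;
            int value;
    };
    #define to_demo_obj(k) container_of(k, struct demo_obj, kobj)

    /* Attribute type whose callbacks take the containing object directly. */
    struct demo_attribute {
            struct attribute attr;
            ssize_t (*show)(struct demo_obj *, char *);
            ssize_t (*store)(struct demo_obj *, const char *, size_t);
    };

    static ssize_t value_show(struct demo_obj *obj, char *buf)
    {
            return sprintf(buf, "%d\n", obj->value);
    }

    static struct demo_attribute demo_attr_value =
            __ATTR(value, S_IRUGO, value_show, NULL);

    static struct attribute *demo_attrs[] = {
            &demo_attr_value.attr,
            NULL
    };

    /* sysfs_ops entry point: recover both containers, then dispatch. */
    static ssize_t demo_attr_show(struct kobject *kobj, struct attribute *attr,
                                  char *buf)
    {
            struct demo_obj *obj = to_demo_obj(kobj);
            struct demo_attribute *dattr =
                    container_of(attr, struct demo_attribute, attr);

            if (!dattr->show)
                    return -ENOENT;

            return dattr->show(obj, buf);
    }

    static const struct sysfs_ops demo_sysfs_ops = {
            .show = demo_attr_show,
    };

    static struct kobj_type demo_ktype = {
            .sysfs_ops = &demo_sysfs_ops,
            .default_attrs = demo_attrs,    /* used by kernels of this era */
    };

    /* Register under a parent kobject, e.g. a platform device's kobj. */
    static int demo_register(struct demo_obj *obj, struct kobject *parent)
    {
            return kobject_init_and_add(&obj->kobj, &demo_ktype, parent, "demo");
    }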
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c index 6df940528fd2..1443b3c391de 100644 --- a/drivers/watchdog/at91sam9_wdt.c +++ b/drivers/watchdog/at91sam9_wdt.c | |||
| @@ -208,7 +208,8 @@ static int at91_wdt_init(struct platform_device *pdev, struct at91wdt *wdt) | |||
| 208 | 208 | ||
| 209 | if ((tmp & AT91_WDT_WDFIEN) && wdt->irq) { | 209 | if ((tmp & AT91_WDT_WDFIEN) && wdt->irq) { |
| 210 | err = request_irq(wdt->irq, wdt_interrupt, | 210 | err = request_irq(wdt->irq, wdt_interrupt, |
| 211 | IRQF_SHARED | IRQF_IRQPOLL, | 211 | IRQF_SHARED | IRQF_IRQPOLL | |
| 212 | IRQF_NO_SUSPEND, | ||
| 212 | pdev->name, wdt); | 213 | pdev->name, wdt); |
| 213 | if (err) | 214 | if (err) |
| 214 | return err; | 215 | return err; |
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index b4bca2d4a7e5..70fba973a107 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c | |||
| @@ -526,20 +526,26 @@ static unsigned int __startup_pirq(unsigned int irq) | |||
| 526 | pirq_query_unmask(irq); | 526 | pirq_query_unmask(irq); |
| 527 | 527 | ||
| 528 | rc = set_evtchn_to_irq(evtchn, irq); | 528 | rc = set_evtchn_to_irq(evtchn, irq); |
| 529 | if (rc != 0) { | 529 | if (rc) |
| 530 | pr_err("irq%d: Failed to set port to irq mapping (%d)\n", | 530 | goto err; |
| 531 | irq, rc); | 531 | |
| 532 | xen_evtchn_close(evtchn); | ||
| 533 | return 0; | ||
| 534 | } | ||
| 535 | bind_evtchn_to_cpu(evtchn, 0); | 532 | bind_evtchn_to_cpu(evtchn, 0); |
| 536 | info->evtchn = evtchn; | 533 | info->evtchn = evtchn; |
| 537 | 534 | ||
| 535 | rc = xen_evtchn_port_setup(info); | ||
| 536 | if (rc) | ||
| 537 | goto err; | ||
| 538 | |||
| 538 | out: | 539 | out: |
| 539 | unmask_evtchn(evtchn); | 540 | unmask_evtchn(evtchn); |
| 540 | eoi_pirq(irq_get_irq_data(irq)); | 541 | eoi_pirq(irq_get_irq_data(irq)); |
| 541 | 542 | ||
| 542 | return 0; | 543 | return 0; |
| 544 | |||
| 545 | err: | ||
| 546 | pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc); | ||
| 547 | xen_evtchn_close(evtchn); | ||
| 548 | return 0; | ||
| 543 | } | 549 | } |
| 544 | 550 | ||
| 545 | static unsigned int startup_pirq(struct irq_data *data) | 551 | static unsigned int startup_pirq(struct irq_data *data) |
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c index 46ae0f9f02ad..75fe3d466515 100644 --- a/drivers/xen/xen-pciback/conf_space.c +++ b/drivers/xen/xen-pciback/conf_space.c | |||
| @@ -16,7 +16,7 @@ | |||
| 16 | #include "conf_space.h" | 16 | #include "conf_space.h" |
| 17 | #include "conf_space_quirks.h" | 17 | #include "conf_space_quirks.h" |
| 18 | 18 | ||
| 19 | static bool permissive; | 19 | bool permissive; |
| 20 | module_param(permissive, bool, 0644); | 20 | module_param(permissive, bool, 0644); |
| 21 | 21 | ||
| 22 | /* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word, | 22 | /* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word, |
diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h index e56c934ad137..2e1d73d1d5d0 100644 --- a/drivers/xen/xen-pciback/conf_space.h +++ b/drivers/xen/xen-pciback/conf_space.h | |||
| @@ -64,6 +64,8 @@ struct config_field_entry { | |||
| 64 | void *data; | 64 | void *data; |
| 65 | }; | 65 | }; |
| 66 | 66 | ||
| 67 | extern bool permissive; | ||
| 68 | |||
| 67 | #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset) | 69 | #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset) |
| 68 | 70 | ||
| 69 | /* Add fields to a device - the add_fields macro expects to get a pointer to | 71 | /* Add fields to a device - the add_fields macro expects to get a pointer to |
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c index c5ee82587e8c..2d7369391472 100644 --- a/drivers/xen/xen-pciback/conf_space_header.c +++ b/drivers/xen/xen-pciback/conf_space_header.c | |||
| @@ -11,6 +11,10 @@ | |||
| 11 | #include "pciback.h" | 11 | #include "pciback.h" |
| 12 | #include "conf_space.h" | 12 | #include "conf_space.h" |
| 13 | 13 | ||
| 14 | struct pci_cmd_info { | ||
| 15 | u16 val; | ||
| 16 | }; | ||
| 17 | |||
| 14 | struct pci_bar_info { | 18 | struct pci_bar_info { |
| 15 | u32 val; | 19 | u32 val; |
| 16 | u32 len_val; | 20 | u32 len_val; |
| @@ -20,22 +24,36 @@ struct pci_bar_info { | |||
| 20 | #define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO)) | 24 | #define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO)) |
| 21 | #define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER) | 25 | #define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER) |
| 22 | 26 | ||
| 23 | static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data) | 27 | /* Bits guests are allowed to control in permissive mode. */ |
| 28 | #define PCI_COMMAND_GUEST (PCI_COMMAND_MASTER|PCI_COMMAND_SPECIAL| \ | ||
| 29 | PCI_COMMAND_INVALIDATE|PCI_COMMAND_VGA_PALETTE| \ | ||
| 30 | PCI_COMMAND_WAIT|PCI_COMMAND_FAST_BACK) | ||
| 31 | |||
| 32 | static void *command_init(struct pci_dev *dev, int offset) | ||
| 24 | { | 33 | { |
| 25 | int i; | 34 | struct pci_cmd_info *cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); |
| 26 | int ret; | 35 | int err; |
| 27 | 36 | ||
| 28 | ret = xen_pcibk_read_config_word(dev, offset, value, data); | 37 | if (!cmd) |
| 29 | if (!pci_is_enabled(dev)) | 38 | return ERR_PTR(-ENOMEM); |
| 30 | return ret; | 39 | |
| 31 | 40 | err = pci_read_config_word(dev, PCI_COMMAND, &cmd->val); | |
| 32 | for (i = 0; i < PCI_ROM_RESOURCE; i++) { | 41 | if (err) { |
| 33 | if (dev->resource[i].flags & IORESOURCE_IO) | 42 | kfree(cmd); |
| 34 | *value |= PCI_COMMAND_IO; | 43 | return ERR_PTR(err); |
| 35 | if (dev->resource[i].flags & IORESOURCE_MEM) | ||
| 36 | *value |= PCI_COMMAND_MEMORY; | ||
| 37 | } | 44 | } |
| 38 | 45 | ||
| 46 | return cmd; | ||
| 47 | } | ||
| 48 | |||
| 49 | static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data) | ||
| 50 | { | ||
| 51 | int ret = pci_read_config_word(dev, offset, value); | ||
| 52 | const struct pci_cmd_info *cmd = data; | ||
| 53 | |||
| 54 | *value &= PCI_COMMAND_GUEST; | ||
| 55 | *value |= cmd->val & ~PCI_COMMAND_GUEST; | ||
| 56 | |||
| 39 | return ret; | 57 | return ret; |
| 40 | } | 58 | } |
| 41 | 59 | ||
| @@ -43,6 +61,8 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data) | |||
| 43 | { | 61 | { |
| 44 | struct xen_pcibk_dev_data *dev_data; | 62 | struct xen_pcibk_dev_data *dev_data; |
| 45 | int err; | 63 | int err; |
| 64 | u16 val; | ||
| 65 | struct pci_cmd_info *cmd = data; | ||
| 46 | 66 | ||
| 47 | dev_data = pci_get_drvdata(dev); | 67 | dev_data = pci_get_drvdata(dev); |
| 48 | if (!pci_is_enabled(dev) && is_enable_cmd(value)) { | 68 | if (!pci_is_enabled(dev) && is_enable_cmd(value)) { |
| @@ -83,6 +103,19 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data) | |||
| 83 | } | 103 | } |
| 84 | } | 104 | } |
| 85 | 105 | ||
| 106 | cmd->val = value; | ||
| 107 | |||
| 108 | if (!permissive && (!dev_data || !dev_data->permissive)) | ||
| 109 | return 0; | ||
| 110 | |||
| 111 | /* Only allow the guest to control certain bits. */ | ||
| 112 | err = pci_read_config_word(dev, offset, &val); | ||
| 113 | if (err || val == value) | ||
| 114 | return err; | ||
| 115 | |||
| 116 | value &= PCI_COMMAND_GUEST; | ||
| 117 | value |= val & ~PCI_COMMAND_GUEST; | ||
| 118 | |||
| 86 | return pci_write_config_word(dev, offset, value); | 119 | return pci_write_config_word(dev, offset, value); |
| 87 | } | 120 | } |
| 88 | 121 | ||
| @@ -282,6 +315,8 @@ static const struct config_field header_common[] = { | |||
| 282 | { | 315 | { |
| 283 | .offset = PCI_COMMAND, | 316 | .offset = PCI_COMMAND, |
| 284 | .size = 2, | 317 | .size = 2, |
| 318 | .init = command_init, | ||
| 319 | .release = bar_release, | ||
| 285 | .u.w.read = command_read, | 320 | .u.w.read = command_read, |
| 286 | .u.w.write = command_write, | 321 | .u.w.write = command_write, |
| 287 | }, | 322 | }, |
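The conf_space_header.c changes stop exposing the raw command register to the guest: reads and permissive-mode writes are both merged against PCI_COMMAND_GUEST, so the guest only ever controls those bits while still reading back whatever it last wrote. Below is a self-contained sketch of the two merge expressions; guest_read() and merged_write() are illustrative stand-ins for command_read() and command_write(), and the bit values mirror <linux/pci_regs.h>.

	#include <stdio.h>
	#include <stdint.h>

	/* Standard PCI command-register bits. */
	#define PCI_COMMAND_IO		0x001
	#define PCI_COMMAND_MEMORY	0x002
	#define PCI_COMMAND_MASTER	0x004
	#define PCI_COMMAND_SPECIAL	0x008
	#define PCI_COMMAND_INVALIDATE	0x010
	#define PCI_COMMAND_VGA_PALETTE	0x020
	#define PCI_COMMAND_WAIT	0x080
	#define PCI_COMMAND_SERR	0x100
	#define PCI_COMMAND_FAST_BACK	0x200

	/* Same guest-controllable mask the patch defines. */
	#define PCI_COMMAND_GUEST	(PCI_COMMAND_MASTER | PCI_COMMAND_SPECIAL | \
					 PCI_COMMAND_INVALIDATE | PCI_COMMAND_VGA_PALETTE | \
					 PCI_COMMAND_WAIT | PCI_COMMAND_FAST_BACK)

	/* command_read(): guest-controlled bits come from hardware, everything
	 * else from the shadow value the guest last wrote (cmd->val). */
	static uint16_t guest_read(uint16_t hw, uint16_t shadow)
	{
		return (hw & PCI_COMMAND_GUEST) | (shadow & ~PCI_COMMAND_GUEST);
	}

	/* command_write() in permissive mode: only guest-controlled bits are
	 * taken from the guest; the rest keep the hardware's current state. */
	static uint16_t merged_write(uint16_t hw, uint16_t guest_value)
	{
		return (guest_value & PCI_COMMAND_GUEST) | (hw & ~PCI_COMMAND_GUEST);
	}

	int main(void)
	{
		uint16_t hw = PCI_COMMAND_IO | PCI_COMMAND_MEMORY;	/* host enabled decode */
		uint16_t guest = PCI_COMMAND_MASTER | PCI_COMMAND_SERR;	/* guest asks for both */

		uint16_t written = merged_write(hw, guest);
		printf("hardware after write: 0x%03x\n", written);		/* 0x007 */
		printf("guest reads back:     0x%03x\n", guest_read(written, guest));	/* 0x104 */
		return 0;
	}

With these example values the guest's attempt to set SERR never reaches the wire (0x007 is written, keeping the host's IO/MEM enables) yet reads back as 0x104, so the guest sees a consistent view of its own writes without being able to disturb bits it does not own.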
