Diffstat (limited to 'drivers')
246 files changed, 2151 insertions, 1318 deletions
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 0d92d0f915e9..c7ba948d253c 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -331,15 +331,6 @@ static int acpi_processor_get_info(struct acpi_device *device)
 pr->throttling.duty_width = acpi_gbl_FADT.duty_width;

 pr->pblk = object.processor.pblk_address;
-
-/*
- * We don't care about error returns - we just try to mark
- * these reserved so that nobody else is confused into thinking
- * that this region might be unused..
- *
- * (In particular, allocating the IO range for Cardbus)
- */
-request_region(pr->throttling.address, 6, "ACPI CPU throttle");
 }

 /*
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 3d5b8a099351..c1d138e128cb 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -754,7 +754,8 @@ static int acpi_video_bqc_quirk(struct acpi_video_device *device,
 }

 int acpi_video_get_levels(struct acpi_device *device,
-struct acpi_video_device_brightness **dev_br)
+struct acpi_video_device_brightness **dev_br,
+int *pmax_level)
 {
 union acpi_object *obj = NULL;
 int i, max_level = 0, count = 0, level_ac_battery = 0;
@@ -841,6 +842,8 @@ int acpi_video_get_levels(struct acpi_device *device,

 br->count = count;
 *dev_br = br;
+if (pmax_level)
+*pmax_level = max_level;

 out:
 kfree(obj);
@@ -869,7 +872,7 @@ acpi_video_init_brightness(struct acpi_video_device *device)
 struct acpi_video_device_brightness *br = NULL;
 int result = -EINVAL;

-result = acpi_video_get_levels(device->dev, &br);
+result = acpi_video_get_levels(device->dev, &br, &max_level);
 if (result)
 return result;
 device->brightness = br;
@@ -1737,7 +1740,7 @@ static void acpi_video_run_bcl_for_osi(struct acpi_video_bus *video)

 mutex_lock(&video->device_list_lock);
 list_for_each_entry(dev, &video->video_device_list, entry) {
-if (!acpi_video_device_lcd_query_levels(dev, &levels))
+if (!acpi_video_device_lcd_query_levels(dev->dev->handle, &levels))
 kfree(levels);
 }
 mutex_unlock(&video->device_list_lock);
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 0f18dbc9a37f..daceb80022b0 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -83,27 +83,22 @@ acpi_hw_write_multiple(u32 value,
 static u8
 acpi_hw_get_access_bit_width(struct acpi_generic_address *reg, u8 max_bit_width)
 {
-u64 address;
-
 if (!reg->access_width) {
+if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+max_bit_width = 32;
+}
+
 /*
 * Detect old register descriptors where only the bit_width field
-* makes senses. The target address is copied to handle possible
+* makes senses.
-* alignment issues.
 */
-ACPI_MOVE_64_TO_64(&address, &reg->address);
+if (reg->bit_width < max_bit_width &&
-if (!reg->bit_offset && reg->bit_width &&
+!reg->bit_offset && reg->bit_width &&
 ACPI_IS_POWER_OF_TWO(reg->bit_width) &&
-ACPI_IS_ALIGNED(reg->bit_width, 8) &&
+ACPI_IS_ALIGNED(reg->bit_width, 8)) {
-ACPI_IS_ALIGNED(address, reg->bit_width)) {
 return (reg->bit_width);
-} else {
-if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
-return (32);
-} else {
-return (max_bit_width);
-}
 }
+return (max_bit_width);
 } else {
 return (1 << (reg->access_width + 2));
 }
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 31e8da648fff..262ca31b86d9 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1051,7 +1051,7 @@ static int __init acpi_bus_init(void)
 * Maybe EC region is required at bus_scan/acpi_get_devices. So it
 * is necessary to enable it as early as possible.
 */
-acpi_boot_ec_enable();
+acpi_ec_dsdt_probe();

 printk(KERN_INFO PREFIX "Interpreter enabled\n");

diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 0e70181f150c..73c76d646064 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1446,10 +1446,30 @@ ec_parse_io_ports(struct acpi_resource *resource, void *context)
 return AE_OK;
 }

-int __init acpi_boot_ec_enable(void)
+static const struct acpi_device_id ec_device_ids[] = {
+{"PNP0C09", 0},
+{"", 0},
+};
+
+int __init acpi_ec_dsdt_probe(void)
 {
-if (!boot_ec)
+acpi_status status;
+
+if (boot_ec)
 return 0;
+
+/*
+ * Finding EC from DSDT if there is no ECDT EC available. When this
+ * function is invoked, ACPI tables have been fully loaded, we can
+ * walk namespace now.
+ */
+boot_ec = make_acpi_ec();
+if (!boot_ec)
+return -ENOMEM;
+status = acpi_get_devices(ec_device_ids[0].id,
+ec_parse_device, boot_ec, NULL);
+if (ACPI_FAILURE(status) || !boot_ec->handle)
+return -ENODEV;
 if (!ec_install_handlers(boot_ec)) {
 first_ec = boot_ec;
 return 0;
@@ -1457,11 +1477,6 @@ int __init acpi_boot_ec_enable(void)
 return -EFAULT;
 }

-static const struct acpi_device_id ec_device_ids[] = {
-{"PNP0C09", 0},
-{"", 0},
-};
-
 #if 0
 /*
 * Some EC firmware variations refuses to respond QR_EC when SCI_EVT is not
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 9bb0773d39bf..27cc7feabfe4 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -181,7 +181,7 @@ typedef int (*acpi_ec_query_func) (void *data);

 int acpi_ec_init(void);
 int acpi_ec_ecdt_probe(void);
-int acpi_boot_ec_enable(void);
+int acpi_ec_dsdt_probe(void);
 void acpi_ec_block_transactions(void);
 void acpi_ec_unblock_transactions(void);
 void acpi_ec_unblock_transactions_early(void);
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index f170d746336d..c72e64893d03 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -676,6 +676,15 @@ static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
 if (!pr->flags.throttling)
 return -ENODEV;

+/*
+ * We don't care about error returns - we just try to mark
+ * these reserved so that nobody else is confused into thinking
+ * that this region might be unused..
+ *
+ * (In particular, allocating the IO range for Cardbus)
+ */
+request_region(pr->throttling.address, 6, "ACPI CPU throttle");
+
 pr->throttling.state = 0;

 duty_mask = pr->throttling.state_count - 1;
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index a969a7e443be..85aaf2222587 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -181,13 +181,17 @@ static char *res_strings[] = {
 "reserved 27",
 "reserved 28",
 "reserved 29",
-"reserved 30",
+"reserved 30", /* FIXME: The strings between 30-40 might be wrong. */
 "reassembly abort: no buffers",
 "receive buffer overflow",
 "change in GFC",
 "receive buffer full",
 "low priority discard - no receive descriptor",
 "low priority discard - missing end of packet",
+"reserved 37",
+"reserved 38",
+"reserved 39",
+"reseverd 40",
 "reserved 41",
 "reserved 42",
 "reserved 43",
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 7d00f2994738..809dd1e02091 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -1128,7 +1128,7 @@ static int rx_pkt(struct atm_dev *dev)
 /* make the ptr point to the corresponding buffer desc entry */
 buf_desc_ptr += desc;
 if (!desc || (desc > iadev->num_rx_desc) ||
-((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) {
+((buf_desc_ptr->vc_index & 0xffff) >= iadev->num_vc)) {
 free_desc(dev, desc);
 IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
 return -1;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 31e73a7a40f2..6a48ed41963f 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -941,7 +941,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
 debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
 debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout);
 debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
-debugfs_create_file("flags", 0444, dir, &nbd, &nbd_dbg_flags_ops);
+debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);

 return 0;
 }
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index ca13df854639..2e6d1e9c3345 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -874,8 +874,12 @@ static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
 const struct blk_mq_queue_data *qd)
 {
 unsigned long flags;
-struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)hctx->driver_data;
+int qid = hctx->queue_num;
+struct blkfront_info *info = hctx->queue->queuedata;
+struct blkfront_ring_info *rinfo = NULL;

+BUG_ON(info->nr_rings <= qid);
+rinfo = &info->rinfo[qid];
 blk_mq_start_request(qd->rq);
 spin_lock_irqsave(&rinfo->ring_lock, flags);
 if (RING_FULL(&rinfo->ring))
@@ -901,20 +905,9 @@ out_busy:
 return BLK_MQ_RQ_QUEUE_BUSY;
 }

-static int blk_mq_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
-unsigned int index)
-{
-struct blkfront_info *info = (struct blkfront_info *)data;
-
-BUG_ON(info->nr_rings <= index);
-hctx->driver_data = &info->rinfo[index];
-return 0;
-}
-
 static struct blk_mq_ops blkfront_mq_ops = {
 .queue_rq = blkif_queue_rq,
 .map_queue = blk_mq_map_queue,
-.init_hctx = blk_mq_init_hctx,
 };

 static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
@@ -950,6 +943,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
 return PTR_ERR(rq);
 }

+rq->queuedata = info;
 queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);

 if (info->feature_discard) {
@@ -2149,6 +2143,8 @@ static int blkfront_resume(struct xenbus_device *dev)
 return err;

 err = talk_to_blkback(dev, info);
+if (!err)
+blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);

 /*
 * We have to wait for the backend to switch to
@@ -2485,10 +2481,23 @@ static void blkback_changed(struct xenbus_device *dev,
 break;

 case XenbusStateConnected:
-if (dev->state != XenbusStateInitialised) {
+/*
+ * talk_to_blkback sets state to XenbusStateInitialised
+ * and blkfront_connect sets it to XenbusStateConnected
+ * (if connection went OK).
+ *
+ * If the backend (or toolstack) decides to poke at backend
+ * state (and re-trigger the watch by setting the state repeatedly
+ * to XenbusStateConnected (4)) we need to deal with this.
+ * This is allowed as this is used to communicate to the guest
+ * that the size of disk has changed!
+ */
+if ((dev->state != XenbusStateInitialised) &&
+(dev->state != XenbusStateConnected)) {
 if (talk_to_blkback(dev, info))
 break;
 }
+
 blkfront_connect(info);
 break;

diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 53ddba26578c..98efbfcdb503 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -175,6 +175,7 @@ config COMMON_CLK_KEYSTONE
 config COMMON_CLK_NXP
 def_bool COMMON_CLK && (ARCH_LPC18XX || ARCH_LPC32XX)
 select REGMAP_MMIO if ARCH_LPC32XX
+select MFD_SYSCON if ARCH_LPC18XX
 ---help---
 Support for clock providers on NXP platforms.

diff --git a/drivers/clk/microchip/clk-pic32mzda.c b/drivers/clk/microchip/clk-pic32mzda.c
index 020a29acc5b0..51f54380474b 100644
--- a/drivers/clk/microchip/clk-pic32mzda.c
+++ b/drivers/clk/microchip/clk-pic32mzda.c
@@ -180,15 +180,15 @@ static int pic32mzda_clk_probe(struct platform_device *pdev)

 /* register fixed rate clocks */
 clks[POSCCLK] = clk_register_fixed_rate(&pdev->dev, "posc_clk", NULL,
-CLK_IS_ROOT, 24000000);
+0, 24000000);
 clks[FRCCLK] = clk_register_fixed_rate(&pdev->dev, "frc_clk", NULL,
-CLK_IS_ROOT, 8000000);
+0, 8000000);
 clks[BFRCCLK] = clk_register_fixed_rate(&pdev->dev, "bfrc_clk", NULL,
-CLK_IS_ROOT, 8000000);
+0, 8000000);
 clks[LPRCCLK] = clk_register_fixed_rate(&pdev->dev, "lprc_clk", NULL,
-CLK_IS_ROOT, 32000);
+0, 32000);
 clks[UPLLCLK] = clk_register_fixed_rate(&pdev->dev, "usbphy_clk", NULL,
-CLK_IS_ROOT, 24000000);
+0, 24000000);
 /* fixed rate (optional) clock */
 if (of_find_property(np, "microchip,pic32mzda-sosc", NULL)) {
 pr_info("pic32-clk: dt requests SOSC.\n");
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 36bc11a106aa..9009295f5134 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1832,7 +1832,7 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
 unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
 unsigned int target_freq)
 {
-clamp_val(target_freq, policy->min, policy->max);
+target_freq = clamp_val(target_freq, policy->min, policy->max);

 return cpufreq_driver->fast_switch(policy, target_freq);
 }
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 3a9c4325d6e2..ee367e9b7d2e 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -449,7 +449,7 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
 cpu->acpi_perf_data.states[0].core_frequency =
 policy->cpuinfo.max_freq / 1000;
 cpu->valid_pss_table = true;
-pr_info("_PPC limits will be enforced\n");
+pr_debug("_PPC limits will be enforced\n");

 return;

@@ -1460,6 +1460,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)

 intel_pstate_clear_update_util_hook(policy->cpu);

+pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
+policy->cpuinfo.max_freq, policy->max);
+
 cpu = all_cpu_data[0];
 if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
 policy->max < policy->cpuinfo.max_freq &&
@@ -1495,13 +1498,13 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 limits->max_sysfs_pct);
 limits->max_perf_pct = max(limits->min_policy_pct,
 limits->max_perf_pct);
-limits->max_perf = round_up(limits->max_perf, FRAC_BITS);

 /* Make sure min_perf_pct <= max_perf_pct */
 limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);

 limits->min_perf = div_fp(limits->min_perf_pct, 100);
 limits->max_perf = div_fp(limits->max_perf_pct, 100);
+limits->max_perf = round_up(limits->max_perf, FRAC_BITS);

 out:
 intel_pstate_set_update_util_hook(policy->cpu);
@@ -1558,8 +1561,11 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)

 /* cpuinfo and default policy values */
 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
-policy->cpuinfo.max_freq =
+update_turbo_state();
-cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+policy->cpuinfo.max_freq = limits->turbo_disabled ?
+cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
+policy->cpuinfo.max_freq *= cpu->pstate.scaling;
+
 intel_pstate_init_acpi_perf_limits(policy);
 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
 cpumask_set_cpu(policy->cpu, policy->cpus);
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 52c7395cb8d8..0d0d4529ee36 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -122,6 +122,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
 struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
 struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
 unsigned int unit;
+u32 unit_size;
 int ret;

 if (!ctx->u.aes.key_len)
@@ -133,11 +134,17 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
 if (!req->info)
 return -EINVAL;

-for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++)
+unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
-if (!(req->nbytes & (unit_size_map[unit].size - 1)))
+if (req->nbytes <= unit_size_map[0].size) {
-break;
+for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) {
+if (!(req->nbytes & (unit_size_map[unit].size - 1))) {
+unit_size = unit_size_map[unit].value;
+break;
+}
+}
+}

-if ((unit_size_map[unit].value == CCP_XTS_AES_UNIT_SIZE__LAST) ||
+if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) ||
 (ctx->u.aes.key_len != AES_KEYSIZE_128)) {
 /* Use the fallback to process the request for any
 * unsupported unit sizes or key sizes
@@ -158,7 +165,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
 rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
 rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
 : CCP_AES_ACTION_DECRYPT;
-rctx->cmd.u.xts.unit_size = unit_size_map[unit].value;
+rctx->cmd.u.xts.unit_size = unit_size;
 rctx->cmd.u.xts.key = &ctx->u.aes.key_sg;
 rctx->cmd.u.xts.key_len = ctx->u.aes.key_len;
 rctx->cmd.u.xts.iv = &rctx->iv_sg;
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 6eefaa2fe58f..63464e86f2b1 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -1986,7 +1986,7 @@ err_algs:
 &dd->pdata->algs_info[i].algs_list[j]);
 err_pm:
 pm_runtime_disable(dev);
-if (dd->polling_mode)
+if (!dd->polling_mode)
 dma_release_channel(dd->dma_lch);
 data_err:
 dev_err(dev, "initialization failed.\n");
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 4a2c07ee6677..6355ab38d630 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -33,6 +33,7 @@
 #include <linux/seq_file.h>
 #include <linux/poll.h>
 #include <linux/reservation.h>
+#include <linux/mm.h>

 #include <uapi/linux/dma-buf.h>

@@ -90,7 +91,7 @@ static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
 dmabuf = file->private_data;

 /* check for overflowing the buffer's size */
-if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+if (vma->vm_pgoff + vma_pages(vma) >
 dmabuf->size >> PAGE_SHIFT)
 return -EINVAL;

@@ -723,11 +724,11 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
 return -EINVAL;

 /* check for offset overflow */
-if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff)
+if (pgoff + vma_pages(vma) < pgoff)
 return -EOVERFLOW;

 /* check for overflowing the buffer's size */
-if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+if (pgoff + vma_pages(vma) >
 dmabuf->size >> PAGE_SHIFT)
 return -EINVAL;

diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index c0bd5722c997..9566a62ad8e3 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -35,6 +35,17 @@
 #include <linux/reservation.h>
 #include <linux/export.h>

+/**
+ * DOC: Reservation Object Overview
+ *
+ * The reservation object provides a mechanism to manage shared and
+ * exclusive fences associated with a buffer. A reservation object
+ * can have attached one exclusive fence (normally associated with
+ * write operations) or N shared fences (read operations). The RCU
+ * mechanism is used to protect read access to fences from locked
+ * write-side updates.
+ */
+
 DEFINE_WW_CLASS(reservation_ww_class);
 EXPORT_SYMBOL(reservation_ww_class);

@@ -43,9 +54,17 @@ EXPORT_SYMBOL(reservation_seqcount_class);

 const char reservation_seqcount_string[] = "reservation_seqcount";
 EXPORT_SYMBOL(reservation_seqcount_string);
-/*
+
-* Reserve space to add a shared fence to a reservation_object,
+/**
-* must be called with obj->lock held.
+ * reservation_object_reserve_shared - Reserve space to add a shared
+ * fence to a reservation_object.
+ * @obj: reservation object
+ *
+ * Should be called before reservation_object_add_shared_fence(). Must
+ * be called with obj->lock held.
+ *
+ * RETURNS
+ * Zero for success, or -errno
 */
 int reservation_object_reserve_shared(struct reservation_object *obj)
 {
@@ -180,7 +199,11 @@ done:
 fence_put(old_fence);
 }

-/*
+/**
+ * reservation_object_add_shared_fence - Add a fence to a shared slot
+ * @obj: the reservation object
+ * @fence: the shared fence to add
+ *
 * Add a fence to a shared slot, obj->lock must be held, and
 * reservation_object_reserve_shared_fence has been called.
 */
@@ -200,6 +223,13 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
 }
 EXPORT_SYMBOL(reservation_object_add_shared_fence);

+/**
+ * reservation_object_add_excl_fence - Add an exclusive fence.
+ * @obj: the reservation object
+ * @fence: the shared fence to add
+ *
+ * Add a fence to the exclusive slot. The obj->lock must be held.
+ */
 void reservation_object_add_excl_fence(struct reservation_object *obj,
 struct fence *fence)
 {
@@ -233,6 +263,18 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
 }
 EXPORT_SYMBOL(reservation_object_add_excl_fence);

+/**
+ * reservation_object_get_fences_rcu - Get an object's shared and exclusive
+ * fences without update side lock held
+ * @obj: the reservation object
+ * @pfence_excl: the returned exclusive fence (or NULL)
+ * @pshared_count: the number of shared fences returned
+ * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
+ * the required size, and must be freed by caller)
+ *
+ * RETURNS
+ * Zero or -errno
+ */
 int reservation_object_get_fences_rcu(struct reservation_object *obj,
 struct fence **pfence_excl,
 unsigned *pshared_count,
@@ -319,6 +361,18 @@ unlock:
 }
 EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);

+/**
+ * reservation_object_wait_timeout_rcu - Wait on reservation's objects
+ * shared and/or exclusive fences.
+ * @obj: the reservation object
+ * @wait_all: if true, wait on all fences, else wait on just exclusive fence
+ * @intr: if true, do interruptible wait
+ * @timeout: timeout value in jiffies or zero to return immediately
+ *
+ * RETURNS
+ * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
+ * greater than zer on success.
+ */
 long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
 bool wait_all, bool intr,
 unsigned long timeout)
@@ -416,6 +470,16 @@ reservation_object_test_signaled_single(struct fence *passed_fence)
 return ret;
 }

+/**
+ * reservation_object_test_signaled_rcu - Test if a reservation object's
+ * fences have been signaled.
+ * @obj: the reservation object
+ * @test_all: if true, test all fences, otherwise only test the exclusive
+ * fence
+ *
+ * RETURNS
+ * true if all fences signaled, else false
+ */
 bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
 bool test_all)
 {
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 6aa256b0a1ed..c3ee3ad98a63 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -565,7 +565,8 @@ void edac_mc_reset_delay_period(unsigned long value)
 list_for_each(item, &mc_devices) {
 mci = list_entry(item, struct mem_ctl_info, link);

-edac_mod_work(&mci->work, value);
+if (mci->op_state == OP_RUNNING_POLL)
+edac_mod_work(&mci->work, value);
 }
 mutex_unlock(&mem_ctls_mutex);
 }
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index b4d0bf6534cf..6744d88bdea8 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -239,8 +239,11 @@ static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
 { 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
 };

-#define RIR_RNK_TGT(reg) GET_BITFIELD(reg, 16, 19)
+#define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \
-#define RIR_OFFSET(reg) GET_BITFIELD(reg, 2, 14)
+GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19))
+
+#define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \
+GET_BITFIELD(reg, 2, 15) : GET_BITFIELD(reg, 2, 14))

 /* Device 16, functions 2-7 */

@@ -326,6 +329,7 @@ struct pci_id_descr {
 struct pci_id_table {
 const struct pci_id_descr *descr;
 int n_devs;
+enum type type;
 };

 struct sbridge_dev {
@@ -394,9 +398,14 @@ static const struct pci_id_descr pci_dev_descr_sbridge[] = {
 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0) },
 };

-#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
+#define PCI_ID_TABLE_ENTRY(A, T) { \
+.descr = A, \
+.n_devs = ARRAY_SIZE(A), \
+.type = T \
+}
+
 static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
-PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge),
+PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, SANDY_BRIDGE),
 {0,} /* 0 terminated list. */
 };

@@ -463,7 +472,7 @@ static const struct pci_id_descr pci_dev_descr_ibridge[] = {
 };

 static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
-PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge),
+PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, IVY_BRIDGE),
 {0,} /* 0 terminated list. */
 };

@@ -536,7 +545,7 @@ static const struct pci_id_descr pci_dev_descr_haswell[] = {
 };

 static const struct pci_id_table pci_dev_descr_haswell_table[] = {
-PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell),
+PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, HASWELL),
 {0,} /* 0 terminated list. */
 };

@@ -580,7 +589,7 @@ static const struct pci_id_descr pci_dev_descr_knl[] = {
 };

 static const struct pci_id_table pci_dev_descr_knl_table[] = {
-PCI_ID_TABLE_ENTRY(pci_dev_descr_knl),
+PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, KNIGHTS_LANDING),
 {0,}
 };

@@ -648,7 +657,7 @@ static const struct pci_id_descr pci_dev_descr_broadwell[] = {
 };

 static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
-PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell),
+PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, BROADWELL),
 {0,} /* 0 terminated list. */
 };

@@ -1894,14 +1903,14 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
 pci_read_config_dword(pvt->pci_tad[i],
 rir_offset[j][k],
 &reg);
-tmp_mb = RIR_OFFSET(reg) << 6;
+tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6;

 gb = div_u64_rem(tmp_mb, 1024, &mb);
 edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
 i, j, k,
 gb, (mb*1000)/1024,
 ((u64)tmp_mb) << 20L,
-(u32)RIR_RNK_TGT(reg),
+(u32)RIR_RNK_TGT(pvt->info.type, reg),
 reg);
 }
 }
@@ -2234,7 +2243,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
 pci_read_config_dword(pvt->pci_tad[ch_add + base_ch],
 rir_offset[n_rir][idx],
 &reg);
-*rank = RIR_RNK_TGT(reg);
+*rank = RIR_RNK_TGT(pvt->info.type, reg);

 edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
 n_rir,
@@ -3357,12 +3366,12 @@ fail0:
 #define ICPU(model, table) \
 { X86_VENDOR_INTEL, 6, model, 0, (unsigned long)&table }

-/* Order here must match "enum type" */
 static const struct x86_cpu_id sbridge_cpuids[] = {
 ICPU(0x2d, pci_dev_descr_sbridge_table), /* SANDY_BRIDGE */
 ICPU(0x3e, pci_dev_descr_ibridge_table), /* IVY_BRIDGE */
 ICPU(0x3f, pci_dev_descr_haswell_table), /* HASWELL */
 ICPU(0x4f, pci_dev_descr_broadwell_table), /* BROADWELL */
+ICPU(0x56, pci_dev_descr_broadwell_table), /* BROADWELL-DE */
 ICPU(0x57, pci_dev_descr_knl_table), /* KNIGHTS_LANDING */
 { }
 };
@@ -3398,7 +3407,7 @@ static int sbridge_probe(const struct x86_cpu_id *id)
 mc, mc + 1, num_mc);

 sbridge_dev->mc = mc++;
-rc = sbridge_register_mci(sbridge_dev, id - sbridge_cpuids);
+rc = sbridge_register_mci(sbridge_dev, ptable->type);
 if (unlikely(rc < 0))
 goto fail1;
 }
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index a850cbc48d8d..c49d50e68aee 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -174,6 +174,7 @@ static __init void reserve_regions(void)
 {
 efi_memory_desc_t *md;
 u64 paddr, npages, size;
+int resv;

 if (efi_enabled(EFI_DBG))
 pr_info("Processing EFI memory map:\n");
@@ -190,12 +191,14 @@ static __init void reserve_regions(void)
 paddr = md->phys_addr;
 npages = md->num_pages;

+resv = is_reserve_region(md);
 if (efi_enabled(EFI_DBG)) {
 char buf[64];

-pr_info(" 0x%012llx-0x%012llx %s",
+pr_info(" 0x%012llx-0x%012llx %s%s\n",
 paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
-efi_md_typeattr_format(buf, sizeof(buf), md));
+efi_md_typeattr_format(buf, sizeof(buf), md),
+resv ? "*" : "");
 }

 memrange_efi_to_native(&paddr, &npages);
@@ -204,14 +207,9 @@ static __init void reserve_regions(void)
 if (is_normal_ram(md))
 early_init_dt_add_memory_arch(paddr, size);

-if (is_reserve_region(md)) {
+if (resv)
 memblock_mark_nomap(paddr, size);
-if (efi_enabled(EFI_DBG))
-pr_cont("*");
-}

-if (efi_enabled(EFI_DBG))
-pr_cont("\n");
 }

 set_bit(EFI_MEMMAP, &efi.flags);
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 48da857f4774..a116609b1914 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -33,6 +33,7 @@ config ARCH_REQUIRE_GPIOLIB

 menuconfig GPIOLIB
 bool "GPIO Support"
+select ANON_INODES
 help
 This enables GPIO support through the generic GPIO library.
 You only need to enable this, if you also want to enable
diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c
index 1a647c07be67..fcf776971ca9 100644
--- a/drivers/gpio/gpio-104-dio-48e.c
+++ b/drivers/gpio/gpio-104-dio-48e.c
@@ -75,7 +75,7 @@ static int dio48e_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 {
 struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
 const unsigned io_port = offset / 8;
-const unsigned control_port = io_port / 2;
+const unsigned int control_port = io_port / 3;
 const unsigned control_addr = dio48egpio->base + 3 + control_port*4;
 unsigned long flags;
 unsigned control;
@@ -115,7 +115,7 @@ static int dio48e_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
 {
 struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
 const unsigned io_port = offset / 8;
-const unsigned control_port = io_port / 2;
+const unsigned int control_port = io_port / 3;
 const unsigned mask = BIT(offset % 8);
 const unsigned control_addr = dio48egpio->base + 3 + control_port*4;
 const unsigned out_port = (io_port > 2) ? io_port + 1 : io_port;
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 9aabc48ff5de..953e4b829e32 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -547,11 +547,11 @@ static void bcm_kona_gpio_reset(struct bcm_kona_gpio *kona_gpio)
 /* disable interrupts and clear status */
 for (i = 0; i < kona_gpio->num_bank; i++) {
 /* Unlock the entire bank first */
-bcm_kona_gpio_write_lock_regs(kona_gpio, i, UNLOCK_CODE);
+bcm_kona_gpio_write_lock_regs(reg_base, i, UNLOCK_CODE);
 writel(0xffffffff, reg_base + GPIO_INT_MASK(i));
 writel(0xffffffff, reg_base + GPIO_INT_STATUS(i));
 /* Now re-lock the bank */
-bcm_kona_gpio_write_lock_regs(kona_gpio, i, LOCK_CODE);
+bcm_kona_gpio_write_lock_regs(reg_base, i, LOCK_CODE);
 }
 }

diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index d39014daeef9..fc5f197906ac 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -29,7 +29,6 @@

 #include <mach/hardware.h>
 #include <mach/platform.h>
-#include <mach/irqs.h>

 #define LPC32XX_GPIO_P3_INP_STATE _GPREG(0x000)
 #define LPC32XX_GPIO_P3_OUTP_SET _GPREG(0x004)
@@ -371,61 +370,16 @@ static int lpc32xx_gpio_request(struct gpio_chip *chip, unsigned pin)

 static int lpc32xx_gpio_to_irq_p01(struct gpio_chip *chip, unsigned offset)
 {
-return IRQ_LPC32XX_P0_P1_IRQ;
+return -ENXIO;
 }

-static const char lpc32xx_gpio_to_irq_gpio_p3_table[] = {
-IRQ_LPC32XX_GPIO_00,
-IRQ_LPC32XX_GPIO_01,
-IRQ_LPC32XX_GPIO_02,
-IRQ_LPC32XX_GPIO_03,
-IRQ_LPC32XX_GPIO_04,
-IRQ_LPC32XX_GPIO_05,
-};
-
 static int lpc32xx_gpio_to_irq_gpio_p3(struct gpio_chip *chip, unsigned offset)
 {
-if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpio_p3_table))
-return lpc32xx_gpio_to_irq_gpio_p3_table[offset];
 return -ENXIO;
 }

-static const char lpc32xx_gpio_to_irq_gpi_p3_table[] = {
-IRQ_LPC32XX_GPI_00,
-IRQ_LPC32XX_GPI_01,
-IRQ_LPC32XX_GPI_02,
-IRQ_LPC32XX_GPI_03,
-IRQ_LPC32XX_GPI_04,
-IRQ_LPC32XX_GPI_05,
-IRQ_LPC32XX_GPI_06,
-IRQ_LPC32XX_GPI_07,
-IRQ_LPC32XX_GPI_08,
-IRQ_LPC32XX_GPI_09,
--ENXIO, /* 10 */
--ENXIO, /* 11 */
--ENXIO, /* 12 */
--ENXIO, /* 13 */
--ENXIO, /* 14 */
--ENXIO, /* 15 */
--ENXIO, /* 16 */
--ENXIO, /* 17 */
--ENXIO, /* 18 */
-IRQ_LPC32XX_GPI_19,
--ENXIO, /* 20 */
--ENXIO, /* 21 */
--ENXIO, /* 22 */
--ENXIO, /* 23 */
--ENXIO, /* 24 */
--ENXIO, /* 25 */
--ENXIO, /* 26 */
--ENXIO, /* 27 */
-IRQ_LPC32XX_GPI_28,
-};
-
 static int lpc32xx_gpio_to_irq_gpi_p3(struct gpio_chip *chip, unsigned offset)
 {
-if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpi_p3_table))
-return lpc32xx_gpio_to_irq_gpi_p3_table[offset];
 return -ENXIO;
 }

diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 75c6355b018d..e72794e463aa 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -709,7 +709,13 @@ static int zynq_gpio_probe(struct platform_device *pdev)
 dev_err(&pdev->dev, "input clock not found.\n");
 return PTR_ERR(gpio->clk);
 }
+ret = clk_prepare_enable(gpio->clk);
+if (ret) {
+dev_err(&pdev->dev, "Unable to enable clock.\n");
+return ret;
+}

+pm_runtime_set_active(&pdev->dev);
 pm_runtime_enable(&pdev->dev);
 ret = pm_runtime_get_sync(&pdev->dev);
 if (ret < 0)
@@ -747,6 +753,7 @@ err_pm_put:
 pm_runtime_put(&pdev->dev);
 err_pm_dis:
 pm_runtime_disable(&pdev->dev);
+clk_disable_unprepare(gpio->clk);

 return ret;
 }
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index d22dcc38179d..4aabddb38b59 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -16,6 +16,7 @@
 #include <linux/errno.h>
 #include <linux/module.h>
 #include <linux/io.h>
+#include <linux/io-mapping.h>
 #include <linux/gpio/consumer.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index d407f904a31c..58d822d7e8da 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -20,6 +20,7 @@
 #include <linux/cdev.h>
 #include <linux/fs.h>
 #include <linux/uaccess.h>
+#include <linux/compat.h>
 #include <uapi/linux/gpio.h>

 #include "gpiolib.h"
@@ -316,7 +317,7 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 struct gpio_device *gdev = filp->private_data;
 struct gpio_chip *chip = gdev->chip;
-int __user *ip = (int __user *)arg;
+void __user *ip = (void __user *)arg;

 /* We fail any subsequent ioctl():s when the chip is gone */
 if (!chip)
@@ -388,6 +389,14 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 return -EINVAL;
 }

+#ifdef CONFIG_COMPAT
+static long gpio_ioctl_compat(struct file *filp, unsigned int cmd,
+unsigned long arg)
+{
+return gpio_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
 /**
 * gpio_chrdev_open() - open the chardev for ioctl operations
 * @inode: inode for this chardev
@@ -431,14 +440,15 @@ static const struct file_operations gpio_fileops = {
 .owner = THIS_MODULE,
 .llseek = noop_llseek,
 .unlocked_ioctl = gpio_ioctl,
-.compat_ioctl = gpio_ioctl,
+#ifdef CONFIG_COMPAT
+.compat_ioctl = gpio_ioctl_compat,
+#endif
 };

 static void gpiodevice_release(struct device *dev)
 {
 struct gpio_device *gdev = dev_get_drvdata(dev);

-cdev_del(&gdev->chrdev);
 list_del(&gdev->list);
 ida_simple_remove(&gpio_ida, gdev->id);
 kfree(gdev->label);
@@ -471,7 +481,6 @@ static int gpiochip_setup_dev(struct gpio_device *gdev)

 /* From this point, the .release() function cleans up gpio_device */
 gdev->dev.release = gpiodevice_release;
-get_device(&gdev->dev);
 pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n",
 __func__, gdev->base, gdev->base + gdev->ngpio - 1,
 dev_name(&gdev->dev), gdev->chip->label ? : "generic");
@@ -618,6 +627,8 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
 goto err_free_label;
 }

+spin_unlock_irqrestore(&gpio_lock, flags);
+
 for (i = 0; i < chip->ngpio; i++) {
 struct gpio_desc *desc = &gdev->descs[i];

@@ -649,8 +660,6 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
 }
 }

-spin_unlock_irqrestore(&gpio_lock, flags);
-
 #ifdef CONFIG_PINCTRL
 INIT_LIST_HEAD(&gdev->pin_ranges);
 #endif
@@ -759,6 +768,8 @@ void gpiochip_remove(struct gpio_chip *chip)
 * be removed, else it will be dangling until the last user is
| 760 | * gone. | 769 | * gone. |
| 761 | */ | 770 | */ |
| 771 | cdev_del(&gdev->chrdev); | ||
| 772 | device_del(&gdev->dev); | ||
| 762 | put_device(&gdev->dev); | 773 | put_device(&gdev->dev); |
| 763 | } | 774 | } |
| 764 | EXPORT_SYMBOL_GPL(gpiochip_remove); | 775 | EXPORT_SYMBOL_GPL(gpiochip_remove); |
| @@ -858,7 +869,7 @@ struct gpio_chip *gpiochip_find(void *data, | |||
| 858 | 869 | ||
| 859 | spin_lock_irqsave(&gpio_lock, flags); | 870 | spin_lock_irqsave(&gpio_lock, flags); |
| 860 | list_for_each_entry(gdev, &gpio_devices, list) | 871 | list_for_each_entry(gdev, &gpio_devices, list) |
| 861 | if (match(gdev->chip, data)) | 872 | if (gdev->chip && match(gdev->chip, data)) |
| 862 | break; | 873 | break; |
| 863 | 874 | ||
| 864 | /* No match? */ | 875 | /* No match? */ |
| @@ -1356,10 +1367,13 @@ done: | |||
| 1356 | /* | 1367 | /* |
| 1357 | * This descriptor validation needs to be inserted verbatim into each | 1368 | * This descriptor validation needs to be inserted verbatim into each |
| 1358 | * function taking a descriptor, so we need to use a preprocessor | 1369 | * function taking a descriptor, so we need to use a preprocessor |
| 1359 | * macro to avoid endless duplication. | 1370 | * macro to avoid endless duplication. If the desc is NULL it is an |
| 1371 | * optional GPIO and calls should just bail out. | ||
| 1360 | */ | 1372 | */ |
| 1361 | #define VALIDATE_DESC(desc) do { \ | 1373 | #define VALIDATE_DESC(desc) do { \ |
| 1362 | if (!desc || !desc->gdev) { \ | 1374 | if (!desc) \ |
| 1375 | return 0; \ | ||
| 1376 | if (!desc->gdev) { \ | ||
| 1363 | pr_warn("%s: invalid GPIO\n", __func__); \ | 1377 | pr_warn("%s: invalid GPIO\n", __func__); \ |
| 1364 | return -EINVAL; \ | 1378 | return -EINVAL; \ |
| 1365 | } \ | 1379 | } \ |
| @@ -1370,7 +1384,9 @@ done: | |||
| 1370 | } } while (0) | 1384 | } } while (0) |
| 1371 | 1385 | ||
| 1372 | #define VALIDATE_DESC_VOID(desc) do { \ | 1386 | #define VALIDATE_DESC_VOID(desc) do { \ |
| 1373 | if (!desc || !desc->gdev) { \ | 1387 | if (!desc) \ |
| 1388 | return; \ | ||
| 1389 | if (!desc->gdev) { \ | ||
| 1374 | pr_warn("%s: invalid GPIO\n", __func__); \ | 1390 | pr_warn("%s: invalid GPIO\n", __func__); \ |
| 1375 | return; \ | 1391 | return; \ |
| 1376 | } \ | 1392 | } \ |
| @@ -2066,17 +2082,30 @@ EXPORT_SYMBOL_GPL(gpiod_to_irq); | |||
| 2066 | */ | 2082 | */ |
| 2067 | int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset) | 2083 | int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset) |
| 2068 | { | 2084 | { |
| 2069 | if (offset >= chip->ngpio) | 2085 | struct gpio_desc *desc; |
| 2070 | return -EINVAL; | 2086 | |
| 2087 | desc = gpiochip_get_desc(chip, offset); | ||
| 2088 | if (IS_ERR(desc)) | ||
| 2089 | return PTR_ERR(desc); | ||
| 2090 | |||
| 2091 | /* Flush direction if something changed behind our back */ | ||
| 2092 | if (chip->get_direction) { | ||
| 2093 | int dir = chip->get_direction(chip, offset); | ||
| 2094 | |||
| 2095 | if (dir) | ||
| 2096 | clear_bit(FLAG_IS_OUT, &desc->flags); | ||
| 2097 | else | ||
| 2098 | set_bit(FLAG_IS_OUT, &desc->flags); | ||
| 2099 | } | ||
| 2071 | 2100 | ||
| 2072 | if (test_bit(FLAG_IS_OUT, &chip->gpiodev->descs[offset].flags)) { | 2101 | if (test_bit(FLAG_IS_OUT, &desc->flags)) { |
| 2073 | chip_err(chip, | 2102 | chip_err(chip, |
| 2074 | "%s: tried to flag a GPIO set as output for IRQ\n", | 2103 | "%s: tried to flag a GPIO set as output for IRQ\n", |
| 2075 | __func__); | 2104 | __func__); |
| 2076 | return -EIO; | 2105 | return -EIO; |
| 2077 | } | 2106 | } |
| 2078 | 2107 | ||
| 2079 | set_bit(FLAG_USED_AS_IRQ, &chip->gpiodev->descs[offset].flags); | 2108 | set_bit(FLAG_USED_AS_IRQ, &desc->flags); |
| 2080 | return 0; | 2109 | return 0; |
| 2081 | } | 2110 | } |
| 2082 | EXPORT_SYMBOL_GPL(gpiochip_lock_as_irq); | 2111 | EXPORT_SYMBOL_GPL(gpiochip_lock_as_irq); |
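The gpiolib changes widen the ioctl argument to void __user *, route 32-bit callers through a compat handler that translates the pointer with compat_ptr(), and only register .compat_ioctl when CONFIG_COMPAT is set. A minimal sketch of that wiring; example_ioctl() and the command value are placeholders, not the real gpiolib symbols:

#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static long example_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	void __user *ip = (void __user *)arg;
	u32 value = 0;

	/* One illustrative command (real code would use an _IOR() constant). */
	if (cmd == 0)
		return copy_to_user(ip, &value, sizeof(value)) ? -EFAULT : 0;

	return -EINVAL;
}

#ifdef CONFIG_COMPAT
static long example_ioctl_compat(struct file *filp, unsigned int cmd,
				 unsigned long arg)
{
	/* compat_ptr() turns a 32-bit user pointer into a native one. */
	return example_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations example_fileops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= example_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= example_ioctl_compat,
#endif
};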
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 992f00b65be4..01c36b8d6222 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
| @@ -799,6 +799,7 @@ struct amdgpu_ring { | |||
| 799 | unsigned cond_exe_offs; | 799 | unsigned cond_exe_offs; |
| 800 | u64 cond_exe_gpu_addr; | 800 | u64 cond_exe_gpu_addr; |
| 801 | volatile u32 *cond_exe_cpu_addr; | 801 | volatile u32 *cond_exe_cpu_addr; |
| 802 | int vmid; | ||
| 802 | }; | 803 | }; |
| 803 | 804 | ||
| 804 | /* | 805 | /* |
| @@ -936,7 +937,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, | |||
| 936 | unsigned vm_id, uint64_t pd_addr, | 937 | unsigned vm_id, uint64_t pd_addr, |
| 937 | uint32_t gds_base, uint32_t gds_size, | 938 | uint32_t gds_base, uint32_t gds_size, |
| 938 | uint32_t gws_base, uint32_t gws_size, | 939 | uint32_t gws_base, uint32_t gws_size, |
| 939 | uint32_t oa_base, uint32_t oa_size); | 940 | uint32_t oa_base, uint32_t oa_size, |
| 941 | bool vmid_switch); | ||
| 940 | void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id); | 942 | void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id); |
| 941 | uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr); | 943 | uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr); |
| 942 | int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, | 944 | int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 199f76baf22c..8943099eb135 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | |||
| @@ -696,6 +696,17 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type) | |||
| 696 | return result; | 696 | return result; |
| 697 | } | 697 | } |
| 698 | 698 | ||
| 699 | static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type) | ||
| 700 | { | ||
| 701 | CGS_FUNC_ADEV; | ||
| 702 | if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) { | ||
| 703 | release_firmware(adev->pm.fw); | ||
| 704 | return 0; | ||
| 705 | } | ||
| 706 | /* cannot release other firmware because they are not created by cgs */ | ||
| 707 | return -EINVAL; | ||
| 708 | } | ||
| 709 | |||
| 699 | static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, | 710 | static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, |
| 700 | enum cgs_ucode_id type, | 711 | enum cgs_ucode_id type, |
| 701 | struct cgs_firmware_info *info) | 712 | struct cgs_firmware_info *info) |
| @@ -1125,6 +1136,7 @@ static const struct cgs_ops amdgpu_cgs_ops = { | |||
| 1125 | amdgpu_cgs_pm_query_clock_limits, | 1136 | amdgpu_cgs_pm_query_clock_limits, |
| 1126 | amdgpu_cgs_set_camera_voltages, | 1137 | amdgpu_cgs_set_camera_voltages, |
| 1127 | amdgpu_cgs_get_firmware_info, | 1138 | amdgpu_cgs_get_firmware_info, |
| 1139 | amdgpu_cgs_rel_firmware, | ||
| 1128 | amdgpu_cgs_set_powergating_state, | 1140 | amdgpu_cgs_set_powergating_state, |
| 1129 | amdgpu_cgs_set_clockgating_state, | 1141 | amdgpu_cgs_set_clockgating_state, |
| 1130 | amdgpu_cgs_get_active_displays_info, | 1142 | amdgpu_cgs_get_active_displays_info, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index bb8b149786d7..964f31404f17 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | |||
| @@ -827,8 +827,10 @@ static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg) | |||
| 827 | */ | 827 | */ |
| 828 | static void amdgpu_atombios_fini(struct amdgpu_device *adev) | 828 | static void amdgpu_atombios_fini(struct amdgpu_device *adev) |
| 829 | { | 829 | { |
| 830 | if (adev->mode_info.atom_context) | 830 | if (adev->mode_info.atom_context) { |
| 831 | kfree(adev->mode_info.atom_context->scratch); | 831 | kfree(adev->mode_info.atom_context->scratch); |
| 832 | kfree(adev->mode_info.atom_context->iio); | ||
| 833 | } | ||
| 832 | kfree(adev->mode_info.atom_context); | 834 | kfree(adev->mode_info.atom_context); |
| 833 | adev->mode_info.atom_context = NULL; | 835 | adev->mode_info.atom_context = NULL; |
| 834 | kfree(adev->mode_info.atom_card_info); | 836 | kfree(adev->mode_info.atom_card_info); |
| @@ -1325,6 +1327,11 @@ static int amdgpu_fini(struct amdgpu_device *adev) | |||
| 1325 | adev->ip_block_status[i].valid = false; | 1327 | adev->ip_block_status[i].valid = false; |
| 1326 | } | 1328 | } |
| 1327 | 1329 | ||
| 1330 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { | ||
| 1331 | if (adev->ip_blocks[i].funcs->late_fini) | ||
| 1332 | adev->ip_blocks[i].funcs->late_fini((void *)adev); | ||
| 1333 | } | ||
| 1334 | |||
| 1328 | return 0; | 1335 | return 0; |
| 1329 | } | 1336 | } |
| 1330 | 1337 | ||
| @@ -1513,8 +1520,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, | |||
| 1513 | amdgpu_atombios_has_gpu_virtualization_table(adev); | 1520 | amdgpu_atombios_has_gpu_virtualization_table(adev); |
| 1514 | 1521 | ||
| 1515 | /* Post card if necessary */ | 1522 | /* Post card if necessary */ |
| 1516 | if (!amdgpu_card_posted(adev) || | 1523 | if (!amdgpu_card_posted(adev)) { |
| 1517 | adev->virtualization.supports_sr_iov) { | ||
| 1518 | if (!adev->bios) { | 1524 | if (!adev->bios) { |
| 1519 | dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n"); | 1525 | dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n"); |
| 1520 | return -EINVAL; | 1526 | return -EINVAL; |
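amdgpu_fini() gains a second pass that walks the IP blocks in reverse order and calls the new, optional late_fini hook once every sw_fini has run. The loop itself is short; a sketch, assuming the amdgpu structures used in the hunks above:

/* Sketch: reverse-order late_fini pass over a device's IP blocks. */
static void example_late_fini_pass(struct amdgpu_device *adev)
{
	int i;

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		/* late_fini is optional, so skip blocks that do not provide it. */
		if (adev->ip_blocks[i].funcs->late_fini)
			adev->ip_blocks[i].funcs->late_fini((void *)adev);
	}
}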
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 34e35423b78e..7a0b1e50f293 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | |||
| @@ -122,6 +122,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, | |||
| 122 | bool skip_preamble, need_ctx_switch; | 122 | bool skip_preamble, need_ctx_switch; |
| 123 | unsigned patch_offset = ~0; | 123 | unsigned patch_offset = ~0; |
| 124 | struct amdgpu_vm *vm; | 124 | struct amdgpu_vm *vm; |
| 125 | int vmid = 0, old_vmid = ring->vmid; | ||
| 125 | struct fence *hwf; | 126 | struct fence *hwf; |
| 126 | uint64_t ctx; | 127 | uint64_t ctx; |
| 127 | 128 | ||
| @@ -135,9 +136,11 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, | |||
| 135 | if (job) { | 136 | if (job) { |
| 136 | vm = job->vm; | 137 | vm = job->vm; |
| 137 | ctx = job->ctx; | 138 | ctx = job->ctx; |
| 139 | vmid = job->vm_id; | ||
| 138 | } else { | 140 | } else { |
| 139 | vm = NULL; | 141 | vm = NULL; |
| 140 | ctx = 0; | 142 | ctx = 0; |
| 143 | vmid = 0; | ||
| 141 | } | 144 | } |
| 142 | 145 | ||
| 143 | if (!ring->ready) { | 146 | if (!ring->ready) { |
| @@ -163,7 +166,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, | |||
| 163 | r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr, | 166 | r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr, |
| 164 | job->gds_base, job->gds_size, | 167 | job->gds_base, job->gds_size, |
| 165 | job->gws_base, job->gws_size, | 168 | job->gws_base, job->gws_size, |
| 166 | job->oa_base, job->oa_size); | 169 | job->oa_base, job->oa_size, |
| 170 | (ring->current_ctx == ctx) && (old_vmid != vmid)); | ||
| 167 | if (r) { | 171 | if (r) { |
| 168 | amdgpu_ring_undo(ring); | 172 | amdgpu_ring_undo(ring); |
| 169 | return r; | 173 | return r; |
| @@ -180,7 +184,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, | |||
| 180 | need_ctx_switch = ring->current_ctx != ctx; | 184 | need_ctx_switch = ring->current_ctx != ctx; |
| 181 | for (i = 0; i < num_ibs; ++i) { | 185 | for (i = 0; i < num_ibs; ++i) { |
| 182 | ib = &ibs[i]; | 186 | ib = &ibs[i]; |
| 183 | |||
| 184 | /* drop preamble IBs if we don't have a context switch */ | 187 | /* drop preamble IBs if we don't have a context switch */ |
| 185 | if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble) | 188 | if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble) |
| 186 | continue; | 189 | continue; |
| @@ -188,6 +191,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, | |||
| 188 | amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0, | 191 | amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0, |
| 189 | need_ctx_switch); | 192 | need_ctx_switch); |
| 190 | need_ctx_switch = false; | 193 | need_ctx_switch = false; |
| 194 | ring->vmid = vmid; | ||
| 191 | } | 195 | } |
| 192 | 196 | ||
| 193 | if (ring->funcs->emit_hdp_invalidate) | 197 | if (ring->funcs->emit_hdp_invalidate) |
| @@ -198,6 +202,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, | |||
| 198 | dev_err(adev->dev, "failed to emit fence (%d)\n", r); | 202 | dev_err(adev->dev, "failed to emit fence (%d)\n", r); |
| 199 | if (job && job->vm_id) | 203 | if (job && job->vm_id) |
| 200 | amdgpu_vm_reset_id(adev, job->vm_id); | 204 | amdgpu_vm_reset_id(adev, job->vm_id); |
| 205 | ring->vmid = old_vmid; | ||
| 201 | amdgpu_ring_undo(ring); | 206 | amdgpu_ring_undo(ring); |
| 202 | return r; | 207 | return r; |
| 203 | } | 208 | } |
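amdgpu_ib_schedule() now remembers the last VMID used on the ring and passes a vmid_switch flag into amdgpu_vm_flush(), so the pipeline sync is emitted when the context is unchanged but the VMID is not, rather than unconditionally for compute rings. The decision boils down to a small predicate; a sketch with placeholder scalar types standing in for the ring and job state:

#include <linux/types.h>

/* Sketch: does reusing the same context with a different VMID require a
 * pipeline sync before the VM flush? */
static bool example_needs_vmid_sync(u64 ring_ctx, u64 job_ctx,
				    int old_vmid, int new_vmid)
{
	/* Same context, different VMID: order the flush behind prior work. */
	return (ring_ctx == job_ctx) && (old_vmid != new_vmid);
}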
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c index 6bd961fb43dc..82256558e0f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | |||
| @@ -183,13 +183,6 @@ static int amdgpu_pp_sw_fini(void *handle) | |||
| 183 | if (ret) | 183 | if (ret) |
| 184 | return ret; | 184 | return ret; |
| 185 | 185 | ||
| 186 | #ifdef CONFIG_DRM_AMD_POWERPLAY | ||
| 187 | if (adev->pp_enabled) { | ||
| 188 | amdgpu_pm_sysfs_fini(adev); | ||
| 189 | amd_powerplay_fini(adev->powerplay.pp_handle); | ||
| 190 | } | ||
| 191 | #endif | ||
| 192 | |||
| 193 | return ret; | 186 | return ret; |
| 194 | } | 187 | } |
| 195 | 188 | ||
| @@ -223,6 +216,22 @@ static int amdgpu_pp_hw_fini(void *handle) | |||
| 223 | return ret; | 216 | return ret; |
| 224 | } | 217 | } |
| 225 | 218 | ||
| 219 | static void amdgpu_pp_late_fini(void *handle) | ||
| 220 | { | ||
| 221 | #ifdef CONFIG_DRM_AMD_POWERPLAY | ||
| 222 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
| 223 | |||
| 224 | if (adev->pp_enabled) { | ||
| 225 | amdgpu_pm_sysfs_fini(adev); | ||
| 226 | amd_powerplay_fini(adev->powerplay.pp_handle); | ||
| 227 | } | ||
| 228 | |||
| 229 | if (adev->powerplay.ip_funcs->late_fini) | ||
| 230 | adev->powerplay.ip_funcs->late_fini( | ||
| 231 | adev->powerplay.pp_handle); | ||
| 232 | #endif | ||
| 233 | } | ||
| 234 | |||
| 226 | static int amdgpu_pp_suspend(void *handle) | 235 | static int amdgpu_pp_suspend(void *handle) |
| 227 | { | 236 | { |
| 228 | int ret = 0; | 237 | int ret = 0; |
| @@ -311,6 +320,7 @@ const struct amd_ip_funcs amdgpu_pp_ip_funcs = { | |||
| 311 | .sw_fini = amdgpu_pp_sw_fini, | 320 | .sw_fini = amdgpu_pp_sw_fini, |
| 312 | .hw_init = amdgpu_pp_hw_init, | 321 | .hw_init = amdgpu_pp_hw_init, |
| 313 | .hw_fini = amdgpu_pp_hw_fini, | 322 | .hw_fini = amdgpu_pp_hw_fini, |
| 323 | .late_fini = amdgpu_pp_late_fini, | ||
| 314 | .suspend = amdgpu_pp_suspend, | 324 | .suspend = amdgpu_pp_suspend, |
| 315 | .resume = amdgpu_pp_resume, | 325 | .resume = amdgpu_pp_resume, |
| 316 | .is_idle = amdgpu_pp_is_idle, | 326 | .is_idle = amdgpu_pp_is_idle, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 3b02272db678..870f9494252c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | |||
| @@ -343,6 +343,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) | |||
| 343 | ring->ring = NULL; | 343 | ring->ring = NULL; |
| 344 | ring->ring_obj = NULL; | 344 | ring->ring_obj = NULL; |
| 345 | 345 | ||
| 346 | amdgpu_wb_free(ring->adev, ring->cond_exe_offs); | ||
| 346 | amdgpu_wb_free(ring->adev, ring->fence_offs); | 347 | amdgpu_wb_free(ring->adev, ring->fence_offs); |
| 347 | amdgpu_wb_free(ring->adev, ring->rptr_offs); | 348 | amdgpu_wb_free(ring->adev, ring->rptr_offs); |
| 348 | amdgpu_wb_free(ring->adev, ring->wptr_offs); | 349 | amdgpu_wb_free(ring->adev, ring->wptr_offs); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index 8bf84efafb04..48618ee324eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | |||
| @@ -115,6 +115,7 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev, | |||
| 115 | return r; | 115 | return r; |
| 116 | } | 116 | } |
| 117 | r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr); | 117 | r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr); |
| 118 | memset(sa_manager->cpu_ptr, 0, sa_manager->size); | ||
| 118 | amdgpu_bo_unreserve(sa_manager->bo); | 119 | amdgpu_bo_unreserve(sa_manager->bo); |
| 119 | return r; | 120 | return r; |
| 120 | } | 121 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 01abfc21b4a2..e19520c4b4b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | |||
| @@ -253,19 +253,20 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) | |||
| 253 | { | 253 | { |
| 254 | int r; | 254 | int r; |
| 255 | 255 | ||
| 256 | if (adev->uvd.vcpu_bo == NULL) | 256 | kfree(adev->uvd.saved_bo); |
| 257 | return 0; | ||
| 258 | 257 | ||
| 259 | amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity); | 258 | amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity); |
| 260 | 259 | ||
| 261 | r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false); | 260 | if (adev->uvd.vcpu_bo) { |
| 262 | if (!r) { | 261 | r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false); |
| 263 | amdgpu_bo_kunmap(adev->uvd.vcpu_bo); | 262 | if (!r) { |
| 264 | amdgpu_bo_unpin(adev->uvd.vcpu_bo); | 263 | amdgpu_bo_kunmap(adev->uvd.vcpu_bo); |
| 265 | amdgpu_bo_unreserve(adev->uvd.vcpu_bo); | 264 | amdgpu_bo_unpin(adev->uvd.vcpu_bo); |
| 266 | } | 265 | amdgpu_bo_unreserve(adev->uvd.vcpu_bo); |
| 266 | } | ||
| 267 | 267 | ||
| 268 | amdgpu_bo_unref(&adev->uvd.vcpu_bo); | 268 | amdgpu_bo_unref(&adev->uvd.vcpu_bo); |
| 269 | } | ||
| 269 | 270 | ||
| 270 | amdgpu_ring_fini(&adev->uvd.ring); | 271 | amdgpu_ring_fini(&adev->uvd.ring); |
| 271 | 272 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 9f36ed30ba11..62a4c127620f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |||
| @@ -298,7 +298,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, | |||
| 298 | unsigned vm_id, uint64_t pd_addr, | 298 | unsigned vm_id, uint64_t pd_addr, |
| 299 | uint32_t gds_base, uint32_t gds_size, | 299 | uint32_t gds_base, uint32_t gds_size, |
| 300 | uint32_t gws_base, uint32_t gws_size, | 300 | uint32_t gws_base, uint32_t gws_size, |
| 301 | uint32_t oa_base, uint32_t oa_size) | 301 | uint32_t oa_base, uint32_t oa_size, |
| 302 | bool vmid_switch) | ||
| 302 | { | 303 | { |
| 303 | struct amdgpu_device *adev = ring->adev; | 304 | struct amdgpu_device *adev = ring->adev; |
| 304 | struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id]; | 305 | struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id]; |
| @@ -312,8 +313,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, | |||
| 312 | int r; | 313 | int r; |
| 313 | 314 | ||
| 314 | if (ring->funcs->emit_pipeline_sync && ( | 315 | if (ring->funcs->emit_pipeline_sync && ( |
| 315 | pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed || | 316 | pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed || vmid_switch)) |
| 316 | ring->type == AMDGPU_RING_TYPE_COMPUTE)) | ||
| 317 | amdgpu_ring_emit_pipeline_sync(ring); | 317 | amdgpu_ring_emit_pipeline_sync(ring); |
| 318 | 318 | ||
| 319 | if (ring->funcs->emit_vm_flush && | 319 | if (ring->funcs->emit_vm_flush && |
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index ea407db1fbcf..5ec1f1e9c983 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c | |||
| @@ -6221,6 +6221,9 @@ static int ci_dpm_sw_fini(void *handle) | |||
| 6221 | ci_dpm_fini(adev); | 6221 | ci_dpm_fini(adev); |
| 6222 | mutex_unlock(&adev->pm.mutex); | 6222 | mutex_unlock(&adev->pm.mutex); |
| 6223 | 6223 | ||
| 6224 | release_firmware(adev->pm.fw); | ||
| 6225 | adev->pm.fw = NULL; | ||
| 6226 | |||
| 6224 | return 0; | 6227 | return 0; |
| 6225 | } | 6228 | } |
| 6226 | 6229 | ||
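The ci_dpm change (and the fiji, iceland and tonga dpm counterparts below) adds the same two lines to sw_fini: release the SMU firmware that init loaded and clear the cached pointer. A minimal sketch of the pattern; the handle cast mirrors how these IP callbacks receive the device:

#include <linux/firmware.h>

/* Sketch: drop the request_firmware() image during sw_fini and clear the
 * pointer so nothing can reference the freed blob later. */
static int example_dpm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	release_firmware(adev->pm.fw);	/* release_firmware(NULL) is a no-op */
	adev->pm.fw = NULL;

	return 0;
}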
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 518dca43b133..9dc4e24e31e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c | |||
| @@ -66,6 +66,16 @@ MODULE_FIRMWARE("radeon/mullins_sdma1.bin"); | |||
| 66 | 66 | ||
| 67 | u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev); | 67 | u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev); |
| 68 | 68 | ||
| 69 | |||
| 70 | static void cik_sdma_free_microcode(struct amdgpu_device *adev) | ||
| 71 | { | ||
| 72 | int i; | ||
| 73 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||
| 74 | release_firmware(adev->sdma.instance[i].fw); | ||
| 75 | adev->sdma.instance[i].fw = NULL; | ||
| 76 | } | ||
| 77 | } | ||
| 78 | |||
| 69 | /* | 79 | /* |
| 70 | * sDMA - System DMA | 80 | * sDMA - System DMA |
| 71 | * Starting with CIK, the GPU has new asynchronous | 81 | * Starting with CIK, the GPU has new asynchronous |
| @@ -419,6 +429,8 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev) | |||
| 419 | /* Initialize the ring buffer's read and write pointers */ | 429 | /* Initialize the ring buffer's read and write pointers */ |
| 420 | WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); | 430 | WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); |
| 421 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); | 431 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); |
| 432 | WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0); | ||
| 433 | WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0); | ||
| 422 | 434 | ||
| 423 | /* set the wb address whether it's enabled or not */ | 435 | /* set the wb address whether it's enabled or not */ |
| 424 | WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], | 436 | WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], |
| @@ -446,7 +458,12 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev) | |||
| 446 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); | 458 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); |
| 447 | 459 | ||
| 448 | ring->ready = true; | 460 | ring->ready = true; |
| 461 | } | ||
| 462 | |||
| 463 | cik_sdma_enable(adev, true); | ||
| 449 | 464 | ||
| 465 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||
| 466 | ring = &adev->sdma.instance[i].ring; | ||
| 450 | r = amdgpu_ring_test_ring(ring); | 467 | r = amdgpu_ring_test_ring(ring); |
| 451 | if (r) { | 468 | if (r) { |
| 452 | ring->ready = false; | 469 | ring->ready = false; |
| @@ -529,8 +546,8 @@ static int cik_sdma_start(struct amdgpu_device *adev) | |||
| 529 | if (r) | 546 | if (r) |
| 530 | return r; | 547 | return r; |
| 531 | 548 | ||
| 532 | /* unhalt the MEs */ | 549 | /* halt the engine before programming */ |
| 533 | cik_sdma_enable(adev, true); | 550 | cik_sdma_enable(adev, false); |
| 534 | 551 | ||
| 535 | /* start the gfx rings and rlc compute queues */ | 552 | /* start the gfx rings and rlc compute queues */ |
| 536 | r = cik_sdma_gfx_resume(adev); | 553 | r = cik_sdma_gfx_resume(adev); |
| @@ -998,6 +1015,7 @@ static int cik_sdma_sw_fini(void *handle) | |||
| 998 | for (i = 0; i < adev->sdma.num_instances; i++) | 1015 | for (i = 0; i < adev->sdma.num_instances; i++) |
| 999 | amdgpu_ring_fini(&adev->sdma.instance[i].ring); | 1016 | amdgpu_ring_fini(&adev->sdma.instance[i].ring); |
| 1000 | 1017 | ||
| 1018 | cik_sdma_free_microcode(adev); | ||
| 1001 | return 0; | 1019 | return 0; |
| 1002 | } | 1020 | } |
| 1003 | 1021 | ||
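The cik_sdma resume path (and the sdma_v2_4/sdma_v3_0 versions below) is split into two passes: program every ring while the engine is halted, unhalt the engine once, and only then run the ring tests. A condensed sketch of that ordering, assuming the amdgpu/cik_sdma helpers named in the hunks; the per-ring programming is abbreviated:

/* Sketch: program all SDMA rings halted, then enable the engine and test. */
static int example_sdma_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		/* ... program RB/IB pointers and doorbells while halted ... */
		ring->ready = true;
	}

	/* Unhalt the engine only after every ring has been programmed. */
	cik_sdma_enable(adev, true);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->ready = false;
			return r;
		}
	}

	return 0;
}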
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c index 245cabf06575..ed03b75175d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c | |||
| @@ -72,6 +72,11 @@ static int fiji_dpm_sw_init(void *handle) | |||
| 72 | 72 | ||
| 73 | static int fiji_dpm_sw_fini(void *handle) | 73 | static int fiji_dpm_sw_fini(void *handle) |
| 74 | { | 74 | { |
| 75 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
| 76 | |||
| 77 | release_firmware(adev->pm.fw); | ||
| 78 | adev->pm.fw = NULL; | ||
| 79 | |||
| 75 | return 0; | 80 | return 0; |
| 76 | } | 81 | } |
| 77 | 82 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 7f18a53ab53a..8c6ad1e72f02 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | |||
| @@ -991,6 +991,22 @@ out: | |||
| 991 | return err; | 991 | return err; |
| 992 | } | 992 | } |
| 993 | 993 | ||
| 994 | static void gfx_v7_0_free_microcode(struct amdgpu_device *adev) | ||
| 995 | { | ||
| 996 | release_firmware(adev->gfx.pfp_fw); | ||
| 997 | adev->gfx.pfp_fw = NULL; | ||
| 998 | release_firmware(adev->gfx.me_fw); | ||
| 999 | adev->gfx.me_fw = NULL; | ||
| 1000 | release_firmware(adev->gfx.ce_fw); | ||
| 1001 | adev->gfx.ce_fw = NULL; | ||
| 1002 | release_firmware(adev->gfx.mec_fw); | ||
| 1003 | adev->gfx.mec_fw = NULL; | ||
| 1004 | release_firmware(adev->gfx.mec2_fw); | ||
| 1005 | adev->gfx.mec2_fw = NULL; | ||
| 1006 | release_firmware(adev->gfx.rlc_fw); | ||
| 1007 | adev->gfx.rlc_fw = NULL; | ||
| 1008 | } | ||
| 1009 | |||
| 994 | /** | 1010 | /** |
| 995 | * gfx_v7_0_tiling_mode_table_init - init the hw tiling table | 1011 | * gfx_v7_0_tiling_mode_table_init - init the hw tiling table |
| 996 | * | 1012 | * |
| @@ -4489,6 +4505,7 @@ static int gfx_v7_0_sw_fini(void *handle) | |||
| 4489 | gfx_v7_0_cp_compute_fini(adev); | 4505 | gfx_v7_0_cp_compute_fini(adev); |
| 4490 | gfx_v7_0_rlc_fini(adev); | 4506 | gfx_v7_0_rlc_fini(adev); |
| 4491 | gfx_v7_0_mec_fini(adev); | 4507 | gfx_v7_0_mec_fini(adev); |
| 4508 | gfx_v7_0_free_microcode(adev); | ||
| 4492 | 4509 | ||
| 4493 | return 0; | 4510 | return 0; |
| 4494 | } | 4511 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index f19bab68fd83..9f6f8669edc3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | |||
| @@ -836,6 +836,26 @@ err1: | |||
| 836 | return r; | 836 | return r; |
| 837 | } | 837 | } |
| 838 | 838 | ||
| 839 | |||
| 840 | static void gfx_v8_0_free_microcode(struct amdgpu_device *adev) { | ||
| 841 | release_firmware(adev->gfx.pfp_fw); | ||
| 842 | adev->gfx.pfp_fw = NULL; | ||
| 843 | release_firmware(adev->gfx.me_fw); | ||
| 844 | adev->gfx.me_fw = NULL; | ||
| 845 | release_firmware(adev->gfx.ce_fw); | ||
| 846 | adev->gfx.ce_fw = NULL; | ||
| 847 | release_firmware(adev->gfx.rlc_fw); | ||
| 848 | adev->gfx.rlc_fw = NULL; | ||
| 849 | release_firmware(adev->gfx.mec_fw); | ||
| 850 | adev->gfx.mec_fw = NULL; | ||
| 851 | if ((adev->asic_type != CHIP_STONEY) && | ||
| 852 | (adev->asic_type != CHIP_TOPAZ)) | ||
| 853 | release_firmware(adev->gfx.mec2_fw); | ||
| 854 | adev->gfx.mec2_fw = NULL; | ||
| 855 | |||
| 856 | kfree(adev->gfx.rlc.register_list_format); | ||
| 857 | } | ||
| 858 | |||
| 839 | static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) | 859 | static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) |
| 840 | { | 860 | { |
| 841 | const char *chip_name; | 861 | const char *chip_name; |
| @@ -1983,7 +2003,7 @@ static int gfx_v8_0_sw_fini(void *handle) | |||
| 1983 | 2003 | ||
| 1984 | gfx_v8_0_rlc_fini(adev); | 2004 | gfx_v8_0_rlc_fini(adev); |
| 1985 | 2005 | ||
| 1986 | kfree(adev->gfx.rlc.register_list_format); | 2006 | gfx_v8_0_free_microcode(adev); |
| 1987 | 2007 | ||
| 1988 | return 0; | 2008 | return 0; |
| 1989 | } | 2009 | } |
| @@ -3974,11 +3994,15 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev) | |||
| 3974 | amdgpu_ring_write(ring, 0x3a00161a); | 3994 | amdgpu_ring_write(ring, 0x3a00161a); |
| 3975 | amdgpu_ring_write(ring, 0x0000002e); | 3995 | amdgpu_ring_write(ring, 0x0000002e); |
| 3976 | break; | 3996 | break; |
| 3977 | case CHIP_TOPAZ: | ||
| 3978 | case CHIP_CARRIZO: | 3997 | case CHIP_CARRIZO: |
| 3979 | amdgpu_ring_write(ring, 0x00000002); | 3998 | amdgpu_ring_write(ring, 0x00000002); |
| 3980 | amdgpu_ring_write(ring, 0x00000000); | 3999 | amdgpu_ring_write(ring, 0x00000000); |
| 3981 | break; | 4000 | break; |
| 4001 | case CHIP_TOPAZ: | ||
| 4002 | amdgpu_ring_write(ring, adev->gfx.config.num_rbs == 1 ? | ||
| 4003 | 0x00000000 : 0x00000002); | ||
| 4004 | amdgpu_ring_write(ring, 0x00000000); | ||
| 4005 | break; | ||
| 3982 | case CHIP_STONEY: | 4006 | case CHIP_STONEY: |
| 3983 | amdgpu_ring_write(ring, 0x00000000); | 4007 | amdgpu_ring_write(ring, 0x00000000); |
| 3984 | amdgpu_ring_write(ring, 0x00000000); | 4008 | amdgpu_ring_write(ring, 0x00000000); |
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c index 460bc8ad37e6..825ccd63f2dc 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c | |||
| @@ -72,6 +72,11 @@ static int iceland_dpm_sw_init(void *handle) | |||
| 72 | 72 | ||
| 73 | static int iceland_dpm_sw_fini(void *handle) | 73 | static int iceland_dpm_sw_fini(void *handle) |
| 74 | { | 74 | { |
| 75 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
| 76 | |||
| 77 | release_firmware(adev->pm.fw); | ||
| 78 | adev->pm.fw = NULL; | ||
| 79 | |||
| 75 | return 0; | 80 | return 0; |
| 76 | } | 81 | } |
| 77 | 82 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index f4c3130d3fdb..b556bd0a8797 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | |||
| @@ -105,6 +105,15 @@ static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev) | |||
| 105 | } | 105 | } |
| 106 | } | 106 | } |
| 107 | 107 | ||
| 108 | static void sdma_v2_4_free_microcode(struct amdgpu_device *adev) | ||
| 109 | { | ||
| 110 | int i; | ||
| 111 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||
| 112 | release_firmware(adev->sdma.instance[i].fw); | ||
| 113 | adev->sdma.instance[i].fw = NULL; | ||
| 114 | } | ||
| 115 | } | ||
| 116 | |||
| 108 | /** | 117 | /** |
| 109 | * sdma_v2_4_init_microcode - load ucode images from disk | 118 | * sdma_v2_4_init_microcode - load ucode images from disk |
| 110 | * | 119 | * |
| @@ -461,6 +470,8 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev) | |||
| 461 | /* Initialize the ring buffer's read and write pointers */ | 470 | /* Initialize the ring buffer's read and write pointers */ |
| 462 | WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); | 471 | WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); |
| 463 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); | 472 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); |
| 473 | WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0); | ||
| 474 | WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0); | ||
| 464 | 475 | ||
| 465 | /* set the wb address whether it's enabled or not */ | 476 | /* set the wb address whether it's enabled or not */ |
| 466 | WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], | 477 | WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], |
| @@ -489,7 +500,11 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev) | |||
| 489 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); | 500 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); |
| 490 | 501 | ||
| 491 | ring->ready = true; | 502 | ring->ready = true; |
| 503 | } | ||
| 492 | 504 | ||
| 505 | sdma_v2_4_enable(adev, true); | ||
| 506 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||
| 507 | ring = &adev->sdma.instance[i].ring; | ||
| 493 | r = amdgpu_ring_test_ring(ring); | 508 | r = amdgpu_ring_test_ring(ring); |
| 494 | if (r) { | 509 | if (r) { |
| 495 | ring->ready = false; | 510 | ring->ready = false; |
| @@ -580,8 +595,8 @@ static int sdma_v2_4_start(struct amdgpu_device *adev) | |||
| 580 | return -EINVAL; | 595 | return -EINVAL; |
| 581 | } | 596 | } |
| 582 | 597 | ||
| 583 | /* unhalt the MEs */ | 598 | /* halt the engine before programming */ |
| 584 | sdma_v2_4_enable(adev, true); | 599 | sdma_v2_4_enable(adev, false); |
| 585 | 600 | ||
| 586 | /* start the gfx rings and rlc compute queues */ | 601 | /* start the gfx rings and rlc compute queues */ |
| 587 | r = sdma_v2_4_gfx_resume(adev); | 602 | r = sdma_v2_4_gfx_resume(adev); |
| @@ -1012,6 +1027,7 @@ static int sdma_v2_4_sw_fini(void *handle) | |||
| 1012 | for (i = 0; i < adev->sdma.num_instances; i++) | 1027 | for (i = 0; i < adev->sdma.num_instances; i++) |
| 1013 | amdgpu_ring_fini(&adev->sdma.instance[i].ring); | 1028 | amdgpu_ring_fini(&adev->sdma.instance[i].ring); |
| 1014 | 1029 | ||
| 1030 | sdma_v2_4_free_microcode(adev); | ||
| 1015 | return 0; | 1031 | return 0; |
| 1016 | } | 1032 | } |
| 1017 | 1033 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 31d99b0010f7..532ea88da66a 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | |||
| @@ -236,6 +236,15 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev) | |||
| 236 | } | 236 | } |
| 237 | } | 237 | } |
| 238 | 238 | ||
| 239 | static void sdma_v3_0_free_microcode(struct amdgpu_device *adev) | ||
| 240 | { | ||
| 241 | int i; | ||
| 242 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||
| 243 | release_firmware(adev->sdma.instance[i].fw); | ||
| 244 | adev->sdma.instance[i].fw = NULL; | ||
| 245 | } | ||
| 246 | } | ||
| 247 | |||
| 239 | /** | 248 | /** |
| 240 | * sdma_v3_0_init_microcode - load ucode images from disk | 249 | * sdma_v3_0_init_microcode - load ucode images from disk |
| 241 | * | 250 | * |
| @@ -672,6 +681,8 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) | |||
| 672 | /* Initialize the ring buffer's read and write pointers */ | 681 | /* Initialize the ring buffer's read and write pointers */ |
| 673 | WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); | 682 | WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); |
| 674 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); | 683 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); |
| 684 | WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0); | ||
| 685 | WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0); | ||
| 675 | 686 | ||
| 676 | /* set the wb address whether it's enabled or not */ | 687 | /* set the wb address whether it's enabled or not */ |
| 677 | WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], | 688 | WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], |
| @@ -711,7 +722,15 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) | |||
| 711 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); | 722 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); |
| 712 | 723 | ||
| 713 | ring->ready = true; | 724 | ring->ready = true; |
| 725 | } | ||
| 726 | |||
| 727 | /* unhalt the MEs */ | ||
| 728 | sdma_v3_0_enable(adev, true); | ||
| 729 | /* enable sdma ring preemption */ | ||
| 730 | sdma_v3_0_ctx_switch_enable(adev, true); | ||
| 714 | 731 | ||
| 732 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||
| 733 | ring = &adev->sdma.instance[i].ring; | ||
| 715 | r = amdgpu_ring_test_ring(ring); | 734 | r = amdgpu_ring_test_ring(ring); |
| 716 | if (r) { | 735 | if (r) { |
| 717 | ring->ready = false; | 736 | ring->ready = false; |
| @@ -804,10 +823,9 @@ static int sdma_v3_0_start(struct amdgpu_device *adev) | |||
| 804 | } | 823 | } |
| 805 | } | 824 | } |
| 806 | 825 | ||
| 807 | /* unhalt the MEs */ | 826 | /* disable sdma engine before programming it */ |
| 808 | sdma_v3_0_enable(adev, true); | 827 | sdma_v3_0_ctx_switch_enable(adev, false); |
| 809 | /* enable sdma ring preemption */ | 828 | sdma_v3_0_enable(adev, false); |
| 810 | sdma_v3_0_ctx_switch_enable(adev, true); | ||
| 811 | 829 | ||
| 812 | /* start the gfx rings and rlc compute queues */ | 830 | /* start the gfx rings and rlc compute queues */ |
| 813 | r = sdma_v3_0_gfx_resume(adev); | 831 | r = sdma_v3_0_gfx_resume(adev); |
| @@ -1247,6 +1265,7 @@ static int sdma_v3_0_sw_fini(void *handle) | |||
| 1247 | for (i = 0; i < adev->sdma.num_instances; i++) | 1265 | for (i = 0; i < adev->sdma.num_instances; i++) |
| 1248 | amdgpu_ring_fini(&adev->sdma.instance[i].ring); | 1266 | amdgpu_ring_fini(&adev->sdma.instance[i].ring); |
| 1249 | 1267 | ||
| 1268 | sdma_v3_0_free_microcode(adev); | ||
| 1250 | return 0; | 1269 | return 0; |
| 1251 | } | 1270 | } |
| 1252 | 1271 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c index b7615cefcac4..f06f6f4dc3a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c | |||
| @@ -71,6 +71,11 @@ static int tonga_dpm_sw_init(void *handle) | |||
| 71 | 71 | ||
| 72 | static int tonga_dpm_sw_fini(void *handle) | 72 | static int tonga_dpm_sw_fini(void *handle) |
| 73 | { | 73 | { |
| 74 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
| 75 | |||
| 76 | release_firmware(adev->pm.fw); | ||
| 77 | adev->pm.fw = NULL; | ||
| 78 | |||
| 74 | return 0; | 79 | return 0; |
| 75 | } | 80 | } |
| 76 | 81 | ||
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 6080951d539d..afce1edbe250 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h | |||
| @@ -157,6 +157,7 @@ struct amd_ip_funcs { | |||
| 157 | int (*hw_init)(void *handle); | 157 | int (*hw_init)(void *handle); |
| 158 | /* tears down the hw state */ | 158 | /* tears down the hw state */ |
| 159 | int (*hw_fini)(void *handle); | 159 | int (*hw_fini)(void *handle); |
| 160 | void (*late_fini)(void *handle); | ||
| 160 | /* handles IP specific hw/sw changes for suspend */ | 161 | /* handles IP specific hw/sw changes for suspend */ |
| 161 | int (*suspend)(void *handle); | 162 | int (*suspend)(void *handle); |
| 162 | /* handles IP specific hw/sw changes for resume */ | 163 | /* handles IP specific hw/sw changes for resume */ |
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h index a461e155a160..7464daf89ca1 100644 --- a/drivers/gpu/drm/amd/include/cgs_common.h +++ b/drivers/gpu/drm/amd/include/cgs_common.h | |||
| @@ -581,6 +581,9 @@ typedef int (*cgs_get_firmware_info)(struct cgs_device *cgs_device, | |||
| 581 | enum cgs_ucode_id type, | 581 | enum cgs_ucode_id type, |
| 582 | struct cgs_firmware_info *info); | 582 | struct cgs_firmware_info *info); |
| 583 | 583 | ||
| 584 | typedef int (*cgs_rel_firmware)(struct cgs_device *cgs_device, | ||
| 585 | enum cgs_ucode_id type); | ||
| 586 | |||
| 584 | typedef int(*cgs_set_powergating_state)(struct cgs_device *cgs_device, | 587 | typedef int(*cgs_set_powergating_state)(struct cgs_device *cgs_device, |
| 585 | enum amd_ip_block_type block_type, | 588 | enum amd_ip_block_type block_type, |
| 586 | enum amd_powergating_state state); | 589 | enum amd_powergating_state state); |
| @@ -645,6 +648,7 @@ struct cgs_ops { | |||
| 645 | cgs_set_camera_voltages_t set_camera_voltages; | 648 | cgs_set_camera_voltages_t set_camera_voltages; |
| 646 | /* Firmware Info */ | 649 | /* Firmware Info */ |
| 647 | cgs_get_firmware_info get_firmware_info; | 650 | cgs_get_firmware_info get_firmware_info; |
| 651 | cgs_rel_firmware rel_firmware; | ||
| 648 | /* cg pg interface*/ | 652 | /* cg pg interface*/ |
| 649 | cgs_set_powergating_state set_powergating_state; | 653 | cgs_set_powergating_state set_powergating_state; |
| 650 | cgs_set_clockgating_state set_clockgating_state; | 654 | cgs_set_clockgating_state set_clockgating_state; |
| @@ -738,6 +742,8 @@ struct cgs_device | |||
| 738 | CGS_CALL(set_camera_voltages,dev,mask,voltages) | 742 | CGS_CALL(set_camera_voltages,dev,mask,voltages) |
| 739 | #define cgs_get_firmware_info(dev, type, info) \ | 743 | #define cgs_get_firmware_info(dev, type, info) \ |
| 740 | CGS_CALL(get_firmware_info, dev, type, info) | 744 | CGS_CALL(get_firmware_info, dev, type, info) |
| 745 | #define cgs_rel_firmware(dev, type) \ | ||
| 746 | CGS_CALL(rel_firmware, dev, type) | ||
| 741 | #define cgs_set_powergating_state(dev, block_type, state) \ | 747 | #define cgs_set_powergating_state(dev, block_type, state) \ |
| 742 | CGS_CALL(set_powergating_state, dev, block_type, state) | 748 | CGS_CALL(set_powergating_state, dev, block_type, state) |
| 743 | #define cgs_set_clockgating_state(dev, block_type, state) \ | 749 | #define cgs_set_clockgating_state(dev, block_type, state) \ |
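The new cgs_rel_firmware callback follows the existing CGS plumbing: a typedef for the function pointer, a slot in struct cgs_ops, and a wrapper macro that dispatches through the device's ops table via CGS_CALL(). The dispatch idiom itself is just an ops structure plus a forwarding macro; a generic, self-contained userspace sketch with invented names (not the actual CGS definitions):

#include <stdio.h>

struct example_device;

typedef int (*example_rel_firmware_t)(struct example_device *dev, int type);

struct example_ops {
	example_rel_firmware_t rel_firmware;
};

struct example_device {
	const struct example_ops *ops;
};

/* Forwarding macro: look the callback up in the ops table and call it. */
#define EXAMPLE_CALL(func, dev, ...) ((dev)->ops->func((dev), ##__VA_ARGS__))
#define example_rel_firmware(dev, type) EXAMPLE_CALL(rel_firmware, dev, type)

static int impl_rel_firmware(struct example_device *dev, int type)
{
	printf("releasing firmware image of type %d\n", type);
	return 0;
}

static const struct example_ops example_ops = {
	.rel_firmware = impl_rel_firmware,
};

int main(void)
{
	struct example_device dev = { .ops = &example_ops };

	return example_rel_firmware(&dev, 1);	/* dispatches through the table */
}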
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 8e345bfddb69..e629f8a9fe93 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c | |||
| @@ -73,11 +73,14 @@ static int pp_sw_init(void *handle) | |||
| 73 | 73 | ||
| 74 | ret = hwmgr->hwmgr_func->backend_init(hwmgr); | 74 | ret = hwmgr->hwmgr_func->backend_init(hwmgr); |
| 75 | if (ret) | 75 | if (ret) |
| 76 | goto err; | 76 | goto err1; |
| 77 | 77 | ||
| 78 | pr_info("amdgpu: powerplay initialized\n"); | 78 | pr_info("amdgpu: powerplay initialized\n"); |
| 79 | 79 | ||
| 80 | return 0; | 80 | return 0; |
| 81 | err1: | ||
| 82 | if (hwmgr->pptable_func->pptable_fini) | ||
| 83 | hwmgr->pptable_func->pptable_fini(hwmgr); | ||
| 81 | err: | 84 | err: |
| 82 | pr_err("amdgpu: powerplay initialization failed\n"); | 85 | pr_err("amdgpu: powerplay initialization failed\n"); |
| 83 | return ret; | 86 | return ret; |
| @@ -100,6 +103,9 @@ static int pp_sw_fini(void *handle) | |||
| 100 | if (hwmgr->hwmgr_func->backend_fini != NULL) | 103 | if (hwmgr->hwmgr_func->backend_fini != NULL) |
| 101 | ret = hwmgr->hwmgr_func->backend_fini(hwmgr); | 104 | ret = hwmgr->hwmgr_func->backend_fini(hwmgr); |
| 102 | 105 | ||
| 106 | if (hwmgr->pptable_func->pptable_fini) | ||
| 107 | hwmgr->pptable_func->pptable_fini(hwmgr); | ||
| 108 | |||
| 103 | return ret; | 109 | return ret; |
| 104 | } | 110 | } |
| 105 | 111 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c index 46410e3c7349..fb88e4e5d625 100644 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c | |||
| @@ -58,9 +58,6 @@ static void pem_fini(struct pp_eventmgr *eventmgr) | |||
| 58 | pem_unregister_interrupts(eventmgr); | 58 | pem_unregister_interrupts(eventmgr); |
| 59 | 59 | ||
| 60 | pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data); | 60 | pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data); |
| 61 | |||
| 62 | if (eventmgr != NULL) | ||
| 63 | kfree(eventmgr); | ||
| 64 | } | 61 | } |
| 65 | 62 | ||
| 66 | int eventmgr_init(struct pp_instance *handle) | 63 | int eventmgr_init(struct pp_instance *handle) |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c index 24a16e49b571..586f73276226 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c | |||
| @@ -1830,7 +1830,7 @@ static uint16_t fiji_find_closest_vddci(struct pp_hwmgr *hwmgr, uint16_t vddci) | |||
| 1830 | 1830 | ||
| 1831 | PP_ASSERT_WITH_CODE(false, | 1831 | PP_ASSERT_WITH_CODE(false, |
| 1832 | "VDDCI is larger than max VDDCI in VDDCI Voltage Table!", | 1832 | "VDDCI is larger than max VDDCI in VDDCI Voltage Table!", |
| 1833 | return vddci_table->entries[i].value); | 1833 | return vddci_table->entries[i-1].value); |
| 1834 | } | 1834 | } |
| 1835 | 1835 | ||
| 1836 | static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, | 1836 | static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 1c48917da3cf..20f20e075588 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | |||
| @@ -93,6 +93,13 @@ int hwmgr_fini(struct pp_hwmgr *hwmgr) | |||
| 93 | if (hwmgr == NULL || hwmgr->ps == NULL) | 93 | if (hwmgr == NULL || hwmgr->ps == NULL) |
| 94 | return -EINVAL; | 94 | return -EINVAL; |
| 95 | 95 | ||
| 96 | /* do hwmgr finish*/ | ||
| 97 | kfree(hwmgr->backend); | ||
| 98 | |||
| 99 | kfree(hwmgr->start_thermal_controller.function_list); | ||
| 100 | |||
| 101 | kfree(hwmgr->set_temperature_range.function_list); | ||
| 102 | |||
| 96 | kfree(hwmgr->ps); | 103 | kfree(hwmgr->ps); |
| 97 | kfree(hwmgr); | 104 | kfree(hwmgr); |
| 98 | return 0; | 105 | return 0; |
| @@ -462,7 +469,7 @@ uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, u | |||
| 462 | 469 | ||
| 463 | PP_ASSERT_WITH_CODE(false, | 470 | PP_ASSERT_WITH_CODE(false, |
| 464 | "VDDCI is larger than max VDDCI in VDDCI Voltage Table!", | 471 | "VDDCI is larger than max VDDCI in VDDCI Voltage Table!", |
| 465 | return vddci_table->entries[i].value); | 472 | return vddci_table->entries[i-1].value); |
| 466 | } | 473 | } |
| 467 | 474 | ||
| 468 | int phm_find_boot_level(void *table, | 475 | int phm_find_boot_level(void *table, |
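Both closest-VDDCI hunks fix the same off-by-one: when the requested voltage is above every table entry, the loop index i equals the table count, so entries[i] read past the end; the fix returns entries[i-1], the largest supported value. A self-contained sketch of the corrected lookup (the table is simplified to a flat array and assumed non-empty):

/* Sketch: return the smallest entry >= vddci, or the largest entry if the
 * request exceeds every value in the (non-empty) table. */
static unsigned short example_find_closest_vddci(const unsigned short *entries,
						 int count, unsigned short vddci)
{
	int i;

	for (i = 0; i < count; i++) {
		if (entries[i] >= vddci)
			return entries[i];
	}

	/* Here i == count, so entries[i] would be out of bounds; fall back
	 * to entries[count - 1], the maximum supported VDDCI. */
	return entries[count - 1];
}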
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c index 0b99ab3ba0c5..ae96f14b827c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c | |||
| @@ -286,7 +286,7 @@ int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr) | |||
| 286 | 286 | ||
| 287 | if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, | 287 | if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, |
| 288 | (uint8_t *)&data->power_tune_table, | 288 | (uint8_t *)&data->power_tune_table, |
| 289 | sizeof(struct SMU74_Discrete_PmFuses), data->sram_end)) | 289 | (sizeof(struct SMU74_Discrete_PmFuses) - 92), data->sram_end)) |
| 290 | PP_ASSERT_WITH_CODE(false, | 290 | PP_ASSERT_WITH_CODE(false, |
| 291 | "Attempt to download PmFuseTable Failed!", | 291 | "Attempt to download PmFuseTable Failed!", |
| 292 | return -EINVAL); | 292 | return -EINVAL); |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c index 16fed487973b..d27e8c40602a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c | |||
| @@ -2847,27 +2847,6 @@ static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) | |||
| 2847 | } | 2847 | } |
| 2848 | } | 2848 | } |
| 2849 | 2849 | ||
| 2850 | /* Initialize Vddc DPM table based on allow Vddc values. And populate corresponding std values. */ | ||
| 2851 | for (i = 0; i < allowed_vdd_sclk_table->count; i++) { | ||
| 2852 | data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddc; | ||
| 2853 | /* tonga_hwmgr->dpm_table.VddcTable.dpm_levels[i].param1 = stdVoltageTable->entries[i].Leakage; */ | ||
| 2854 | /* param1 is for corresponding std voltage */ | ||
| 2855 | data->dpm_table.vddc_table.dpm_levels[i].enabled = 1; | ||
| 2856 | } | ||
| 2857 | data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count; | ||
| 2858 | |||
| 2859 | if (NULL != allowed_vdd_mclk_table) { | ||
| 2860 | /* Initialize Vddci DPM table based on allow Mclk values */ | ||
| 2861 | for (i = 0; i < allowed_vdd_mclk_table->count; i++) { | ||
| 2862 | data->dpm_table.vdd_ci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddci; | ||
| 2863 | data->dpm_table.vdd_ci_table.dpm_levels[i].enabled = 1; | ||
| 2864 | data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].mvdd; | ||
| 2865 | data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1; | ||
| 2866 | } | ||
| 2867 | data->dpm_table.vdd_ci_table.count = allowed_vdd_mclk_table->count; | ||
| 2868 | data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count; | ||
| 2869 | } | ||
| 2870 | |||
| 2871 | /* setup PCIE gen speed levels*/ | 2850 | /* setup PCIE gen speed levels*/ |
| 2872 | tonga_setup_default_pcie_tables(hwmgr); | 2851 | tonga_setup_default_pcie_tables(hwmgr); |
| 2873 | 2852 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c index 10e3630ee39d..296ec7ef6d45 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c | |||
| @@ -1040,48 +1040,44 @@ int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr) | |||
| 1040 | struct phm_ppt_v1_information *pp_table_information = | 1040 | struct phm_ppt_v1_information *pp_table_information = |
| 1041 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | 1041 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 1042 | 1042 | ||
| 1043 | if (NULL != hwmgr->soft_pp_table) { | 1043 | if (NULL != hwmgr->soft_pp_table) |
| 1044 | kfree(hwmgr->soft_pp_table); | ||
| 1045 | hwmgr->soft_pp_table = NULL; | 1044 | hwmgr->soft_pp_table = NULL; |
| 1046 | } | ||
| 1047 | 1045 | ||
| 1048 | if (NULL != pp_table_information->vdd_dep_on_sclk) | 1046 | kfree(pp_table_information->vdd_dep_on_sclk); |
| 1049 | pp_table_information->vdd_dep_on_sclk = NULL; | 1047 | pp_table_information->vdd_dep_on_sclk = NULL; |
| 1050 | 1048 | ||
| 1051 | if (NULL != pp_table_information->vdd_dep_on_mclk) | 1049 | kfree(pp_table_information->vdd_dep_on_mclk); |
| 1052 | pp_table_information->vdd_dep_on_mclk = NULL; | 1050 | pp_table_information->vdd_dep_on_mclk = NULL; |
| 1053 | 1051 | ||
| 1054 | if (NULL != pp_table_information->valid_mclk_values) | 1052 | kfree(pp_table_information->valid_mclk_values); |
| 1055 | pp_table_information->valid_mclk_values = NULL; | 1053 | pp_table_information->valid_mclk_values = NULL; |
| 1056 | 1054 | ||
| 1057 | if (NULL != pp_table_information->valid_sclk_values) | 1055 | kfree(pp_table_information->valid_sclk_values); |
| 1058 | pp_table_information->valid_sclk_values = NULL; | 1056 | pp_table_information->valid_sclk_values = NULL; |
| 1059 | 1057 | ||
| 1060 | if (NULL != pp_table_information->vddc_lookup_table) | 1058 | kfree(pp_table_information->vddc_lookup_table); |
| 1061 | pp_table_information->vddc_lookup_table = NULL; | 1059 | pp_table_information->vddc_lookup_table = NULL; |
| 1062 | 1060 | ||
| 1063 | if (NULL != pp_table_information->vddgfx_lookup_table) | 1061 | kfree(pp_table_information->vddgfx_lookup_table); |
| 1064 | pp_table_information->vddgfx_lookup_table = NULL; | 1062 | pp_table_information->vddgfx_lookup_table = NULL; |
| 1065 | 1063 | ||
| 1066 | if (NULL != pp_table_information->mm_dep_table) | 1064 | kfree(pp_table_information->mm_dep_table); |
| 1067 | pp_table_information->mm_dep_table = NULL; | 1065 | pp_table_information->mm_dep_table = NULL; |
| 1068 | 1066 | ||
| 1069 | if (NULL != pp_table_information->cac_dtp_table) | 1067 | kfree(pp_table_information->cac_dtp_table); |
| 1070 | pp_table_information->cac_dtp_table = NULL; | 1068 | pp_table_information->cac_dtp_table = NULL; |
| 1071 | 1069 | ||
| 1072 | if (NULL != hwmgr->dyn_state.cac_dtp_table) | 1070 | kfree(hwmgr->dyn_state.cac_dtp_table); |
| 1073 | hwmgr->dyn_state.cac_dtp_table = NULL; | 1071 | hwmgr->dyn_state.cac_dtp_table = NULL; |
| 1074 | 1072 | ||
| 1075 | if (NULL != pp_table_information->ppm_parameter_table) | 1073 | kfree(pp_table_information->ppm_parameter_table); |
| 1076 | pp_table_information->ppm_parameter_table = NULL; | 1074 | pp_table_information->ppm_parameter_table = NULL; |
| 1077 | 1075 | ||
| 1078 | if (NULL != pp_table_information->pcie_table) | 1076 | kfree(pp_table_information->pcie_table); |
| 1079 | pp_table_information->pcie_table = NULL; | 1077 | pp_table_information->pcie_table = NULL; |
| 1080 | 1078 | ||
| 1081 | if (NULL != hwmgr->pptable) { | 1079 | kfree(hwmgr->pptable); |
| 1082 | kfree(hwmgr->pptable); | 1080 | hwmgr->pptable = NULL; |
| 1083 | hwmgr->pptable = NULL; | ||
| 1084 | } | ||
| 1085 | 1081 | ||
| 1086 | return result; | 1082 | return result; |
| 1087 | } | 1083 | } |
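The tonga pptables teardown drops every "if (NULL != ptr)" guard because kfree() already accepts a NULL pointer; each field is simply freed and then cleared. A tiny sketch of the idiom:

#include <linux/slab.h>

/* Sketch: kfree(NULL) is a no-op, so no guard is needed before the call. */
static void example_free_table(void **table)
{
	kfree(*table);		/* safe whether or not *table is NULL */
	*table = NULL;		/* clear the pointer to avoid dangling use */
}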
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index 673a75c74e18..8e52a2e82db5 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | |||
| @@ -1006,10 +1006,16 @@ static int fiji_smu_init(struct pp_smumgr *smumgr) | |||
| 1006 | 1006 | ||
| 1007 | static int fiji_smu_fini(struct pp_smumgr *smumgr) | 1007 | static int fiji_smu_fini(struct pp_smumgr *smumgr) |
| 1008 | { | 1008 | { |
| 1009 | struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); | ||
| 1010 | |||
| 1011 | smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle); | ||
| 1012 | |||
| 1009 | if (smumgr->backend) { | 1013 | if (smumgr->backend) { |
| 1010 | kfree(smumgr->backend); | 1014 | kfree(smumgr->backend); |
| 1011 | smumgr->backend = NULL; | 1015 | smumgr->backend = NULL; |
| 1012 | } | 1016 | } |
| 1017 | |||
| 1018 | cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); | ||
| 1013 | return 0; | 1019 | return 0; |
| 1014 | } | 1020 | } |
| 1015 | 1021 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index de618ead9db8..043b6ac09d5f 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | |||
| @@ -469,6 +469,7 @@ int polaris10_smu_fini(struct pp_smumgr *smumgr) | |||
| 469 | kfree(smumgr->backend); | 469 | kfree(smumgr->backend); |
| 470 | smumgr->backend = NULL; | 470 | smumgr->backend = NULL; |
| 471 | } | 471 | } |
| 472 | cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); | ||
| 472 | return 0; | 473 | return 0; |
| 473 | } | 474 | } |
| 474 | 475 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index c483baf6b4fb..0728c1e3d97a 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | |||
| @@ -81,6 +81,7 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle) | |||
| 81 | 81 | ||
| 82 | int smum_fini(struct pp_smumgr *smumgr) | 82 | int smum_fini(struct pp_smumgr *smumgr) |
| 83 | { | 83 | { |
| 84 | kfree(smumgr->device); | ||
| 84 | kfree(smumgr); | 85 | kfree(smumgr); |
| 85 | return 0; | 86 | return 0; |
| 86 | } | 87 | } |
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c index 32820b680d88..b22722eabafc 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c | |||
| @@ -328,10 +328,17 @@ int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr, | |||
| 328 | 328 | ||
| 329 | static int tonga_smu_fini(struct pp_smumgr *smumgr) | 329 | static int tonga_smu_fini(struct pp_smumgr *smumgr) |
| 330 | { | 330 | { |
| 331 | struct tonga_smumgr *priv = (struct tonga_smumgr *)(smumgr->backend); | ||
| 332 | |||
| 333 | smu_free_memory(smumgr->device, (void *)priv->smu_buffer.handle); | ||
| 334 | smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle); | ||
| 335 | |||
| 331 | if (smumgr->backend != NULL) { | 336 | if (smumgr->backend != NULL) { |
| 332 | kfree(smumgr->backend); | 337 | kfree(smumgr->backend); |
| 333 | smumgr->backend = NULL; | 338 | smumgr->backend = NULL; |
| 334 | } | 339 | } |
| 340 | |||
| 341 | cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); | ||
| 335 | return 0; | 342 | return 0; |
| 336 | } | 343 | } |
| 337 | 344 | ||
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c index fef1b04c2aab..0813c2f06931 100644 --- a/drivers/gpu/drm/arm/hdlcd_crtc.c +++ b/drivers/gpu/drm/arm/hdlcd_crtc.c | |||
| @@ -33,8 +33,17 @@ | |||
| 33 | * | 33 | * |
| 34 | */ | 34 | */ |
| 35 | 35 | ||
| 36 | static void hdlcd_crtc_cleanup(struct drm_crtc *crtc) | ||
| 37 | { | ||
| 38 | struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); | ||
| 39 | |||
| 40 | /* stop the controller on cleanup */ | ||
| 41 | hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0); | ||
| 42 | drm_crtc_cleanup(crtc); | ||
| 43 | } | ||
| 44 | |||
| 36 | static const struct drm_crtc_funcs hdlcd_crtc_funcs = { | 45 | static const struct drm_crtc_funcs hdlcd_crtc_funcs = { |
| 37 | .destroy = drm_crtc_cleanup, | 46 | .destroy = hdlcd_crtc_cleanup, |
| 38 | .set_config = drm_atomic_helper_set_config, | 47 | .set_config = drm_atomic_helper_set_config, |
| 39 | .page_flip = drm_atomic_helper_page_flip, | 48 | .page_flip = drm_atomic_helper_page_flip, |
| 40 | .reset = drm_atomic_helper_crtc_reset, | 49 | .reset = drm_atomic_helper_crtc_reset, |
| @@ -97,7 +106,7 @@ static void hdlcd_crtc_mode_set_nofb(struct drm_crtc *crtc) | |||
| 97 | struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); | 106 | struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); |
| 98 | struct drm_display_mode *m = &crtc->state->adjusted_mode; | 107 | struct drm_display_mode *m = &crtc->state->adjusted_mode; |
| 99 | struct videomode vm; | 108 | struct videomode vm; |
| 100 | unsigned int polarities, line_length, err; | 109 | unsigned int polarities, err; |
| 101 | 110 | ||
| 102 | vm.vfront_porch = m->crtc_vsync_start - m->crtc_vdisplay; | 111 | vm.vfront_porch = m->crtc_vsync_start - m->crtc_vdisplay; |
| 103 | vm.vback_porch = m->crtc_vtotal - m->crtc_vsync_end; | 112 | vm.vback_porch = m->crtc_vtotal - m->crtc_vsync_end; |
| @@ -113,23 +122,18 @@ static void hdlcd_crtc_mode_set_nofb(struct drm_crtc *crtc) | |||
| 113 | if (m->flags & DRM_MODE_FLAG_PVSYNC) | 122 | if (m->flags & DRM_MODE_FLAG_PVSYNC) |
| 114 | polarities |= HDLCD_POLARITY_VSYNC; | 123 | polarities |= HDLCD_POLARITY_VSYNC; |
| 115 | 124 | ||
| 116 | line_length = crtc->primary->state->fb->pitches[0]; | ||
| 117 | |||
| 118 | /* Allow max number of outstanding requests and largest burst size */ | 125 | /* Allow max number of outstanding requests and largest burst size */ |
| 119 | hdlcd_write(hdlcd, HDLCD_REG_BUS_OPTIONS, | 126 | hdlcd_write(hdlcd, HDLCD_REG_BUS_OPTIONS, |
| 120 | HDLCD_BUS_MAX_OUTSTAND | HDLCD_BUS_BURST_16); | 127 | HDLCD_BUS_MAX_OUTSTAND | HDLCD_BUS_BURST_16); |
| 121 | 128 | ||
| 122 | hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, line_length); | ||
| 123 | hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, line_length); | ||
| 124 | hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, m->crtc_vdisplay - 1); | ||
| 125 | hdlcd_write(hdlcd, HDLCD_REG_V_DATA, m->crtc_vdisplay - 1); | 129 | hdlcd_write(hdlcd, HDLCD_REG_V_DATA, m->crtc_vdisplay - 1); |
| 126 | hdlcd_write(hdlcd, HDLCD_REG_V_BACK_PORCH, vm.vback_porch - 1); | 130 | hdlcd_write(hdlcd, HDLCD_REG_V_BACK_PORCH, vm.vback_porch - 1); |
| 127 | hdlcd_write(hdlcd, HDLCD_REG_V_FRONT_PORCH, vm.vfront_porch - 1); | 131 | hdlcd_write(hdlcd, HDLCD_REG_V_FRONT_PORCH, vm.vfront_porch - 1); |
| 128 | hdlcd_write(hdlcd, HDLCD_REG_V_SYNC, vm.vsync_len - 1); | 132 | hdlcd_write(hdlcd, HDLCD_REG_V_SYNC, vm.vsync_len - 1); |
| 133 | hdlcd_write(hdlcd, HDLCD_REG_H_DATA, m->crtc_hdisplay - 1); | ||
| 129 | hdlcd_write(hdlcd, HDLCD_REG_H_BACK_PORCH, vm.hback_porch - 1); | 134 | hdlcd_write(hdlcd, HDLCD_REG_H_BACK_PORCH, vm.hback_porch - 1); |
| 130 | hdlcd_write(hdlcd, HDLCD_REG_H_FRONT_PORCH, vm.hfront_porch - 1); | 135 | hdlcd_write(hdlcd, HDLCD_REG_H_FRONT_PORCH, vm.hfront_porch - 1); |
| 131 | hdlcd_write(hdlcd, HDLCD_REG_H_SYNC, vm.hsync_len - 1); | 136 | hdlcd_write(hdlcd, HDLCD_REG_H_SYNC, vm.hsync_len - 1); |
| 132 | hdlcd_write(hdlcd, HDLCD_REG_H_DATA, m->crtc_hdisplay - 1); | ||
| 133 | hdlcd_write(hdlcd, HDLCD_REG_POLARITIES, polarities); | 137 | hdlcd_write(hdlcd, HDLCD_REG_POLARITIES, polarities); |
| 134 | 138 | ||
| 135 | err = hdlcd_set_pxl_fmt(crtc); | 139 | err = hdlcd_set_pxl_fmt(crtc); |
| @@ -144,20 +148,19 @@ static void hdlcd_crtc_enable(struct drm_crtc *crtc) | |||
| 144 | struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); | 148 | struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); |
| 145 | 149 | ||
| 146 | clk_prepare_enable(hdlcd->clk); | 150 | clk_prepare_enable(hdlcd->clk); |
| 151 | hdlcd_crtc_mode_set_nofb(crtc); | ||
| 147 | hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 1); | 152 | hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 1); |
| 148 | drm_crtc_vblank_on(crtc); | ||
| 149 | } | 153 | } |
| 150 | 154 | ||
| 151 | static void hdlcd_crtc_disable(struct drm_crtc *crtc) | 155 | static void hdlcd_crtc_disable(struct drm_crtc *crtc) |
| 152 | { | 156 | { |
| 153 | struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); | 157 | struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); |
| 154 | 158 | ||
| 155 | if (!crtc->primary->fb) | 159 | if (!crtc->state->active) |
| 156 | return; | 160 | return; |
| 157 | 161 | ||
| 158 | clk_disable_unprepare(hdlcd->clk); | ||
| 159 | hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0); | 162 | hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0); |
| 160 | drm_crtc_vblank_off(crtc); | 163 | clk_disable_unprepare(hdlcd->clk); |
| 161 | } | 164 | } |
| 162 | 165 | ||
| 163 | static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc, | 166 | static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc, |
| @@ -179,20 +182,17 @@ static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc, | |||
| 179 | static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc, | 182 | static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc, |
| 180 | struct drm_crtc_state *state) | 183 | struct drm_crtc_state *state) |
| 181 | { | 184 | { |
| 182 | struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); | 185 | struct drm_pending_vblank_event *event = crtc->state->event; |
| 183 | unsigned long flags; | ||
| 184 | |||
| 185 | if (crtc->state->event) { | ||
| 186 | struct drm_pending_vblank_event *event = crtc->state->event; | ||
| 187 | 186 | ||
| 187 | if (event) { | ||
| 188 | crtc->state->event = NULL; | 188 | crtc->state->event = NULL; |
| 189 | event->pipe = drm_crtc_index(crtc); | ||
| 190 | |||
| 191 | WARN_ON(drm_crtc_vblank_get(crtc) != 0); | ||
| 192 | 189 | ||
| 193 | spin_lock_irqsave(&crtc->dev->event_lock, flags); | 190 | spin_lock_irq(&crtc->dev->event_lock); |
| 194 | list_add_tail(&event->base.link, &hdlcd->event_list); | 191 | if (drm_crtc_vblank_get(crtc) == 0) |
| 195 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); | 192 | drm_crtc_arm_vblank_event(crtc, event); |
| 193 | else | ||
| 194 | drm_crtc_send_vblank_event(crtc, event); | ||
| 195 | spin_unlock_irq(&crtc->dev->event_lock); | ||
| 196 | } | 196 | } |
| 197 | } | 197 | } |
| 198 | 198 | ||
| @@ -225,6 +225,15 @@ static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = { | |||
| 225 | static int hdlcd_plane_atomic_check(struct drm_plane *plane, | 225 | static int hdlcd_plane_atomic_check(struct drm_plane *plane, |
| 226 | struct drm_plane_state *state) | 226 | struct drm_plane_state *state) |
| 227 | { | 227 | { |
| 228 | u32 src_w, src_h; | ||
| 229 | |||
| 230 | src_w = state->src_w >> 16; | ||
| 231 | src_h = state->src_h >> 16; | ||
| 232 | |||
| 233 | /* we can't do any scaling of the plane source */ | ||
| 234 | if ((src_w != state->crtc_w) || (src_h != state->crtc_h)) | ||
| 235 | return -EINVAL; | ||
| 236 | |||
| 228 | return 0; | 237 | return 0; |
| 229 | } | 238 | } |
| 230 | 239 | ||
| @@ -233,20 +242,31 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane, | |||
| 233 | { | 242 | { |
| 234 | struct hdlcd_drm_private *hdlcd; | 243 | struct hdlcd_drm_private *hdlcd; |
| 235 | struct drm_gem_cma_object *gem; | 244 | struct drm_gem_cma_object *gem; |
| 245 | unsigned int depth, bpp; | ||
| 246 | u32 src_w, src_h, dest_w, dest_h; | ||
| 236 | dma_addr_t scanout_start; | 247 | dma_addr_t scanout_start; |
| 237 | 248 | ||
| 238 | if (!plane->state->crtc || !plane->state->fb) | 249 | if (!plane->state->fb) |
| 239 | return; | 250 | return; |
| 240 | 251 | ||
| 241 | hdlcd = crtc_to_hdlcd_priv(plane->state->crtc); | 252 | drm_fb_get_bpp_depth(plane->state->fb->pixel_format, &depth, &bpp); |
| 253 | src_w = plane->state->src_w >> 16; | ||
| 254 | src_h = plane->state->src_h >> 16; | ||
| 255 | dest_w = plane->state->crtc_w; | ||
| 256 | dest_h = plane->state->crtc_h; | ||
| 242 | gem = drm_fb_cma_get_gem_obj(plane->state->fb, 0); | 257 | gem = drm_fb_cma_get_gem_obj(plane->state->fb, 0); |
| 243 | scanout_start = gem->paddr; | 258 | scanout_start = gem->paddr + plane->state->fb->offsets[0] + |
| 259 | plane->state->crtc_y * plane->state->fb->pitches[0] + | ||
| 260 | plane->state->crtc_x * bpp / 8; | ||
| 261 | |||
| 262 | hdlcd = plane->dev->dev_private; | ||
| 263 | hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, plane->state->fb->pitches[0]); | ||
| 264 | hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, plane->state->fb->pitches[0]); | ||
| 265 | hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, dest_h - 1); | ||
| 244 | hdlcd_write(hdlcd, HDLCD_REG_FB_BASE, scanout_start); | 266 | hdlcd_write(hdlcd, HDLCD_REG_FB_BASE, scanout_start); |
| 245 | } | 267 | } |
| 246 | 268 | ||
| 247 | static const struct drm_plane_helper_funcs hdlcd_plane_helper_funcs = { | 269 | static const struct drm_plane_helper_funcs hdlcd_plane_helper_funcs = { |
| 248 | .prepare_fb = NULL, | ||
| 249 | .cleanup_fb = NULL, | ||
| 250 | .atomic_check = hdlcd_plane_atomic_check, | 270 | .atomic_check = hdlcd_plane_atomic_check, |
| 251 | .atomic_update = hdlcd_plane_atomic_update, | 271 | .atomic_update = hdlcd_plane_atomic_update, |
| 252 | }; | 272 | }; |
| @@ -294,16 +314,6 @@ static struct drm_plane *hdlcd_plane_init(struct drm_device *drm) | |||
| 294 | return plane; | 314 | return plane; |
| 295 | } | 315 | } |
| 296 | 316 | ||
| 297 | void hdlcd_crtc_suspend(struct drm_crtc *crtc) | ||
| 298 | { | ||
| 299 | hdlcd_crtc_disable(crtc); | ||
| 300 | } | ||
| 301 | |||
| 302 | void hdlcd_crtc_resume(struct drm_crtc *crtc) | ||
| 303 | { | ||
| 304 | hdlcd_crtc_enable(crtc); | ||
| 305 | } | ||
| 306 | |||
| 307 | int hdlcd_setup_crtc(struct drm_device *drm) | 317 | int hdlcd_setup_crtc(struct drm_device *drm) |
| 308 | { | 318 | { |
| 309 | struct hdlcd_drm_private *hdlcd = drm->dev_private; | 319 | struct hdlcd_drm_private *hdlcd = drm->dev_private; |
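[Editor's note] The atomic_begin change above follows the usual DRM pattern for completing page-flip events: under dev->event_lock, try to take a vblank reference and arm the event so the core delivers it on the next vblank, otherwise send it immediately. A hedged sketch of that pattern — only the drm_* helpers are real API, the function name is a placeholder:

    #include <drm/drmP.h>
    #include <drm/drm_crtc.h>

    static void example_crtc_atomic_begin(struct drm_crtc *crtc,
                                          struct drm_crtc_state *old_state)
    {
        struct drm_pending_vblank_event *event = crtc->state->event;

        if (!event)
            return;

        crtc->state->event = NULL;

        spin_lock_irq(&crtc->dev->event_lock);
        if (drm_crtc_vblank_get(crtc) == 0)
            /* reference is dropped when the event fires at vblank */
            drm_crtc_arm_vblank_event(crtc, event);
        else
            /* vblank unavailable: complete the event right away */
            drm_crtc_send_vblank_event(crtc, event);
        spin_unlock_irq(&crtc->dev->event_lock);
    }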
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c index b987c63ba8d6..a6ca36f0096f 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.c +++ b/drivers/gpu/drm/arm/hdlcd_drv.c | |||
| @@ -49,8 +49,6 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags) | |||
| 49 | atomic_set(&hdlcd->dma_end_count, 0); | 49 | atomic_set(&hdlcd->dma_end_count, 0); |
| 50 | #endif | 50 | #endif |
| 51 | 51 | ||
| 52 | INIT_LIST_HEAD(&hdlcd->event_list); | ||
| 53 | |||
| 54 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 52 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 55 | hdlcd->mmio = devm_ioremap_resource(drm->dev, res); | 53 | hdlcd->mmio = devm_ioremap_resource(drm->dev, res); |
| 56 | if (IS_ERR(hdlcd->mmio)) { | 54 | if (IS_ERR(hdlcd->mmio)) { |
| @@ -84,11 +82,7 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags) | |||
| 84 | goto setup_fail; | 82 | goto setup_fail; |
| 85 | } | 83 | } |
| 86 | 84 | ||
| 87 | pm_runtime_enable(drm->dev); | ||
| 88 | |||
| 89 | pm_runtime_get_sync(drm->dev); | ||
| 90 | ret = drm_irq_install(drm, platform_get_irq(pdev, 0)); | 85 | ret = drm_irq_install(drm, platform_get_irq(pdev, 0)); |
| 91 | pm_runtime_put_sync(drm->dev); | ||
| 92 | if (ret < 0) { | 86 | if (ret < 0) { |
| 93 | DRM_ERROR("failed to install IRQ handler\n"); | 87 | DRM_ERROR("failed to install IRQ handler\n"); |
| 94 | goto irq_fail; | 88 | goto irq_fail; |
| @@ -164,24 +158,9 @@ static irqreturn_t hdlcd_irq(int irq, void *arg) | |||
| 164 | atomic_inc(&hdlcd->vsync_count); | 158 | atomic_inc(&hdlcd->vsync_count); |
| 165 | 159 | ||
| 166 | #endif | 160 | #endif |
| 167 | if (irq_status & HDLCD_INTERRUPT_VSYNC) { | 161 | if (irq_status & HDLCD_INTERRUPT_VSYNC) |
| 168 | bool events_sent = false; | ||
| 169 | unsigned long flags; | ||
| 170 | struct drm_pending_vblank_event *e, *t; | ||
| 171 | |||
| 172 | drm_crtc_handle_vblank(&hdlcd->crtc); | 162 | drm_crtc_handle_vblank(&hdlcd->crtc); |
| 173 | 163 | ||
| 174 | spin_lock_irqsave(&drm->event_lock, flags); | ||
| 175 | list_for_each_entry_safe(e, t, &hdlcd->event_list, base.link) { | ||
| 176 | list_del(&e->base.link); | ||
| 177 | drm_crtc_send_vblank_event(&hdlcd->crtc, e); | ||
| 178 | events_sent = true; | ||
| 179 | } | ||
| 180 | if (events_sent) | ||
| 181 | drm_crtc_vblank_put(&hdlcd->crtc); | ||
| 182 | spin_unlock_irqrestore(&drm->event_lock, flags); | ||
| 183 | } | ||
| 184 | |||
| 185 | /* acknowledge interrupt(s) */ | 164 | /* acknowledge interrupt(s) */ |
| 186 | hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, irq_status); | 165 | hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, irq_status); |
| 187 | 166 | ||
| @@ -275,6 +254,7 @@ static int hdlcd_show_pxlclock(struct seq_file *m, void *arg) | |||
| 275 | static struct drm_info_list hdlcd_debugfs_list[] = { | 254 | static struct drm_info_list hdlcd_debugfs_list[] = { |
| 276 | { "interrupt_count", hdlcd_show_underrun_count, 0 }, | 255 | { "interrupt_count", hdlcd_show_underrun_count, 0 }, |
| 277 | { "clocks", hdlcd_show_pxlclock, 0 }, | 256 | { "clocks", hdlcd_show_pxlclock, 0 }, |
| 257 | { "fb", drm_fb_cma_debugfs_show, 0 }, | ||
| 278 | }; | 258 | }; |
| 279 | 259 | ||
| 280 | static int hdlcd_debugfs_init(struct drm_minor *minor) | 260 | static int hdlcd_debugfs_init(struct drm_minor *minor) |
| @@ -357,6 +337,8 @@ static int hdlcd_drm_bind(struct device *dev) | |||
| 357 | return -ENOMEM; | 337 | return -ENOMEM; |
| 358 | 338 | ||
| 359 | drm->dev_private = hdlcd; | 339 | drm->dev_private = hdlcd; |
| 340 | dev_set_drvdata(dev, drm); | ||
| 341 | |||
| 360 | hdlcd_setup_mode_config(drm); | 342 | hdlcd_setup_mode_config(drm); |
| 361 | ret = hdlcd_load(drm, 0); | 343 | ret = hdlcd_load(drm, 0); |
| 362 | if (ret) | 344 | if (ret) |
| @@ -366,14 +348,18 @@ static int hdlcd_drm_bind(struct device *dev) | |||
| 366 | if (ret) | 348 | if (ret) |
| 367 | goto err_unload; | 349 | goto err_unload; |
| 368 | 350 | ||
| 369 | dev_set_drvdata(dev, drm); | ||
| 370 | |||
| 371 | ret = component_bind_all(dev, drm); | 351 | ret = component_bind_all(dev, drm); |
| 372 | if (ret) { | 352 | if (ret) { |
| 373 | DRM_ERROR("Failed to bind all components\n"); | 353 | DRM_ERROR("Failed to bind all components\n"); |
| 374 | goto err_unregister; | 354 | goto err_unregister; |
| 375 | } | 355 | } |
| 376 | 356 | ||
| 357 | ret = pm_runtime_set_active(dev); | ||
| 358 | if (ret) | ||
| 359 | goto err_pm_active; | ||
| 360 | |||
| 361 | pm_runtime_enable(dev); | ||
| 362 | |||
| 377 | ret = drm_vblank_init(drm, drm->mode_config.num_crtc); | 363 | ret = drm_vblank_init(drm, drm->mode_config.num_crtc); |
| 378 | if (ret < 0) { | 364 | if (ret < 0) { |
| 379 | DRM_ERROR("failed to initialise vblank\n"); | 365 | DRM_ERROR("failed to initialise vblank\n"); |
| @@ -399,16 +385,16 @@ err_fbdev: | |||
| 399 | drm_mode_config_cleanup(drm); | 385 | drm_mode_config_cleanup(drm); |
| 400 | drm_vblank_cleanup(drm); | 386 | drm_vblank_cleanup(drm); |
| 401 | err_vblank: | 387 | err_vblank: |
| 388 | pm_runtime_disable(drm->dev); | ||
| 389 | err_pm_active: | ||
| 402 | component_unbind_all(dev, drm); | 390 | component_unbind_all(dev, drm); |
| 403 | err_unregister: | 391 | err_unregister: |
| 404 | drm_dev_unregister(drm); | 392 | drm_dev_unregister(drm); |
| 405 | err_unload: | 393 | err_unload: |
| 406 | pm_runtime_get_sync(drm->dev); | ||
| 407 | drm_irq_uninstall(drm); | 394 | drm_irq_uninstall(drm); |
| 408 | pm_runtime_put_sync(drm->dev); | ||
| 409 | pm_runtime_disable(drm->dev); | ||
| 410 | of_reserved_mem_device_release(drm->dev); | 395 | of_reserved_mem_device_release(drm->dev); |
| 411 | err_free: | 396 | err_free: |
| 397 | dev_set_drvdata(dev, NULL); | ||
| 412 | drm_dev_unref(drm); | 398 | drm_dev_unref(drm); |
| 413 | 399 | ||
| 414 | return ret; | 400 | return ret; |
| @@ -495,30 +481,34 @@ MODULE_DEVICE_TABLE(of, hdlcd_of_match); | |||
| 495 | static int __maybe_unused hdlcd_pm_suspend(struct device *dev) | 481 | static int __maybe_unused hdlcd_pm_suspend(struct device *dev) |
| 496 | { | 482 | { |
| 497 | struct drm_device *drm = dev_get_drvdata(dev); | 483 | struct drm_device *drm = dev_get_drvdata(dev); |
| 498 | struct drm_crtc *crtc; | 484 | struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL; |
| 499 | 485 | ||
| 500 | if (pm_runtime_suspended(dev)) | 486 | if (!hdlcd) |
| 501 | return 0; | 487 | return 0; |
| 502 | 488 | ||
| 503 | drm_modeset_lock_all(drm); | 489 | drm_kms_helper_poll_disable(drm); |
| 504 | list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) | 490 | |
| 505 | hdlcd_crtc_suspend(crtc); | 491 | hdlcd->state = drm_atomic_helper_suspend(drm); |
| 506 | drm_modeset_unlock_all(drm); | 492 | if (IS_ERR(hdlcd->state)) { |
| 493 | drm_kms_helper_poll_enable(drm); | ||
| 494 | return PTR_ERR(hdlcd->state); | ||
| 495 | } | ||
| 496 | |||
| 507 | return 0; | 497 | return 0; |
| 508 | } | 498 | } |
| 509 | 499 | ||
| 510 | static int __maybe_unused hdlcd_pm_resume(struct device *dev) | 500 | static int __maybe_unused hdlcd_pm_resume(struct device *dev) |
| 511 | { | 501 | { |
| 512 | struct drm_device *drm = dev_get_drvdata(dev); | 502 | struct drm_device *drm = dev_get_drvdata(dev); |
| 513 | struct drm_crtc *crtc; | 503 | struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL; |
| 514 | 504 | ||
| 515 | if (!pm_runtime_suspended(dev)) | 505 | if (!hdlcd) |
| 516 | return 0; | 506 | return 0; |
| 517 | 507 | ||
| 518 | drm_modeset_lock_all(drm); | 508 | drm_atomic_helper_resume(drm, hdlcd->state); |
| 519 | list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) | 509 | drm_kms_helper_poll_enable(drm); |
| 520 | hdlcd_crtc_resume(crtc); | 510 | pm_runtime_set_active(dev); |
| 521 | drm_modeset_unlock_all(drm); | 511 | |
| 522 | return 0; | 512 | return 0; |
| 523 | } | 513 | } |
| 524 | 514 | ||
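[Editor's note] The suspend/resume rework above switches to the generic atomic helpers. The shape of that flow, assuming a driver-private field to stash the saved state (all names below are placeholders, not the hdlcd ones):

    #include <linux/err.h>
    #include <drm/drm_atomic_helper.h>
    #include <drm/drm_crtc_helper.h>

    struct example_priv {
        struct drm_atomic_state *state;    /* saved across suspend */
    };

    static int example_pm_suspend(struct drm_device *drm, struct example_priv *priv)
    {
        drm_kms_helper_poll_disable(drm);

        priv->state = drm_atomic_helper_suspend(drm);
        if (IS_ERR(priv->state)) {
            drm_kms_helper_poll_enable(drm);
            return PTR_ERR(priv->state);
        }
        return 0;
    }

    static int example_pm_resume(struct drm_device *drm, struct example_priv *priv)
    {
        drm_atomic_helper_resume(drm, priv->state);
        drm_kms_helper_poll_enable(drm);
        return 0;
    }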
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.h b/drivers/gpu/drm/arm/hdlcd_drv.h index aa234784f053..e3950a071152 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.h +++ b/drivers/gpu/drm/arm/hdlcd_drv.h | |||
| @@ -9,10 +9,9 @@ struct hdlcd_drm_private { | |||
| 9 | void __iomem *mmio; | 9 | void __iomem *mmio; |
| 10 | struct clk *clk; | 10 | struct clk *clk; |
| 11 | struct drm_fbdev_cma *fbdev; | 11 | struct drm_fbdev_cma *fbdev; |
| 12 | struct drm_framebuffer *fb; | ||
| 13 | struct list_head event_list; | ||
| 14 | struct drm_crtc crtc; | 12 | struct drm_crtc crtc; |
| 15 | struct drm_plane *plane; | 13 | struct drm_plane *plane; |
| 14 | struct drm_atomic_state *state; | ||
| 16 | #ifdef CONFIG_DEBUG_FS | 15 | #ifdef CONFIG_DEBUG_FS |
| 17 | atomic_t buffer_underrun_count; | 16 | atomic_t buffer_underrun_count; |
| 18 | atomic_t bus_error_count; | 17 | atomic_t bus_error_count; |
| @@ -36,7 +35,5 @@ static inline u32 hdlcd_read(struct hdlcd_drm_private *hdlcd, unsigned int reg) | |||
| 36 | 35 | ||
| 37 | int hdlcd_setup_crtc(struct drm_device *dev); | 36 | int hdlcd_setup_crtc(struct drm_device *dev); |
| 38 | void hdlcd_set_scanout(struct hdlcd_drm_private *hdlcd); | 37 | void hdlcd_set_scanout(struct hdlcd_drm_private *hdlcd); |
| 39 | void hdlcd_crtc_suspend(struct drm_crtc *crtc); | ||
| 40 | void hdlcd_crtc_resume(struct drm_crtc *crtc); | ||
| 41 | 38 | ||
| 42 | #endif /* __HDLCD_DRV_H__ */ | 39 | #endif /* __HDLCD_DRV_H__ */ |
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index cf23a755f777..bd12231ab0cd 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | |||
| @@ -391,12 +391,11 @@ void atmel_hlcdc_crtc_reset(struct drm_crtc *crtc) | |||
| 391 | { | 391 | { |
| 392 | struct atmel_hlcdc_crtc_state *state; | 392 | struct atmel_hlcdc_crtc_state *state; |
| 393 | 393 | ||
| 394 | if (crtc->state && crtc->state->mode_blob) | ||
| 395 | drm_property_unreference_blob(crtc->state->mode_blob); | ||
| 396 | |||
| 397 | if (crtc->state) { | 394 | if (crtc->state) { |
| 395 | __drm_atomic_helper_crtc_destroy_state(crtc->state); | ||
| 398 | state = drm_crtc_state_to_atmel_hlcdc_crtc_state(crtc->state); | 396 | state = drm_crtc_state_to_atmel_hlcdc_crtc_state(crtc->state); |
| 399 | kfree(state); | 397 | kfree(state); |
| 398 | crtc->state = NULL; | ||
| 400 | } | 399 | } |
| 401 | 400 | ||
| 402 | state = kzalloc(sizeof(*state), GFP_KERNEL); | 401 | state = kzalloc(sizeof(*state), GFP_KERNEL); |
| @@ -415,8 +414,9 @@ atmel_hlcdc_crtc_duplicate_state(struct drm_crtc *crtc) | |||
| 415 | return NULL; | 414 | return NULL; |
| 416 | 415 | ||
| 417 | state = kmalloc(sizeof(*state), GFP_KERNEL); | 416 | state = kmalloc(sizeof(*state), GFP_KERNEL); |
| 418 | if (state) | 417 | if (!state) |
| 419 | __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base); | 418 | return NULL; |
| 419 | __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base); | ||
| 420 | 420 | ||
| 421 | cur = drm_crtc_state_to_atmel_hlcdc_crtc_state(crtc->state); | 421 | cur = drm_crtc_state_to_atmel_hlcdc_crtc_state(crtc->state); |
| 422 | state->output_mode = cur->output_mode; | 422 | state->output_mode = cur->output_mode; |
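[Editor's note] The duplicate_state fix above guards the allocation before touching the copy helper. A generic sketch of a subclassed CRTC state duplication with that check, using placeholder names for the driver state:

    #include <linux/slab.h>
    #include <drm/drm_atomic_helper.h>

    struct example_crtc_state {
        struct drm_crtc_state base;
        unsigned int output_mode;
    };

    static struct drm_crtc_state *
    example_crtc_duplicate_state(struct drm_crtc *crtc)
    {
        struct example_crtc_state *state, *cur;

        if (!crtc->state)
            return NULL;

        state = kmalloc(sizeof(*state), GFP_KERNEL);
        if (!state)
            return NULL;    /* bail out before using the copy helper */

        __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

        cur = container_of(crtc->state, struct example_crtc_state, base);
        state->output_mode = cur->output_mode;

        return &state->base;
    }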
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 3ff1ed7b33db..c204ef32df16 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c | |||
| @@ -351,6 +351,8 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state, | |||
| 351 | drm_property_unreference_blob(state->mode_blob); | 351 | drm_property_unreference_blob(state->mode_blob); |
| 352 | state->mode_blob = NULL; | 352 | state->mode_blob = NULL; |
| 353 | 353 | ||
| 354 | memset(&state->mode, 0, sizeof(state->mode)); | ||
| 355 | |||
| 354 | if (blob) { | 356 | if (blob) { |
| 355 | if (blob->length != sizeof(struct drm_mode_modeinfo) || | 357 | if (blob->length != sizeof(struct drm_mode_modeinfo) || |
| 356 | drm_mode_convert_umode(&state->mode, | 358 | drm_mode_convert_umode(&state->mode, |
| @@ -363,7 +365,6 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state, | |||
| 363 | DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n", | 365 | DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n", |
| 364 | state->mode.name, state); | 366 | state->mode.name, state); |
| 365 | } else { | 367 | } else { |
| 366 | memset(&state->mode, 0, sizeof(state->mode)); | ||
| 367 | state->enable = false; | 368 | state->enable = false; |
| 368 | DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n", | 369 | DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n", |
| 369 | state); | 370 | state); |
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index d2a6d958ca76..0e3cc66aa8b7 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c | |||
| @@ -2821,8 +2821,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, | |||
| 2821 | goto out; | 2821 | goto out; |
| 2822 | } | 2822 | } |
| 2823 | 2823 | ||
| 2824 | drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); | ||
| 2825 | |||
| 2826 | /* | 2824 | /* |
| 2827 | * Check whether the primary plane supports the fb pixel format. | 2825 | * Check whether the primary plane supports the fb pixel format. |
| 2828 | * Drivers not implementing the universal planes API use a | 2826 | * Drivers not implementing the universal planes API use a |
| @@ -4841,7 +4839,8 @@ bool drm_property_change_valid_get(struct drm_property *property, | |||
| 4841 | if (value == 0) | 4839 | if (value == 0) |
| 4842 | return true; | 4840 | return true; |
| 4843 | 4841 | ||
| 4844 | return _object_find(property->dev, value, property->values[0]) != NULL; | 4842 | *ref = _object_find(property->dev, value, property->values[0]); |
| 4843 | return *ref != NULL; | ||
| 4845 | } | 4844 | } |
| 4846 | 4845 | ||
| 4847 | for (i = 0; i < property->num_values; i++) | 4846 | for (i = 0; i < property->num_values; i++) |
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c index 172cafe11c71..5075fae3c4e2 100644 --- a/drivers/gpu/drm/drm_fb_cma_helper.c +++ b/drivers/gpu/drm/drm_fb_cma_helper.c | |||
| @@ -445,7 +445,7 @@ err_cma_destroy: | |||
| 445 | err_fb_info_destroy: | 445 | err_fb_info_destroy: |
| 446 | drm_fb_helper_release_fbi(helper); | 446 | drm_fb_helper_release_fbi(helper); |
| 447 | err_gem_free_object: | 447 | err_gem_free_object: |
| 448 | dev->driver->gem_free_object(&obj->base); | 448 | drm_gem_object_unreference_unlocked(&obj->base); |
| 449 | return ret; | 449 | return ret; |
| 450 | } | 450 | } |
| 451 | EXPORT_SYMBOL(drm_fbdev_cma_create_with_funcs); | 451 | EXPORT_SYMBOL(drm_fbdev_cma_create_with_funcs); |
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index e1ab008b3f08..1d6c335584ec 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c | |||
| @@ -121,7 +121,7 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, | |||
| 121 | return cma_obj; | 121 | return cma_obj; |
| 122 | 122 | ||
| 123 | error: | 123 | error: |
| 124 | drm->driver->gem_free_object(&cma_obj->base); | 124 | drm_gem_object_unreference_unlocked(&cma_obj->base); |
| 125 | return ERR_PTR(ret); | 125 | return ERR_PTR(ret); |
| 126 | } | 126 | } |
| 127 | EXPORT_SYMBOL_GPL(drm_gem_cma_create); | 127 | EXPORT_SYMBOL_GPL(drm_gem_cma_create); |
| @@ -162,18 +162,12 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv, | |||
| 162 | * and handle has the id what user can see. | 162 | * and handle has the id what user can see. |
| 163 | */ | 163 | */ |
| 164 | ret = drm_gem_handle_create(file_priv, gem_obj, handle); | 164 | ret = drm_gem_handle_create(file_priv, gem_obj, handle); |
| 165 | if (ret) | ||
| 166 | goto err_handle_create; | ||
| 167 | |||
| 168 | /* drop reference from allocate - handle holds it now. */ | 165 | /* drop reference from allocate - handle holds it now. */ |
| 169 | drm_gem_object_unreference_unlocked(gem_obj); | 166 | drm_gem_object_unreference_unlocked(gem_obj); |
| 167 | if (ret) | ||
| 168 | return ERR_PTR(ret); | ||
| 170 | 169 | ||
| 171 | return cma_obj; | 170 | return cma_obj; |
| 172 | |||
| 173 | err_handle_create: | ||
| 174 | drm->driver->gem_free_object(gem_obj); | ||
| 175 | |||
| 176 | return ERR_PTR(ret); | ||
| 177 | } | 171 | } |
| 178 | 172 | ||
| 179 | /** | 173 | /** |
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 7def3d58da18..e5e6f504d8cc 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c | |||
| @@ -1518,6 +1518,8 @@ int drm_mode_convert_umode(struct drm_display_mode *out, | |||
| 1518 | if (out->status != MODE_OK) | 1518 | if (out->status != MODE_OK) |
| 1519 | goto out; | 1519 | goto out; |
| 1520 | 1520 | ||
| 1521 | drm_mode_set_crtcinfo(out, CRTC_INTERLACE_HALVE_V); | ||
| 1522 | |||
| 1521 | ret = 0; | 1523 | ret = 0; |
| 1522 | 1524 | ||
| 1523 | out: | 1525 | out: |
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index 0ec1ad961e0d..dc723f7ead7d 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | |||
| @@ -42,9 +42,10 @@ static const struct regmap_config fsl_dcu_regmap_config = { | |||
| 42 | .reg_bits = 32, | 42 | .reg_bits = 32, |
| 43 | .reg_stride = 4, | 43 | .reg_stride = 4, |
| 44 | .val_bits = 32, | 44 | .val_bits = 32, |
| 45 | .cache_type = REGCACHE_RBTREE, | 45 | .cache_type = REGCACHE_FLAT, |
| 46 | 46 | ||
| 47 | .volatile_reg = fsl_dcu_drm_is_volatile_reg, | 47 | .volatile_reg = fsl_dcu_drm_is_volatile_reg, |
| 48 | .max_register = 0x11fc, | ||
| 48 | }; | 49 | }; |
| 49 | 50 | ||
| 50 | static int fsl_dcu_drm_irq_init(struct drm_device *dev) | 51 | static int fsl_dcu_drm_irq_init(struct drm_device *dev) |
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 1f14b602882b..82656654fb21 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c | |||
| @@ -97,8 +97,8 @@ static struct imx_drm_crtc *imx_drm_find_crtc(struct drm_crtc *crtc) | |||
| 97 | return NULL; | 97 | return NULL; |
| 98 | } | 98 | } |
| 99 | 99 | ||
| 100 | int imx_drm_set_bus_format_pins(struct drm_encoder *encoder, u32 bus_format, | 100 | int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format, |
| 101 | int hsync_pin, int vsync_pin) | 101 | int hsync_pin, int vsync_pin, u32 bus_flags) |
| 102 | { | 102 | { |
| 103 | struct imx_drm_crtc_helper_funcs *helper; | 103 | struct imx_drm_crtc_helper_funcs *helper; |
| 104 | struct imx_drm_crtc *imx_crtc; | 104 | struct imx_drm_crtc *imx_crtc; |
| @@ -110,14 +110,17 @@ int imx_drm_set_bus_format_pins(struct drm_encoder *encoder, u32 bus_format, | |||
| 110 | helper = &imx_crtc->imx_drm_helper_funcs; | 110 | helper = &imx_crtc->imx_drm_helper_funcs; |
| 111 | if (helper->set_interface_pix_fmt) | 111 | if (helper->set_interface_pix_fmt) |
| 112 | return helper->set_interface_pix_fmt(encoder->crtc, | 112 | return helper->set_interface_pix_fmt(encoder->crtc, |
| 113 | bus_format, hsync_pin, vsync_pin); | 113 | bus_format, hsync_pin, vsync_pin, |
| 114 | bus_flags); | ||
| 114 | return 0; | 115 | return 0; |
| 115 | } | 116 | } |
| 116 | EXPORT_SYMBOL_GPL(imx_drm_set_bus_format_pins); | 117 | EXPORT_SYMBOL_GPL(imx_drm_set_bus_config); |
| 117 | 118 | ||
| 118 | int imx_drm_set_bus_format(struct drm_encoder *encoder, u32 bus_format) | 119 | int imx_drm_set_bus_format(struct drm_encoder *encoder, u32 bus_format) |
| 119 | { | 120 | { |
| 120 | return imx_drm_set_bus_format_pins(encoder, bus_format, 2, 3); | 121 | return imx_drm_set_bus_config(encoder, bus_format, 2, 3, |
| 122 | DRM_BUS_FLAG_DE_HIGH | | ||
| 123 | DRM_BUS_FLAG_PIXDATA_NEGEDGE); | ||
| 121 | } | 124 | } |
| 122 | EXPORT_SYMBOL_GPL(imx_drm_set_bus_format); | 125 | EXPORT_SYMBOL_GPL(imx_drm_set_bus_format); |
| 123 | 126 | ||
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h index b0241b9d1334..74320a1723b7 100644 --- a/drivers/gpu/drm/imx/imx-drm.h +++ b/drivers/gpu/drm/imx/imx-drm.h | |||
| @@ -19,7 +19,8 @@ struct imx_drm_crtc_helper_funcs { | |||
| 19 | int (*enable_vblank)(struct drm_crtc *crtc); | 19 | int (*enable_vblank)(struct drm_crtc *crtc); |
| 20 | void (*disable_vblank)(struct drm_crtc *crtc); | 20 | void (*disable_vblank)(struct drm_crtc *crtc); |
| 21 | int (*set_interface_pix_fmt)(struct drm_crtc *crtc, | 21 | int (*set_interface_pix_fmt)(struct drm_crtc *crtc, |
| 22 | u32 bus_format, int hsync_pin, int vsync_pin); | 22 | u32 bus_format, int hsync_pin, int vsync_pin, |
| 23 | u32 bus_flags); | ||
| 23 | const struct drm_crtc_helper_funcs *crtc_helper_funcs; | 24 | const struct drm_crtc_helper_funcs *crtc_helper_funcs; |
| 24 | const struct drm_crtc_funcs *crtc_funcs; | 25 | const struct drm_crtc_funcs *crtc_funcs; |
| 25 | }; | 26 | }; |
| @@ -41,8 +42,8 @@ void imx_drm_mode_config_init(struct drm_device *drm); | |||
| 41 | 42 | ||
| 42 | struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb); | 43 | struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb); |
| 43 | 44 | ||
| 44 | int imx_drm_set_bus_format_pins(struct drm_encoder *encoder, | 45 | int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format, |
| 45 | u32 bus_format, int hsync_pin, int vsync_pin); | 46 | int hsync_pin, int vsync_pin, u32 bus_flags); |
| 46 | int imx_drm_set_bus_format(struct drm_encoder *encoder, | 47 | int imx_drm_set_bus_format(struct drm_encoder *encoder, |
| 47 | u32 bus_format); | 48 | u32 bus_format); |
| 48 | 49 | ||
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index a58eee59550a..beff793bb717 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> | 25 | #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> |
| 26 | #include <linux/of_device.h> | 26 | #include <linux/of_device.h> |
| 27 | #include <linux/of_graph.h> | 27 | #include <linux/of_graph.h> |
| 28 | #include <video/of_display_timing.h> | ||
| 28 | #include <video/of_videomode.h> | 29 | #include <video/of_videomode.h> |
| 29 | #include <linux/regmap.h> | 30 | #include <linux/regmap.h> |
| 30 | #include <linux/videodev2.h> | 31 | #include <linux/videodev2.h> |
| @@ -59,6 +60,7 @@ struct imx_ldb_channel { | |||
| 59 | struct drm_encoder encoder; | 60 | struct drm_encoder encoder; |
| 60 | struct drm_panel *panel; | 61 | struct drm_panel *panel; |
| 61 | struct device_node *child; | 62 | struct device_node *child; |
| 63 | struct i2c_adapter *ddc; | ||
| 62 | int chno; | 64 | int chno; |
| 63 | void *edid; | 65 | void *edid; |
| 64 | int edid_len; | 66 | int edid_len; |
| @@ -107,6 +109,9 @@ static int imx_ldb_connector_get_modes(struct drm_connector *connector) | |||
| 107 | return num_modes; | 109 | return num_modes; |
| 108 | } | 110 | } |
| 109 | 111 | ||
| 112 | if (!imx_ldb_ch->edid && imx_ldb_ch->ddc) | ||
| 113 | imx_ldb_ch->edid = drm_get_edid(connector, imx_ldb_ch->ddc); | ||
| 114 | |||
| 110 | if (imx_ldb_ch->edid) { | 115 | if (imx_ldb_ch->edid) { |
| 111 | drm_mode_connector_update_edid_property(connector, | 116 | drm_mode_connector_update_edid_property(connector, |
| 112 | imx_ldb_ch->edid); | 117 | imx_ldb_ch->edid); |
| @@ -553,7 +558,8 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) | |||
| 553 | 558 | ||
| 554 | for_each_child_of_node(np, child) { | 559 | for_each_child_of_node(np, child) { |
| 555 | struct imx_ldb_channel *channel; | 560 | struct imx_ldb_channel *channel; |
| 556 | struct device_node *port; | 561 | struct device_node *ddc_node; |
| 562 | struct device_node *ep; | ||
| 557 | 563 | ||
| 558 | ret = of_property_read_u32(child, "reg", &i); | 564 | ret = of_property_read_u32(child, "reg", &i); |
| 559 | if (ret || i < 0 || i > 1) | 565 | if (ret || i < 0 || i > 1) |
| @@ -576,33 +582,54 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) | |||
| 576 | * The output port is port@4 with an external 4-port mux or | 582 | * The output port is port@4 with an external 4-port mux or |
| 577 | * port@2 with the internal 2-port mux. | 583 | * port@2 with the internal 2-port mux. |
| 578 | */ | 584 | */ |
| 579 | port = of_graph_get_port_by_id(child, imx_ldb->lvds_mux ? 4 : 2); | 585 | ep = of_graph_get_endpoint_by_regs(child, |
| 580 | if (port) { | 586 | imx_ldb->lvds_mux ? 4 : 2, |
| 581 | struct device_node *endpoint, *remote; | 587 | -1); |
| 582 | 588 | if (ep) { | |
| 583 | endpoint = of_get_child_by_name(port, "endpoint"); | 589 | struct device_node *remote; |
| 584 | if (endpoint) { | 590 | |
| 585 | remote = of_graph_get_remote_port_parent(endpoint); | 591 | remote = of_graph_get_remote_port_parent(ep); |
| 586 | if (remote) | 592 | of_node_put(ep); |
| 587 | channel->panel = of_drm_find_panel(remote); | 593 | if (remote) |
| 588 | else | 594 | channel->panel = of_drm_find_panel(remote); |
| 589 | return -EPROBE_DEFER; | 595 | else |
| 590 | if (!channel->panel) { | 596 | return -EPROBE_DEFER; |
| 591 | dev_err(dev, "panel not found: %s\n", | 597 | of_node_put(remote); |
| 592 | remote->full_name); | 598 | if (!channel->panel) { |
| 593 | return -EPROBE_DEFER; | 599 | dev_err(dev, "panel not found: %s\n", |
| 594 | } | 600 | remote->full_name); |
| 601 | return -EPROBE_DEFER; | ||
| 595 | } | 602 | } |
| 596 | } | 603 | } |
| 597 | 604 | ||
| 598 | edidp = of_get_property(child, "edid", &channel->edid_len); | 605 | ddc_node = of_parse_phandle(child, "ddc-i2c-bus", 0); |
| 599 | if (edidp) { | 606 | if (ddc_node) { |
| 600 | channel->edid = kmemdup(edidp, channel->edid_len, | 607 | channel->ddc = of_find_i2c_adapter_by_node(ddc_node); |
| 601 | GFP_KERNEL); | 608 | of_node_put(ddc_node); |
| 602 | } else if (!channel->panel) { | 609 | if (!channel->ddc) { |
| 603 | ret = of_get_drm_display_mode(child, &channel->mode, 0); | 610 | dev_warn(dev, "failed to get ddc i2c adapter\n"); |
| 604 | if (!ret) | 611 | return -EPROBE_DEFER; |
| 605 | channel->mode_valid = 1; | 612 | } |
| 613 | } | ||
| 614 | |||
| 615 | if (!channel->ddc) { | ||
| 616 | /* if no DDC available, fallback to hardcoded EDID */ | ||
| 617 | dev_dbg(dev, "no ddc available\n"); | ||
| 618 | |||
| 619 | edidp = of_get_property(child, "edid", | ||
| 620 | &channel->edid_len); | ||
| 621 | if (edidp) { | ||
| 622 | channel->edid = kmemdup(edidp, | ||
| 623 | channel->edid_len, | ||
| 624 | GFP_KERNEL); | ||
| 625 | } else if (!channel->panel) { | ||
| 626 | /* fallback to display-timings node */ | ||
| 627 | ret = of_get_drm_display_mode(child, | ||
| 628 | &channel->mode, | ||
| 629 | OF_USE_NATIVE_MODE); | ||
| 630 | if (!ret) | ||
| 631 | channel->mode_valid = 1; | ||
| 632 | } | ||
| 606 | } | 633 | } |
| 607 | 634 | ||
| 608 | channel->bus_format = of_get_bus_format(dev, child); | 635 | channel->bus_format = of_get_bus_format(dev, child); |
| @@ -647,6 +674,7 @@ static void imx_ldb_unbind(struct device *dev, struct device *master, | |||
| 647 | channel->encoder.funcs->destroy(&channel->encoder); | 674 | channel->encoder.funcs->destroy(&channel->encoder); |
| 648 | 675 | ||
| 649 | kfree(channel->edid); | 676 | kfree(channel->edid); |
| 677 | i2c_put_adapter(channel->ddc); | ||
| 650 | } | 678 | } |
| 651 | } | 679 | } |
| 652 | 680 | ||
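[Editor's note] The imx-ldb change above looks up an optional ddc-i2c-bus phandle and only falls back to a hardcoded EDID or display-timings node when no adapter is found. A small sketch of that lookup, with a placeholder function name:

    #include <linux/i2c.h>
    #include <linux/of.h>

    static struct i2c_adapter *example_get_ddc(struct device_node *child)
    {
        struct device_node *ddc_node = of_parse_phandle(child, "ddc-i2c-bus", 0);
        struct i2c_adapter *ddc = NULL;

        if (ddc_node) {
            ddc = of_find_i2c_adapter_by_node(ddc_node);
            of_node_put(ddc_node);
        }
        /* NULL means: fall back to a hardcoded EDID or timings node */
        return ddc;
    }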
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c index ae7a9fb3b8a2..baf788121287 100644 --- a/drivers/gpu/drm/imx/imx-tve.c +++ b/drivers/gpu/drm/imx/imx-tve.c | |||
| @@ -294,8 +294,10 @@ static void imx_tve_encoder_prepare(struct drm_encoder *encoder) | |||
| 294 | 294 | ||
| 295 | switch (tve->mode) { | 295 | switch (tve->mode) { |
| 296 | case TVE_MODE_VGA: | 296 | case TVE_MODE_VGA: |
| 297 | imx_drm_set_bus_format_pins(encoder, MEDIA_BUS_FMT_GBR888_1X24, | 297 | imx_drm_set_bus_config(encoder, MEDIA_BUS_FMT_GBR888_1X24, |
| 298 | tve->hsync_pin, tve->vsync_pin); | 298 | tve->hsync_pin, tve->vsync_pin, |
| 299 | DRM_BUS_FLAG_DE_HIGH | | ||
| 300 | DRM_BUS_FLAG_PIXDATA_NEGEDGE); | ||
| 299 | break; | 301 | break; |
| 300 | case TVE_MODE_TVOUT: | 302 | case TVE_MODE_TVOUT: |
| 301 | imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_YUV8_1X24); | 303 | imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_YUV8_1X24); |
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index b2c30b8d9816..fc040417e1e8 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c | |||
| @@ -66,6 +66,7 @@ struct ipu_crtc { | |||
| 66 | struct ipu_flip_work *flip_work; | 66 | struct ipu_flip_work *flip_work; |
| 67 | int irq; | 67 | int irq; |
| 68 | u32 bus_format; | 68 | u32 bus_format; |
| 69 | u32 bus_flags; | ||
| 69 | int di_hsync_pin; | 70 | int di_hsync_pin; |
| 70 | int di_vsync_pin; | 71 | int di_vsync_pin; |
| 71 | }; | 72 | }; |
| @@ -271,8 +272,10 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc, | |||
| 271 | else | 272 | else |
| 272 | sig_cfg.clkflags = 0; | 273 | sig_cfg.clkflags = 0; |
| 273 | 274 | ||
| 274 | sig_cfg.enable_pol = 1; | 275 | sig_cfg.enable_pol = !(ipu_crtc->bus_flags & DRM_BUS_FLAG_DE_LOW); |
| 275 | sig_cfg.clk_pol = 0; | 276 | /* Default to driving pixel data on negative clock edges */ |
| 277 | sig_cfg.clk_pol = !!(ipu_crtc->bus_flags & | ||
| 278 | DRM_BUS_FLAG_PIXDATA_POSEDGE); | ||
| 276 | sig_cfg.bus_format = ipu_crtc->bus_format; | 279 | sig_cfg.bus_format = ipu_crtc->bus_format; |
| 277 | sig_cfg.v_to_h_sync = 0; | 280 | sig_cfg.v_to_h_sync = 0; |
| 278 | sig_cfg.hsync_pin = ipu_crtc->di_hsync_pin; | 281 | sig_cfg.hsync_pin = ipu_crtc->di_hsync_pin; |
| @@ -396,11 +399,12 @@ static void ipu_disable_vblank(struct drm_crtc *crtc) | |||
| 396 | } | 399 | } |
| 397 | 400 | ||
| 398 | static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc, | 401 | static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc, |
| 399 | u32 bus_format, int hsync_pin, int vsync_pin) | 402 | u32 bus_format, int hsync_pin, int vsync_pin, u32 bus_flags) |
| 400 | { | 403 | { |
| 401 | struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); | 404 | struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); |
| 402 | 405 | ||
| 403 | ipu_crtc->bus_format = bus_format; | 406 | ipu_crtc->bus_format = bus_format; |
| 407 | ipu_crtc->bus_flags = bus_flags; | ||
| 404 | ipu_crtc->di_hsync_pin = hsync_pin; | 408 | ipu_crtc->di_hsync_pin = hsync_pin; |
| 405 | ipu_crtc->di_vsync_pin = vsync_pin; | 409 | ipu_crtc->di_vsync_pin = vsync_pin; |
| 406 | 410 | ||
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index 681ec6eb77d9..a4bb44118d33 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c | |||
| @@ -38,6 +38,8 @@ static const uint32_t ipu_plane_formats[] = { | |||
| 38 | DRM_FORMAT_RGBX8888, | 38 | DRM_FORMAT_RGBX8888, |
| 39 | DRM_FORMAT_BGRA8888, | 39 | DRM_FORMAT_BGRA8888, |
| 40 | DRM_FORMAT_BGRA8888, | 40 | DRM_FORMAT_BGRA8888, |
| 41 | DRM_FORMAT_UYVY, | ||
| 42 | DRM_FORMAT_VYUY, | ||
| 41 | DRM_FORMAT_YUYV, | 43 | DRM_FORMAT_YUYV, |
| 42 | DRM_FORMAT_YVYU, | 44 | DRM_FORMAT_YVYU, |
| 43 | DRM_FORMAT_YUV420, | 45 | DRM_FORMAT_YUV420, |
| @@ -428,7 +430,6 @@ static int ipu_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
| 428 | if (crtc != plane->crtc) | 430 | if (crtc != plane->crtc) |
| 429 | dev_dbg(plane->dev->dev, "crtc change: %p -> %p\n", | 431 | dev_dbg(plane->dev->dev, "crtc change: %p -> %p\n", |
| 430 | plane->crtc, crtc); | 432 | plane->crtc, crtc); |
| 431 | plane->crtc = crtc; | ||
| 432 | 433 | ||
| 433 | if (!ipu_plane->enabled) | 434 | if (!ipu_plane->enabled) |
| 434 | ipu_plane_enable(ipu_plane); | 435 | ipu_plane_enable(ipu_plane); |
| @@ -461,7 +462,7 @@ static void ipu_plane_destroy(struct drm_plane *plane) | |||
| 461 | kfree(ipu_plane); | 462 | kfree(ipu_plane); |
| 462 | } | 463 | } |
| 463 | 464 | ||
| 464 | static struct drm_plane_funcs ipu_plane_funcs = { | 465 | static const struct drm_plane_funcs ipu_plane_funcs = { |
| 465 | .update_plane = ipu_update_plane, | 466 | .update_plane = ipu_update_plane, |
| 466 | .disable_plane = ipu_disable_plane, | 467 | .disable_plane = ipu_disable_plane, |
| 467 | .destroy = ipu_plane_destroy, | 468 | .destroy = ipu_plane_destroy, |
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index 363e2c7741e2..2d1fd02cd3d6 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c | |||
| @@ -35,7 +35,6 @@ struct imx_parallel_display { | |||
| 35 | void *edid; | 35 | void *edid; |
| 36 | int edid_len; | 36 | int edid_len; |
| 37 | u32 bus_format; | 37 | u32 bus_format; |
| 38 | int mode_valid; | ||
| 39 | struct drm_display_mode mode; | 38 | struct drm_display_mode mode; |
| 40 | struct drm_panel *panel; | 39 | struct drm_panel *panel; |
| 41 | }; | 40 | }; |
| @@ -68,17 +67,6 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector) | |||
| 68 | num_modes = drm_add_edid_modes(connector, imxpd->edid); | 67 | num_modes = drm_add_edid_modes(connector, imxpd->edid); |
| 69 | } | 68 | } |
| 70 | 69 | ||
| 71 | if (imxpd->mode_valid) { | ||
| 72 | struct drm_display_mode *mode = drm_mode_create(connector->dev); | ||
| 73 | |||
| 74 | if (!mode) | ||
| 75 | return -EINVAL; | ||
| 76 | drm_mode_copy(mode, &imxpd->mode); | ||
| 77 | mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, | ||
| 78 | drm_mode_probed_add(connector, mode); | ||
| 79 | num_modes++; | ||
| 80 | } | ||
| 81 | |||
| 82 | if (np) { | 70 | if (np) { |
| 83 | struct drm_display_mode *mode = drm_mode_create(connector->dev); | 71 | struct drm_display_mode *mode = drm_mode_create(connector->dev); |
| 84 | 72 | ||
| @@ -115,8 +103,8 @@ static void imx_pd_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
| 115 | static void imx_pd_encoder_prepare(struct drm_encoder *encoder) | 103 | static void imx_pd_encoder_prepare(struct drm_encoder *encoder) |
| 116 | { | 104 | { |
| 117 | struct imx_parallel_display *imxpd = enc_to_imxpd(encoder); | 105 | struct imx_parallel_display *imxpd = enc_to_imxpd(encoder); |
| 118 | 106 | imx_drm_set_bus_config(encoder, imxpd->bus_format, 2, 3, | |
| 119 | imx_drm_set_bus_format(encoder, imxpd->bus_format); | 107 | imxpd->connector.display_info.bus_flags); |
| 120 | } | 108 | } |
| 121 | 109 | ||
| 122 | static void imx_pd_encoder_commit(struct drm_encoder *encoder) | 110 | static void imx_pd_encoder_commit(struct drm_encoder *encoder) |
| @@ -203,7 +191,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) | |||
| 203 | { | 191 | { |
| 204 | struct drm_device *drm = data; | 192 | struct drm_device *drm = data; |
| 205 | struct device_node *np = dev->of_node; | 193 | struct device_node *np = dev->of_node; |
| 206 | struct device_node *port; | 194 | struct device_node *ep; |
| 207 | const u8 *edidp; | 195 | const u8 *edidp; |
| 208 | struct imx_parallel_display *imxpd; | 196 | struct imx_parallel_display *imxpd; |
| 209 | int ret; | 197 | int ret; |
| @@ -230,18 +218,18 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) | |||
| 230 | } | 218 | } |
| 231 | 219 | ||
| 232 | /* port@1 is the output port */ | 220 | /* port@1 is the output port */ |
| 233 | port = of_graph_get_port_by_id(np, 1); | 221 | ep = of_graph_get_endpoint_by_regs(np, 1, -1); |
| 234 | if (port) { | 222 | if (ep) { |
| 235 | struct device_node *endpoint, *remote; | 223 | struct device_node *remote; |
| 236 | 224 | ||
| 237 | endpoint = of_get_child_by_name(port, "endpoint"); | 225 | remote = of_graph_get_remote_port_parent(ep); |
| 238 | if (endpoint) { | 226 | of_node_put(ep); |
| 239 | remote = of_graph_get_remote_port_parent(endpoint); | 227 | if (remote) { |
| 240 | if (remote) | 228 | imxpd->panel = of_drm_find_panel(remote); |
| 241 | imxpd->panel = of_drm_find_panel(remote); | 229 | of_node_put(remote); |
| 242 | if (!imxpd->panel) | ||
| 243 | return -EPROBE_DEFER; | ||
| 244 | } | 230 | } |
| 231 | if (!imxpd->panel) | ||
| 232 | return -EPROBE_DEFER; | ||
| 245 | } | 233 | } |
| 246 | 234 | ||
| 247 | imxpd->dev = dev; | 235 | imxpd->dev = dev; |
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index d05ca7901315..0186e500d2a5 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c | |||
| @@ -432,11 +432,6 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi, | |||
| 432 | unsigned long pll_rate; | 432 | unsigned long pll_rate; |
| 433 | unsigned int factor; | 433 | unsigned int factor; |
| 434 | 434 | ||
| 435 | if (!dpi) { | ||
| 436 | dev_err(dpi->dev, "invalid argument\n"); | ||
| 437 | return -EINVAL; | ||
| 438 | } | ||
| 439 | |||
| 440 | pix_rate = 1000UL * mode->clock; | 435 | pix_rate = 1000UL * mode->clock; |
| 441 | if (mode->clock <= 74000) | 436 | if (mode->clock <= 74000) |
| 442 | factor = 8 * 3; | 437 | factor = 8 * 3; |
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 2d808e59fefd..769559124562 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c | |||
| @@ -695,10 +695,8 @@ static void mtk_dsi_destroy_conn_enc(struct mtk_dsi *dsi) | |||
| 695 | { | 695 | { |
| 696 | drm_encoder_cleanup(&dsi->encoder); | 696 | drm_encoder_cleanup(&dsi->encoder); |
| 697 | /* Skip connector cleanup if creation was delegated to the bridge */ | 697 | /* Skip connector cleanup if creation was delegated to the bridge */ |
| 698 | if (dsi->conn.dev) { | 698 | if (dsi->conn.dev) |
| 699 | drm_connector_unregister(&dsi->conn); | ||
| 700 | drm_connector_cleanup(&dsi->conn); | 699 | drm_connector_cleanup(&dsi->conn); |
| 701 | } | ||
| 702 | } | 700 | } |
| 703 | 701 | ||
| 704 | static void mtk_dsi_ddp_start(struct mtk_ddp_comp *comp) | 702 | static void mtk_dsi_ddp_start(struct mtk_ddp_comp *comp) |
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index 14e64e08909e..d347dca17267 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c | |||
| @@ -182,7 +182,7 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock) | |||
| 182 | } | 182 | } |
| 183 | } | 183 | } |
| 184 | 184 | ||
| 185 | fvv = pllreffreq * testn / testm; | 185 | fvv = pllreffreq * (n + 1) / (m + 1); |
| 186 | fvv = (fvv - 800000) / 50000; | 186 | fvv = (fvv - 800000) / 50000; |
| 187 | 187 | ||
| 188 | if (fvv > 15) | 188 | if (fvv > 15) |
| @@ -202,6 +202,14 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock) | |||
| 202 | WREG_DAC(MGA1064_PIX_PLLC_M, m); | 202 | WREG_DAC(MGA1064_PIX_PLLC_M, m); |
| 203 | WREG_DAC(MGA1064_PIX_PLLC_N, n); | 203 | WREG_DAC(MGA1064_PIX_PLLC_N, n); |
| 204 | WREG_DAC(MGA1064_PIX_PLLC_P, p); | 204 | WREG_DAC(MGA1064_PIX_PLLC_P, p); |
| 205 | |||
| 206 | if (mdev->unique_rev_id >= 0x04) { | ||
| 207 | WREG_DAC(0x1a, 0x09); | ||
| 208 | msleep(20); | ||
| 209 | WREG_DAC(0x1a, 0x01); | ||
| 210 | |||
| 211 | } | ||
| 212 | |||
| 205 | return 0; | 213 | return 0; |
| 206 | } | 214 | } |
| 207 | 215 | ||
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index fbe304ee6c80..2aec27dbb5bb 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c | |||
| @@ -408,7 +408,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, | |||
| 408 | } | 408 | } |
| 409 | 409 | ||
| 410 | adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo); | 410 | adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo); |
| 411 | if (!adreno_gpu->memptrs) { | 411 | if (IS_ERR(adreno_gpu->memptrs)) { |
| 412 | dev_err(drm->dev, "could not vmap memptrs\n"); | 412 | dev_err(drm->dev, "could not vmap memptrs\n"); |
| 413 | return -ENOMEM; | 413 | return -ENOMEM; |
| 414 | } | 414 | } |
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index d9759bf3482e..c6cf837c5193 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c | |||
| @@ -159,6 +159,10 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, | |||
| 159 | dev->mode_config.fb_base = paddr; | 159 | dev->mode_config.fb_base = paddr; |
| 160 | 160 | ||
| 161 | fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo); | 161 | fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo); |
| 162 | if (IS_ERR(fbi->screen_base)) { | ||
| 163 | ret = PTR_ERR(fbi->screen_base); | ||
| 164 | goto fail_unlock; | ||
| 165 | } | ||
| 162 | fbi->screen_size = fbdev->bo->size; | 166 | fbi->screen_size = fbdev->bo->size; |
| 163 | fbi->fix.smem_start = paddr; | 167 | fbi->fix.smem_start = paddr; |
| 164 | fbi->fix.smem_len = fbdev->bo->size; | 168 | fbi->fix.smem_len = fbdev->bo->size; |
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 7daf4054dd2b..69836f5685b1 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c | |||
| @@ -398,6 +398,8 @@ void *msm_gem_vaddr_locked(struct drm_gem_object *obj) | |||
| 398 | return ERR_CAST(pages); | 398 | return ERR_CAST(pages); |
| 399 | msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, | 399 | msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, |
| 400 | VM_MAP, pgprot_writecombine(PAGE_KERNEL)); | 400 | VM_MAP, pgprot_writecombine(PAGE_KERNEL)); |
| 401 | if (msm_obj->vaddr == NULL) | ||
| 402 | return ERR_PTR(-ENOMEM); | ||
| 401 | } | 403 | } |
| 402 | return msm_obj->vaddr; | 404 | return msm_obj->vaddr; |
| 403 | } | 405 | } |
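[Editor's note] The msm_gem change above adopts the kernel's ERR_PTR convention: a mapping helper that can fail returns an encoded error instead of NULL, and every caller checks IS_ERR() and propagates PTR_ERR(). A self-contained sketch of that convention with a hypothetical example_map() helper:

    #include <linux/err.h>
    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    static void *example_map(struct page **pages, unsigned int npages)
    {
        void *vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);

        if (!vaddr)
            return ERR_PTR(-ENOMEM);    /* encode the error in the pointer */
        return vaddr;
    }

    static int example_caller(struct page **pages, unsigned int npages)
    {
        void *vaddr = example_map(pages, npages);

        if (IS_ERR(vaddr))
            return PTR_ERR(vaddr);      /* propagate -ENOMEM etc. */

        /* ... use vaddr ... */
        vunmap(vaddr);
        return 0;
    }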
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index b89ca5174863..eb4bb8b2f3a5 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c | |||
| @@ -40,12 +40,14 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, | |||
| 40 | 40 | ||
| 41 | submit->dev = dev; | 41 | submit->dev = dev; |
| 42 | submit->gpu = gpu; | 42 | submit->gpu = gpu; |
| 43 | submit->fence = NULL; | ||
| 43 | submit->pid = get_pid(task_pid(current)); | 44 | submit->pid = get_pid(task_pid(current)); |
| 44 | 45 | ||
| 45 | /* initially, until copy_from_user() and bo lookup succeeds: */ | 46 | /* initially, until copy_from_user() and bo lookup succeeds: */ |
| 46 | submit->nr_bos = 0; | 47 | submit->nr_bos = 0; |
| 47 | submit->nr_cmds = 0; | 48 | submit->nr_cmds = 0; |
| 48 | 49 | ||
| 50 | INIT_LIST_HEAD(&submit->node); | ||
| 49 | INIT_LIST_HEAD(&submit->bo_list); | 51 | INIT_LIST_HEAD(&submit->bo_list); |
| 50 | ww_acquire_init(&submit->ticket, &reservation_ww_class); | 52 | ww_acquire_init(&submit->ticket, &reservation_ww_class); |
| 51 | 53 | ||
| @@ -75,6 +77,11 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, | |||
| 75 | void __user *userptr = | 77 | void __user *userptr = |
| 76 | u64_to_user_ptr(args->bos + (i * sizeof(submit_bo))); | 78 | u64_to_user_ptr(args->bos + (i * sizeof(submit_bo))); |
| 77 | 79 | ||
| 80 | /* make sure we don't have garbage flags, in case we hit | ||
| 81 | * error path before flags is initialized: | ||
| 82 | */ | ||
| 83 | submit->bos[i].flags = 0; | ||
| 84 | |||
| 78 | ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo)); | 85 | ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo)); |
| 79 | if (ret) { | 86 | if (ret) { |
| 80 | ret = -EFAULT; | 87 | ret = -EFAULT; |
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index b48f73ac6389..0857710c2ff2 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c | |||
| @@ -312,6 +312,9 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit) | |||
| 312 | struct msm_gem_object *obj = submit->bos[idx].obj; | 312 | struct msm_gem_object *obj = submit->bos[idx].obj; |
| 313 | const char *buf = msm_gem_vaddr_locked(&obj->base); | 313 | const char *buf = msm_gem_vaddr_locked(&obj->base); |
| 314 | 314 | ||
| 315 | if (IS_ERR(buf)) | ||
| 316 | continue; | ||
| 317 | |||
| 315 | buf += iova - submit->bos[idx].iova; | 318 | buf += iova - submit->bos[idx].iova; |
| 316 | 319 | ||
| 317 | rd_write_section(rd, RD_GPUADDR, | 320 | rd_write_section(rd, RD_GPUADDR, |
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index 1f14b908b221..42f5359cf988 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c | |||
| @@ -40,6 +40,10 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size) | |||
| 40 | } | 40 | } |
| 41 | 41 | ||
| 42 | ring->start = msm_gem_vaddr_locked(ring->bo); | 42 | ring->start = msm_gem_vaddr_locked(ring->bo); |
| 43 | if (IS_ERR(ring->start)) { | ||
| 44 | ret = PTR_ERR(ring->start); | ||
| 45 | goto fail; | ||
| 46 | } | ||
| 43 | ring->end = ring->start + (size / 4); | 47 | ring->end = ring->start + (size / 4); |
| 44 | ring->cur = ring->start; | 48 | ring->cur = ring->start; |
| 45 | 49 | ||
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h index c612dc1f1eb4..126a85cc81bc 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h | |||
| @@ -16,9 +16,9 @@ enum nvkm_devidx { | |||
| 16 | NVKM_SUBDEV_MC, | 16 | NVKM_SUBDEV_MC, |
| 17 | NVKM_SUBDEV_BUS, | 17 | NVKM_SUBDEV_BUS, |
| 18 | NVKM_SUBDEV_TIMER, | 18 | NVKM_SUBDEV_TIMER, |
| 19 | NVKM_SUBDEV_INSTMEM, | ||
| 19 | NVKM_SUBDEV_FB, | 20 | NVKM_SUBDEV_FB, |
| 20 | NVKM_SUBDEV_LTC, | 21 | NVKM_SUBDEV_LTC, |
| 21 | NVKM_SUBDEV_INSTMEM, | ||
| 22 | NVKM_SUBDEV_MMU, | 22 | NVKM_SUBDEV_MMU, |
| 23 | NVKM_SUBDEV_BAR, | 23 | NVKM_SUBDEV_BAR, |
| 24 | NVKM_SUBDEV_PMU, | 24 | NVKM_SUBDEV_PMU, |
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h index db10c11f0595..c5a6ebd5a478 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h | |||
| @@ -25,7 +25,8 @@ u16 nvbios_outp_match(struct nvkm_bios *, u16 type, u16 mask, | |||
| 25 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_outp *); | 25 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_outp *); |
| 26 | 26 | ||
| 27 | struct nvbios_ocfg { | 27 | struct nvbios_ocfg { |
| 28 | u16 match; | 28 | u8 proto; |
| 29 | u8 flags; | ||
| 29 | u16 clkcmp[2]; | 30 | u16 clkcmp[2]; |
| 30 | }; | 31 | }; |
| 31 | 32 | ||
| @@ -33,7 +34,7 @@ u16 nvbios_ocfg_entry(struct nvkm_bios *, u16 outp, u8 idx, | |||
| 33 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len); | 34 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len); |
| 34 | u16 nvbios_ocfg_parse(struct nvkm_bios *, u16 outp, u8 idx, | 35 | u16 nvbios_ocfg_parse(struct nvkm_bios *, u16 outp, u8 idx, |
| 35 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *); | 36 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *); |
| 36 | u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u16 type, | 37 | u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u8 proto, u8 flags, |
| 37 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *); | 38 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *); |
| 38 | u16 nvbios_oclk_match(struct nvkm_bios *, u16 cmp, u32 khz); | 39 | u16 nvbios_oclk_match(struct nvkm_bios *, u16 cmp, u32 khz); |
| 39 | #endif | 40 | #endif |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 57aaf98a26f9..300ea03be8f0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
| @@ -552,6 +552,7 @@ nouveau_fbcon_init(struct drm_device *dev) | |||
| 552 | if (ret) | 552 | if (ret) |
| 553 | goto fini; | 553 | goto fini; |
| 554 | 554 | ||
| 555 | fbcon->helper.fbdev->pixmap.buf_align = 4; | ||
| 555 | return 0; | 556 | return 0; |
| 556 | 557 | ||
| 557 | fini: | 558 | fini: |
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c index 0f3e4bb411cc..7d9248b8c664 100644 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c | |||
| @@ -82,7 +82,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 82 | uint32_t fg; | 82 | uint32_t fg; |
| 83 | uint32_t bg; | 83 | uint32_t bg; |
| 84 | uint32_t dsize; | 84 | uint32_t dsize; |
| 85 | uint32_t width; | ||
| 86 | uint32_t *data = (uint32_t *)image->data; | 85 | uint32_t *data = (uint32_t *)image->data; |
| 87 | int ret; | 86 | int ret; |
| 88 | 87 | ||
| @@ -93,9 +92,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 93 | if (ret) | 92 | if (ret) |
| 94 | return ret; | 93 | return ret; |
| 95 | 94 | ||
| 96 | width = ALIGN(image->width, 8); | ||
| 97 | dsize = ALIGN(width * image->height, 32) >> 5; | ||
| 98 | |||
| 99 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || | 95 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || |
| 100 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) { | 96 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) { |
| 101 | fg = ((uint32_t *) info->pseudo_palette)[image->fg_color]; | 97 | fg = ((uint32_t *) info->pseudo_palette)[image->fg_color]; |
| @@ -111,10 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 111 | ((image->dx + image->width) & 0xffff)); | 107 | ((image->dx + image->width) & 0xffff)); |
| 112 | OUT_RING(chan, bg); | 108 | OUT_RING(chan, bg); |
| 113 | OUT_RING(chan, fg); | 109 | OUT_RING(chan, fg); |
| 114 | OUT_RING(chan, (image->height << 16) | width); | 110 | OUT_RING(chan, (image->height << 16) | image->width); |
| 115 | OUT_RING(chan, (image->height << 16) | image->width); | 111 | OUT_RING(chan, (image->height << 16) | image->width); |
| 116 | OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); | 112 | OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); |
| 117 | 113 | ||
| 114 | dsize = ALIGN(image->width * image->height, 32) >> 5; | ||
| 118 | while (dsize) { | 115 | while (dsize) { |
| 119 | int iter_len = dsize > 128 ? 128 : dsize; | 116 | int iter_len = dsize > 128 ? 128 : dsize; |
| 120 | 117 | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index 33d9ee0fac40..1aeb698e9707 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c | |||
| @@ -95,7 +95,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 95 | struct nouveau_fbdev *nfbdev = info->par; | 95 | struct nouveau_fbdev *nfbdev = info->par; |
| 96 | struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); | 96 | struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); |
| 97 | struct nouveau_channel *chan = drm->channel; | 97 | struct nouveau_channel *chan = drm->channel; |
| 98 | uint32_t width, dwords, *data = (uint32_t *)image->data; | 98 | uint32_t dwords, *data = (uint32_t *)image->data; |
| 99 | uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); | 99 | uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); |
| 100 | uint32_t *palette = info->pseudo_palette; | 100 | uint32_t *palette = info->pseudo_palette; |
| 101 | int ret; | 101 | int ret; |
| @@ -107,9 +107,6 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 107 | if (ret) | 107 | if (ret) |
| 108 | return ret; | 108 | return ret; |
| 109 | 109 | ||
| 110 | width = ALIGN(image->width, 32); | ||
| 111 | dwords = (width * image->height) >> 5; | ||
| 112 | |||
| 113 | BEGIN_NV04(chan, NvSub2D, 0x0814, 2); | 110 | BEGIN_NV04(chan, NvSub2D, 0x0814, 2); |
| 114 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || | 111 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || |
| 115 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) { | 112 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) { |
| @@ -128,6 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 128 | OUT_RING(chan, 0); | 125 | OUT_RING(chan, 0); |
| 129 | OUT_RING(chan, image->dy); | 126 | OUT_RING(chan, image->dy); |
| 130 | 127 | ||
| 128 | dwords = ALIGN(image->width * image->height, 32) >> 5; | ||
| 131 | while (dwords) { | 129 | while (dwords) { |
| 132 | int push = dwords > 2047 ? 2047 : dwords; | 130 | int push = dwords > 2047 ? 2047 : dwords; |
| 133 | 131 | ||
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c index a0913359ac05..839f4c8c1805 100644 --- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c +++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c | |||
| @@ -95,7 +95,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 95 | struct nouveau_fbdev *nfbdev = info->par; | 95 | struct nouveau_fbdev *nfbdev = info->par; |
| 96 | struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); | 96 | struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); |
| 97 | struct nouveau_channel *chan = drm->channel; | 97 | struct nouveau_channel *chan = drm->channel; |
| 98 | uint32_t width, dwords, *data = (uint32_t *)image->data; | 98 | uint32_t dwords, *data = (uint32_t *)image->data; |
| 99 | uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); | 99 | uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); |
| 100 | uint32_t *palette = info->pseudo_palette; | 100 | uint32_t *palette = info->pseudo_palette; |
| 101 | int ret; | 101 | int ret; |
| @@ -107,9 +107,6 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 107 | if (ret) | 107 | if (ret) |
| 108 | return ret; | 108 | return ret; |
| 109 | 109 | ||
| 110 | width = ALIGN(image->width, 32); | ||
| 111 | dwords = (width * image->height) >> 5; | ||
| 112 | |||
| 113 | BEGIN_NVC0(chan, NvSub2D, 0x0814, 2); | 110 | BEGIN_NVC0(chan, NvSub2D, 0x0814, 2); |
| 114 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || | 111 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || |
| 115 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) { | 112 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) { |
| @@ -128,6 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 128 | OUT_RING (chan, 0); | 125 | OUT_RING (chan, 0); |
| 129 | OUT_RING (chan, image->dy); | 126 | OUT_RING (chan, image->dy); |
| 130 | 127 | ||
| 128 | dwords = ALIGN(image->width * image->height, 32) >> 5; | ||
| 131 | while (dwords) { | 129 | while (dwords) { |
| 132 | int push = dwords > 2047 ? 2047 : dwords; | 130 | int push = dwords > 2047 ? 2047 : dwords; |
| 133 | 131 | ||
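All three fbcon imageblit hunks (nv04, nv50, nvc0) drop the pre-aligned width temporary and compute the push-buffer length only after the channel setup succeeds, directly from the raw image size. The expansion bitmap is one bit per pixel, so the dword count is width * height bits rounded up to a 32-bit boundary and shifted down by five. A small hedged check of that arithmetic, with assumed glyph sizes:

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* assumed sample glyph sizes, one bit per pixel */
	const unsigned int dims[][2] = { { 8, 16 }, { 12, 22 }, { 7, 13 } };

	for (unsigned int i = 0; i < 3; i++) {
		unsigned int bits = dims[i][0] * dims[i][1];
		unsigned int dwords = ALIGN(bits, 32) >> 5;

		printf("%ux%u -> %u bits -> %u dwords\n",
		       dims[i][0], dims[i][1], bits, dwords);
	}
	return 0;	/* 128 bits -> 4 dwords, 264 -> 9, 91 -> 3 */
}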
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild index a74c5dd27dc0..e2a64ed14b22 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild | |||
| @@ -18,6 +18,7 @@ nvkm-y += nvkm/engine/disp/piornv50.o | |||
| 18 | nvkm-y += nvkm/engine/disp/sornv50.o | 18 | nvkm-y += nvkm/engine/disp/sornv50.o |
| 19 | nvkm-y += nvkm/engine/disp/sorg94.o | 19 | nvkm-y += nvkm/engine/disp/sorg94.o |
| 20 | nvkm-y += nvkm/engine/disp/sorgf119.o | 20 | nvkm-y += nvkm/engine/disp/sorgf119.o |
| 21 | nvkm-y += nvkm/engine/disp/sorgm107.o | ||
| 21 | nvkm-y += nvkm/engine/disp/sorgm200.o | 22 | nvkm-y += nvkm/engine/disp/sorgm200.o |
| 22 | nvkm-y += nvkm/engine/disp/dport.o | 23 | nvkm-y += nvkm/engine/disp/dport.o |
| 23 | 24 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c index f0314664349c..5dd34382f55a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c | |||
| @@ -76,6 +76,7 @@ exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl, | |||
| 76 | mask |= 0x0001 << or; | 76 | mask |= 0x0001 << or; |
| 77 | mask |= 0x0100 << head; | 77 | mask |= 0x0100 << head; |
| 78 | 78 | ||
| 79 | |||
| 79 | list_for_each_entry(outp, &disp->base.outp, head) { | 80 | list_for_each_entry(outp, &disp->base.outp, head) { |
| 80 | if ((outp->info.hasht & 0xff) == type && | 81 | if ((outp->info.hasht & 0xff) == type && |
| 81 | (outp->info.hashm & mask) == mask) { | 82 | (outp->info.hashm & mask) == mask) { |
| @@ -155,25 +156,21 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf) | |||
| 155 | if (!outp) | 156 | if (!outp) |
| 156 | return NULL; | 157 | return NULL; |
| 157 | 158 | ||
| 159 | *conf = (ctrl & 0x00000f00) >> 8; | ||
| 158 | switch (outp->info.type) { | 160 | switch (outp->info.type) { |
| 159 | case DCB_OUTPUT_TMDS: | 161 | case DCB_OUTPUT_TMDS: |
| 160 | *conf = (ctrl & 0x00000f00) >> 8; | ||
| 161 | if (*conf == 5) | 162 | if (*conf == 5) |
| 162 | *conf |= 0x0100; | 163 | *conf |= 0x0100; |
| 163 | break; | 164 | break; |
| 164 | case DCB_OUTPUT_LVDS: | 165 | case DCB_OUTPUT_LVDS: |
| 165 | *conf = disp->sor.lvdsconf; | 166 | *conf |= disp->sor.lvdsconf; |
| 166 | break; | ||
| 167 | case DCB_OUTPUT_DP: | ||
| 168 | *conf = (ctrl & 0x00000f00) >> 8; | ||
| 169 | break; | 167 | break; |
| 170 | case DCB_OUTPUT_ANALOG: | ||
| 171 | default: | 168 | default: |
| 172 | *conf = 0x00ff; | ||
| 173 | break; | 169 | break; |
| 174 | } | 170 | } |
| 175 | 171 | ||
| 176 | data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2); | 172 | data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8, |
| 173 | &ver, &hdr, &cnt, &len, &info2); | ||
| 177 | if (data && id < 0xff) { | 174 | if (data && id < 0xff) { |
| 178 | data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk); | 175 | data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk); |
| 179 | if (data) { | 176 | if (data) { |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c index b6944142d616..f4b9cf8574be 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c | |||
| @@ -36,7 +36,7 @@ gm107_disp = { | |||
| 36 | .outp.internal.crt = nv50_dac_output_new, | 36 | .outp.internal.crt = nv50_dac_output_new, |
| 37 | .outp.internal.tmds = nv50_sor_output_new, | 37 | .outp.internal.tmds = nv50_sor_output_new, |
| 38 | .outp.internal.lvds = nv50_sor_output_new, | 38 | .outp.internal.lvds = nv50_sor_output_new, |
| 39 | .outp.internal.dp = gf119_sor_dp_new, | 39 | .outp.internal.dp = gm107_sor_dp_new, |
| 40 | .dac.nr = 3, | 40 | .dac.nr = 3, |
| 41 | .dac.power = nv50_dac_power, | 41 | .dac.power = nv50_dac_power, |
| 42 | .dac.sense = nv50_dac_sense, | 42 | .dac.sense = nv50_dac_sense, |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c index 4226d2153b9c..fcb1b0c46d64 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c | |||
| @@ -387,22 +387,17 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf) | |||
| 387 | if (!outp) | 387 | if (!outp) |
| 388 | return NULL; | 388 | return NULL; |
| 389 | 389 | ||
| 390 | *conf = (ctrl & 0x00000f00) >> 8; | ||
| 390 | if (outp->info.location == 0) { | 391 | if (outp->info.location == 0) { |
| 391 | switch (outp->info.type) { | 392 | switch (outp->info.type) { |
| 392 | case DCB_OUTPUT_TMDS: | 393 | case DCB_OUTPUT_TMDS: |
| 393 | *conf = (ctrl & 0x00000f00) >> 8; | ||
| 394 | if (*conf == 5) | 394 | if (*conf == 5) |
| 395 | *conf |= 0x0100; | 395 | *conf |= 0x0100; |
| 396 | break; | 396 | break; |
| 397 | case DCB_OUTPUT_LVDS: | 397 | case DCB_OUTPUT_LVDS: |
| 398 | *conf = disp->sor.lvdsconf; | 398 | *conf |= disp->sor.lvdsconf; |
| 399 | break; | 399 | break; |
| 400 | case DCB_OUTPUT_DP: | ||
| 401 | *conf = (ctrl & 0x00000f00) >> 8; | ||
| 402 | break; | ||
| 403 | case DCB_OUTPUT_ANALOG: | ||
| 404 | default: | 400 | default: |
| 405 | *conf = 0x00ff; | ||
| 406 | break; | 401 | break; |
| 407 | } | 402 | } |
| 408 | } else { | 403 | } else { |
| @@ -410,7 +405,8 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf) | |||
| 410 | pclk = pclk / 2; | 405 | pclk = pclk / 2; |
| 411 | } | 406 | } |
| 412 | 407 | ||
| 413 | data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2); | 408 | data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8, |
| 409 | &ver, &hdr, &cnt, &len, &info2); | ||
| 414 | if (data && id < 0xff) { | 410 | if (data && id < 0xff) { |
| 415 | data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk); | 411 | data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk); |
| 416 | if (data) { | 412 | if (data) { |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h index e9067ba4e179..4e983f6d7032 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h | |||
| @@ -62,7 +62,12 @@ int g94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int); | |||
| 62 | int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *, | 62 | int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *, |
| 63 | struct nvkm_output **); | 63 | struct nvkm_output **); |
| 64 | int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool); | 64 | int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool); |
| 65 | int gf119_sor_dp_drv_ctl(struct nvkm_output_dp *, int, int, int, int); | ||
| 65 | 66 | ||
| 66 | int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *, | 67 | int gm107_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *, |
| 67 | struct nvkm_output **); | 68 | struct nvkm_output **); |
| 69 | int gm107_sor_dp_pattern(struct nvkm_output_dp *, int); | ||
| 70 | |||
| 71 | int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *, | ||
| 72 | struct nvkm_output **); | ||
| 68 | #endif | 73 | #endif |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c index b4b41b135643..22706c0a54b5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c | |||
| @@ -40,8 +40,7 @@ static int | |||
| 40 | gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern) | 40 | gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern) |
| 41 | { | 41 | { |
| 42 | struct nvkm_device *device = outp->base.disp->engine.subdev.device; | 42 | struct nvkm_device *device = outp->base.disp->engine.subdev.device; |
| 43 | const u32 loff = gf119_sor_loff(outp); | 43 | nvkm_mask(device, 0x61c110, 0x0f0f0f0f, 0x01010101 * pattern); |
| 44 | nvkm_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern); | ||
| 45 | return 0; | 44 | return 0; |
| 46 | } | 45 | } |
| 47 | 46 | ||
| @@ -64,7 +63,7 @@ gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef) | |||
| 64 | return 0; | 63 | return 0; |
| 65 | } | 64 | } |
| 66 | 65 | ||
| 67 | static int | 66 | int |
| 68 | gf119_sor_dp_drv_ctl(struct nvkm_output_dp *outp, | 67 | gf119_sor_dp_drv_ctl(struct nvkm_output_dp *outp, |
| 69 | int ln, int vs, int pe, int pc) | 68 | int ln, int vs, int pe, int pc) |
| 70 | { | 69 | { |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c new file mode 100644 index 000000000000..37790b2617c5 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c | |||
| @@ -0,0 +1,53 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2016 Red Hat Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | * Authors: Ben Skeggs <bskeggs@redhat.com> | ||
| 23 | */ | ||
| 24 | #include "nv50.h" | ||
| 25 | #include "outpdp.h" | ||
| 26 | |||
| 27 | int | ||
| 28 | gm107_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern) | ||
| 29 | { | ||
| 30 | struct nvkm_device *device = outp->base.disp->engine.subdev.device; | ||
| 31 | const u32 soff = outp->base.or * 0x800; | ||
| 32 | const u32 data = 0x01010101 * pattern; | ||
| 33 | if (outp->base.info.sorconf.link & 1) | ||
| 34 | nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data); | ||
| 35 | else | ||
| 36 | nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data); | ||
| 37 | return 0; | ||
| 38 | } | ||
| 39 | |||
| 40 | static const struct nvkm_output_dp_func | ||
| 41 | gm107_sor_dp_func = { | ||
| 42 | .pattern = gm107_sor_dp_pattern, | ||
| 43 | .lnk_pwr = g94_sor_dp_lnk_pwr, | ||
| 44 | .lnk_ctl = gf119_sor_dp_lnk_ctl, | ||
| 45 | .drv_ctl = gf119_sor_dp_drv_ctl, | ||
| 46 | }; | ||
| 47 | |||
| 48 | int | ||
| 49 | gm107_sor_dp_new(struct nvkm_disp *disp, int index, | ||
| 50 | struct dcb_output *dcbE, struct nvkm_output **poutp) | ||
| 51 | { | ||
| 52 | return nvkm_output_dp_new_(&gm107_sor_dp_func, disp, index, dcbE, poutp); | ||
| 53 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c index 2cfbef9c344f..c44fa7ea672a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c | |||
| @@ -57,19 +57,6 @@ gm200_sor_dp_lane_map(struct nvkm_device *device, u8 lane) | |||
| 57 | } | 57 | } |
| 58 | 58 | ||
| 59 | static int | 59 | static int |
| 60 | gm200_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern) | ||
| 61 | { | ||
| 62 | struct nvkm_device *device = outp->base.disp->engine.subdev.device; | ||
| 63 | const u32 soff = gm200_sor_soff(outp); | ||
| 64 | const u32 data = 0x01010101 * pattern; | ||
| 65 | if (outp->base.info.sorconf.link & 1) | ||
| 66 | nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data); | ||
| 67 | else | ||
| 68 | nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data); | ||
| 69 | return 0; | ||
| 70 | } | ||
| 71 | |||
| 72 | static int | ||
| 73 | gm200_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr) | 60 | gm200_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr) |
| 74 | { | 61 | { |
| 75 | struct nvkm_device *device = outp->base.disp->engine.subdev.device; | 62 | struct nvkm_device *device = outp->base.disp->engine.subdev.device; |
| @@ -129,7 +116,7 @@ gm200_sor_dp_drv_ctl(struct nvkm_output_dp *outp, | |||
| 129 | 116 | ||
| 130 | static const struct nvkm_output_dp_func | 117 | static const struct nvkm_output_dp_func |
| 131 | gm200_sor_dp_func = { | 118 | gm200_sor_dp_func = { |
| 132 | .pattern = gm200_sor_dp_pattern, | 119 | .pattern = gm107_sor_dp_pattern, |
| 133 | .lnk_pwr = gm200_sor_dp_lnk_pwr, | 120 | .lnk_pwr = gm200_sor_dp_lnk_pwr, |
| 134 | .lnk_ctl = gf119_sor_dp_lnk_ctl, | 121 | .lnk_ctl = gf119_sor_dp_lnk_ctl, |
| 135 | .drv_ctl = gm200_sor_dp_drv_ctl, | 122 | .drv_ctl = gm200_sor_dp_drv_ctl, |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c index 9513badb8220..ae9ab5b1ab97 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | |||
| @@ -949,22 +949,41 @@ gf100_gr_trap_gpc_rop(struct gf100_gr *gr, int gpc) | |||
| 949 | } | 949 | } |
| 950 | 950 | ||
| 951 | static const struct nvkm_enum gf100_mp_warp_error[] = { | 951 | static const struct nvkm_enum gf100_mp_warp_error[] = { |
| 952 | { 0x00, "NO_ERROR" }, | 952 | { 0x01, "STACK_ERROR" }, |
| 953 | { 0x01, "STACK_MISMATCH" }, | 953 | { 0x02, "API_STACK_ERROR" }, |
| 954 | { 0x03, "RET_EMPTY_STACK_ERROR" }, | ||
| 955 | { 0x04, "PC_WRAP" }, | ||
| 954 | { 0x05, "MISALIGNED_PC" }, | 956 | { 0x05, "MISALIGNED_PC" }, |
| 955 | { 0x08, "MISALIGNED_GPR" }, | 957 | { 0x06, "PC_OVERFLOW" }, |
| 956 | { 0x09, "INVALID_OPCODE" }, | 958 | { 0x07, "MISALIGNED_IMMC_ADDR" }, |
| 957 | { 0x0d, "GPR_OUT_OF_BOUNDS" }, | 959 | { 0x08, "MISALIGNED_REG" }, |
| 958 | { 0x0e, "MEM_OUT_OF_BOUNDS" }, | 960 | { 0x09, "ILLEGAL_INSTR_ENCODING" }, |
| 959 | { 0x0f, "UNALIGNED_MEM_ACCESS" }, | 961 | { 0x0a, "ILLEGAL_SPH_INSTR_COMBO" }, |
| 962 | { 0x0b, "ILLEGAL_INSTR_PARAM" }, | ||
| 963 | { 0x0c, "INVALID_CONST_ADDR" }, | ||
| 964 | { 0x0d, "OOR_REG" }, | ||
| 965 | { 0x0e, "OOR_ADDR" }, | ||
| 966 | { 0x0f, "MISALIGNED_ADDR" }, | ||
| 960 | { 0x10, "INVALID_ADDR_SPACE" }, | 967 | { 0x10, "INVALID_ADDR_SPACE" }, |
| 961 | { 0x11, "INVALID_PARAM" }, | 968 | { 0x11, "ILLEGAL_INSTR_PARAM2" }, |
| 969 | { 0x12, "INVALID_CONST_ADDR_LDC" }, | ||
| 970 | { 0x13, "GEOMETRY_SM_ERROR" }, | ||
| 971 | { 0x14, "DIVERGENT" }, | ||
| 972 | { 0x15, "WARP_EXIT" }, | ||
| 962 | {} | 973 | {} |
| 963 | }; | 974 | }; |
| 964 | 975 | ||
| 965 | static const struct nvkm_bitfield gf100_mp_global_error[] = { | 976 | static const struct nvkm_bitfield gf100_mp_global_error[] = { |
| 977 | { 0x00000001, "SM_TO_SM_FAULT" }, | ||
| 978 | { 0x00000002, "L1_ERROR" }, | ||
| 966 | { 0x00000004, "MULTIPLE_WARP_ERRORS" }, | 979 | { 0x00000004, "MULTIPLE_WARP_ERRORS" }, |
| 967 | { 0x00000008, "OUT_OF_STACK_SPACE" }, | 980 | { 0x00000008, "PHYSICAL_STACK_OVERFLOW" }, |
| 981 | { 0x00000010, "BPT_INT" }, | ||
| 982 | { 0x00000020, "BPT_PAUSE" }, | ||
| 983 | { 0x00000040, "SINGLE_STEP_COMPLETE" }, | ||
| 984 | { 0x20000000, "ECC_SEC_ERROR" }, | ||
| 985 | { 0x40000000, "ECC_DED_ERROR" }, | ||
| 986 | { 0x80000000, "TIMEOUT" }, | ||
| 968 | {} | 987 | {} |
| 969 | }; | 988 | }; |
| 970 | 989 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c index a5e92135cd77..9efb1b48cd54 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c | |||
| @@ -141,7 +141,8 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx, | |||
| 141 | { | 141 | { |
| 142 | u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len); | 142 | u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len); |
| 143 | if (data) { | 143 | if (data) { |
| 144 | info->match = nvbios_rd16(bios, data + 0x00); | 144 | info->proto = nvbios_rd08(bios, data + 0x00); |
| 145 | info->flags = nvbios_rd16(bios, data + 0x01); | ||
| 145 | info->clkcmp[0] = nvbios_rd16(bios, data + 0x02); | 146 | info->clkcmp[0] = nvbios_rd16(bios, data + 0x02); |
| 146 | info->clkcmp[1] = nvbios_rd16(bios, data + 0x04); | 147 | info->clkcmp[1] = nvbios_rd16(bios, data + 0x04); |
| 147 | } | 148 | } |
| @@ -149,12 +150,13 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx, | |||
| 149 | } | 150 | } |
| 150 | 151 | ||
| 151 | u16 | 152 | u16 |
| 152 | nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u16 type, | 153 | nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u8 proto, u8 flags, |
| 153 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *info) | 154 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *info) |
| 154 | { | 155 | { |
| 155 | u16 data, idx = 0; | 156 | u16 data, idx = 0; |
| 156 | while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) { | 157 | while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) { |
| 157 | if (info->match == type) | 158 | if ((info->proto == proto || info->proto == 0xff) && |
| 159 | (info->flags == flags)) | ||
| 158 | break; | 160 | break; |
| 159 | } | 161 | } |
| 160 | return data; | 162 | return data; |
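The nvbios_ocfg rework splits the old 16-bit match field into an 8-bit proto and an 8-bit flags byte; exec_clkcmp() now passes *conf & 0xff and *conf >> 8, and the table walk treats a proto of 0xff as a wildcard. A hedged, standalone sketch of that matching rule with an illustrative struct:

#include <stdbool.h>
#include <stdint.h>

/* illustrative mirror of the nvbios_ocfg proto/flags pair */
struct demo_ocfg {
	uint8_t proto;
	uint8_t flags;
};

static bool demo_ocfg_matches(const struct demo_ocfg *info, uint16_t conf)
{
	uint8_t proto = conf & 0xff;	/* low byte: output protocol */
	uint8_t flags = conf >> 8;	/* high byte: extra configuration bits */

	/* a table entry with proto == 0xff matches any protocol */
	return (info->proto == proto || info->proto == 0xff) &&
	       info->flags == flags;
}

int main(void)
{
	struct demo_ocfg entry = { .proto = 0xff, .flags = 0x01 };

	return demo_ocfg_matches(&entry, 0x0105) ? 0 : 1;	/* wildcard hit */
}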
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c index e292f5679418..389fb13a1998 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c | |||
| @@ -69,11 +69,11 @@ gm107_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth) | |||
| 69 | } | 69 | } |
| 70 | 70 | ||
| 71 | static void | 71 | static void |
| 72 | gm107_ltc_lts_isr(struct nvkm_ltc *ltc, int c, int s) | 72 | gm107_ltc_intr_lts(struct nvkm_ltc *ltc, int c, int s) |
| 73 | { | 73 | { |
| 74 | struct nvkm_subdev *subdev = <c->subdev; | 74 | struct nvkm_subdev *subdev = <c->subdev; |
| 75 | struct nvkm_device *device = subdev->device; | 75 | struct nvkm_device *device = subdev->device; |
| 76 | u32 base = 0x140000 + (c * 0x2000) + (s * 0x200); | 76 | u32 base = 0x140400 + (c * 0x2000) + (s * 0x200); |
| 77 | u32 stat = nvkm_rd32(device, base + 0x00c); | 77 | u32 stat = nvkm_rd32(device, base + 0x00c); |
| 78 | 78 | ||
| 79 | if (stat) { | 79 | if (stat) { |
| @@ -92,7 +92,7 @@ gm107_ltc_intr(struct nvkm_ltc *ltc) | |||
| 92 | while (mask) { | 92 | while (mask) { |
| 93 | u32 s, c = __ffs(mask); | 93 | u32 s, c = __ffs(mask); |
| 94 | for (s = 0; s < ltc->lts_nr; s++) | 94 | for (s = 0; s < ltc->lts_nr; s++) |
| 95 | gm107_ltc_lts_isr(ltc, c, s); | 95 | gm107_ltc_intr_lts(ltc, c, s); |
| 96 | mask &= ~(1 << c); | 96 | mask &= ~(1 << c); |
| 97 | } | 97 | } |
| 98 | } | 98 | } |
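The gm107 LTC fix moves the per-slice interrupt register base from 0x140000 to 0x140400 while keeping the 0x2000-per-cache and 0x200-per-slice strides. A quick hedged check of that address arithmetic for an assumed cache/slice pair:

#include <stdio.h>

int main(void)
{
	/* assumed example: LTC 1, LTS 2 */
	unsigned int c = 1, s = 2;
	unsigned int base = 0x140400 + (c * 0x2000) + (s * 0x200);

	printf("LTC%u LTS%u interrupt base: 0x%06x\n", c, s, base);
	return 0;	/* 0x140400 + 0x2000 + 0x400 = 0x142800 */
}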
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c index 2a29bfd5125a..e18e0dc19ec8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c | |||
| @@ -46,7 +46,7 @@ static const struct nvkm_ltc_func | |||
| 46 | gm200_ltc = { | 46 | gm200_ltc = { |
| 47 | .oneinit = gm200_ltc_oneinit, | 47 | .oneinit = gm200_ltc_oneinit, |
| 48 | .init = gm200_ltc_init, | 48 | .init = gm200_ltc_init, |
| 49 | .intr = gm107_ltc_intr, /*XXX: not validated */ | 49 | .intr = gm107_ltc_intr, |
| 50 | .cbc_clear = gm107_ltc_cbc_clear, | 50 | .cbc_clear = gm107_ltc_cbc_clear, |
| 51 | .cbc_wait = gm107_ltc_cbc_wait, | 51 | .cbc_wait = gm107_ltc_cbc_wait, |
| 52 | .zbc = 16, | 52 | .zbc = 16, |
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig index 73241c4eb7aa..336ad4de9981 100644 --- a/drivers/gpu/drm/omapdrm/Kconfig +++ b/drivers/gpu/drm/omapdrm/Kconfig | |||
| @@ -2,6 +2,7 @@ config DRM_OMAP | |||
| 2 | tristate "OMAP DRM" | 2 | tristate "OMAP DRM" |
| 3 | depends on DRM | 3 | depends on DRM |
| 4 | depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM | 4 | depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM |
| 5 | select OMAP2_DSS | ||
| 5 | select DRM_KMS_HELPER | 6 | select DRM_KMS_HELPER |
| 6 | select DRM_KMS_FB_HELPER | 7 | select DRM_KMS_FB_HELPER |
| 7 | select FB_SYS_FILLRECT | 8 | select FB_SYS_FILLRECT |
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index 225fd8d6ab31..667ca4a24ece 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | * the Free Software Foundation. | 9 | * the Free Software Foundation. |
| 10 | */ | 10 | */ |
| 11 | 11 | ||
| 12 | #include <linux/gpio/consumer.h> | ||
| 12 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
| 13 | #include <linux/module.h> | 14 | #include <linux/module.h> |
| 14 | #include <linux/platform_device.h> | 15 | #include <linux/platform_device.h> |
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c index 8c246c213e06..9594ff7a2b0c 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c | |||
| @@ -14,7 +14,7 @@ | |||
| 14 | * the Free Software Foundation. | 14 | * the Free Software Foundation. |
| 15 | */ | 15 | */ |
| 16 | 16 | ||
| 17 | #include <linux/gpio.h> | 17 | #include <linux/gpio/consumer.h> |
| 18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
| 19 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
| 20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c index 2fd5602880a7..671806ca7d6a 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c | |||
| @@ -9,7 +9,7 @@ | |||
| 9 | * the Free Software Foundation. | 9 | * the Free Software Foundation. |
| 10 | */ | 10 | */ |
| 11 | 11 | ||
| 12 | #include <linux/gpio.h> | 12 | #include <linux/gpio/consumer.h> |
| 13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
| 14 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
| 15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c index e780fd4f8b46..7c2331be8d15 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c | |||
| @@ -9,7 +9,7 @@ | |||
| 9 | * the Free Software Foundation. | 9 | * the Free Software Foundation. |
| 10 | */ | 10 | */ |
| 11 | 11 | ||
| 12 | #include <linux/gpio.h> | 12 | #include <linux/gpio/consumer.h> |
| 13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
| 14 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
| 15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c index 36485c2137ce..2b118071b5a1 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c | |||
| @@ -14,7 +14,7 @@ | |||
| 14 | #include <linux/backlight.h> | 14 | #include <linux/backlight.h> |
| 15 | #include <linux/delay.h> | 15 | #include <linux/delay.h> |
| 16 | #include <linux/fb.h> | 16 | #include <linux/fb.h> |
| 17 | #include <linux/gpio.h> | 17 | #include <linux/gpio/consumer.h> |
| 18 | #include <linux/interrupt.h> | 18 | #include <linux/interrupt.h> |
| 19 | #include <linux/jiffies.h> | 19 | #include <linux/jiffies.h> |
| 20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c index 458f77bc473d..ac680e1de603 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include <linux/spi/spi.h> | 15 | #include <linux/spi/spi.h> |
| 16 | #include <linux/mutex.h> | 16 | #include <linux/mutex.h> |
| 17 | #include <linux/gpio.h> | 17 | #include <linux/gpio.h> |
| 18 | #include <linux/gpio/consumer.h> | ||
| 18 | 19 | ||
| 19 | #include <video/omapdss.h> | 20 | #include <video/omapdss.h> |
| 20 | #include <video/omap-panel-data.h> | 21 | #include <video/omap-panel-data.h> |
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c index 780cb263a318..38d2920a95e6 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c | |||
| @@ -15,7 +15,7 @@ | |||
| 15 | #include <linux/delay.h> | 15 | #include <linux/delay.h> |
| 16 | #include <linux/spi/spi.h> | 16 | #include <linux/spi/spi.h> |
| 17 | #include <linux/fb.h> | 17 | #include <linux/fb.h> |
| 18 | #include <linux/gpio.h> | 18 | #include <linux/gpio/consumer.h> |
| 19 | #include <linux/of_gpio.h> | 19 | #include <linux/of_gpio.h> |
| 20 | 20 | ||
| 21 | #include <video/omapdss.h> | 21 | #include <video/omapdss.h> |
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c index 529a017602e4..4363fffc87e3 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c | |||
| @@ -10,7 +10,7 @@ | |||
| 10 | */ | 10 | */ |
| 11 | 11 | ||
| 12 | #include <linux/delay.h> | 12 | #include <linux/delay.h> |
| 13 | #include <linux/gpio.h> | 13 | #include <linux/gpio/consumer.h> |
| 14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
| 15 | #include <linux/of.h> | 15 | #include <linux/of.h> |
| 16 | #include <linux/of_gpio.h> | 16 | #include <linux/of_gpio.h> |
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c index 31efcca801bd..deb416736aad 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c | |||
| @@ -29,7 +29,7 @@ | |||
| 29 | #include <linux/sched.h> | 29 | #include <linux/sched.h> |
| 30 | #include <linux/backlight.h> | 30 | #include <linux/backlight.h> |
| 31 | #include <linux/fb.h> | 31 | #include <linux/fb.h> |
| 32 | #include <linux/gpio.h> | 32 | #include <linux/gpio/consumer.h> |
| 33 | #include <linux/of.h> | 33 | #include <linux/of.h> |
| 34 | #include <linux/of_gpio.h> | 34 | #include <linux/of_gpio.h> |
| 35 | 35 | ||
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c index 03e2beb7b4f0..d93175b03a12 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c | |||
| @@ -14,7 +14,7 @@ | |||
| 14 | #include <linux/delay.h> | 14 | #include <linux/delay.h> |
| 15 | #include <linux/spi/spi.h> | 15 | #include <linux/spi/spi.h> |
| 16 | #include <linux/regulator/consumer.h> | 16 | #include <linux/regulator/consumer.h> |
| 17 | #include <linux/gpio.h> | 17 | #include <linux/gpio/consumer.h> |
| 18 | #include <linux/err.h> | 18 | #include <linux/err.h> |
| 19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
| 20 | #include <linux/of_gpio.h> | 20 | #include <linux/of_gpio.h> |
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 8730646a0cbb..56c43f355ce3 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c | |||
| @@ -1167,7 +1167,6 @@ static int dsi_regulator_init(struct platform_device *dsidev) | |||
| 1167 | { | 1167 | { |
| 1168 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | 1168 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); |
| 1169 | struct regulator *vdds_dsi; | 1169 | struct regulator *vdds_dsi; |
| 1170 | int r; | ||
| 1171 | 1170 | ||
| 1172 | if (dsi->vdds_dsi_reg != NULL) | 1171 | if (dsi->vdds_dsi_reg != NULL) |
| 1173 | return 0; | 1172 | return 0; |
| @@ -1180,15 +1179,6 @@ static int dsi_regulator_init(struct platform_device *dsidev) | |||
| 1180 | return PTR_ERR(vdds_dsi); | 1179 | return PTR_ERR(vdds_dsi); |
| 1181 | } | 1180 | } |
| 1182 | 1181 | ||
| 1183 | if (regulator_can_change_voltage(vdds_dsi)) { | ||
| 1184 | r = regulator_set_voltage(vdds_dsi, 1800000, 1800000); | ||
| 1185 | if (r) { | ||
| 1186 | devm_regulator_put(vdds_dsi); | ||
| 1187 | DSSERR("can't set the DSI regulator voltage\n"); | ||
| 1188 | return r; | ||
| 1189 | } | ||
| 1190 | } | ||
| 1191 | |||
| 1192 | dsi->vdds_dsi_reg = vdds_dsi; | 1182 | dsi->vdds_dsi_reg = vdds_dsi; |
| 1193 | 1183 | ||
| 1194 | return 0; | 1184 | return 0; |
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index f95ff319e68e..3303cfad4838 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c | |||
| @@ -30,6 +30,7 @@ | |||
| 30 | #include <linux/delay.h> | 30 | #include <linux/delay.h> |
| 31 | #include <linux/seq_file.h> | 31 | #include <linux/seq_file.h> |
| 32 | #include <linux/clk.h> | 32 | #include <linux/clk.h> |
| 33 | #include <linux/pinctrl/consumer.h> | ||
| 33 | #include <linux/platform_device.h> | 34 | #include <linux/platform_device.h> |
| 34 | #include <linux/pm_runtime.h> | 35 | #include <linux/pm_runtime.h> |
| 35 | #include <linux/gfp.h> | 36 | #include <linux/gfp.h> |
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index f892ae157ff3..4d46cdf7a037 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | #include <linux/gpio.h> | 33 | #include <linux/gpio.h> |
| 34 | #include <linux/regulator/consumer.h> | 34 | #include <linux/regulator/consumer.h> |
| 35 | #include <linux/component.h> | 35 | #include <linux/component.h> |
| 36 | #include <linux/of.h> | ||
| 36 | #include <video/omapdss.h> | 37 | #include <video/omapdss.h> |
| 37 | #include <sound/omap-hdmi-audio.h> | 38 | #include <sound/omap-hdmi-audio.h> |
| 38 | 39 | ||
| @@ -100,7 +101,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data) | |||
| 100 | 101 | ||
| 101 | static int hdmi_init_regulator(void) | 102 | static int hdmi_init_regulator(void) |
| 102 | { | 103 | { |
| 103 | int r; | ||
| 104 | struct regulator *reg; | 104 | struct regulator *reg; |
| 105 | 105 | ||
| 106 | if (hdmi.vdda_reg != NULL) | 106 | if (hdmi.vdda_reg != NULL) |
| @@ -114,15 +114,6 @@ static int hdmi_init_regulator(void) | |||
| 114 | return PTR_ERR(reg); | 114 | return PTR_ERR(reg); |
| 115 | } | 115 | } |
| 116 | 116 | ||
| 117 | if (regulator_can_change_voltage(reg)) { | ||
| 118 | r = regulator_set_voltage(reg, 1800000, 1800000); | ||
| 119 | if (r) { | ||
| 120 | devm_regulator_put(reg); | ||
| 121 | DSSWARN("can't set the regulator voltage\n"); | ||
| 122 | return r; | ||
| 123 | } | ||
| 124 | } | ||
| 125 | |||
| 126 | hdmi.vdda_reg = reg; | 117 | hdmi.vdda_reg = reg; |
| 127 | 118 | ||
| 128 | return 0; | 119 | return 0; |
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c index fa72e735dad2..ef3afe99e487 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c | |||
| @@ -211,7 +211,7 @@ static void hdmi_core_init(struct hdmi_core_video_config *video_cfg) | |||
| 211 | static void hdmi_core_powerdown_disable(struct hdmi_core_data *core) | 211 | static void hdmi_core_powerdown_disable(struct hdmi_core_data *core) |
| 212 | { | 212 | { |
| 213 | DSSDBG("Enter hdmi_core_powerdown_disable\n"); | 213 | DSSDBG("Enter hdmi_core_powerdown_disable\n"); |
| 214 | REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x0, 0, 0); | 214 | REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x1, 0, 0); |
| 215 | } | 215 | } |
| 216 | 216 | ||
| 217 | static void hdmi_core_swreset_release(struct hdmi_core_data *core) | 217 | static void hdmi_core_swreset_release(struct hdmi_core_data *core) |
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index a43f7b10e113..9255c0e1e4a7 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c | |||
| @@ -38,6 +38,7 @@ | |||
| 38 | #include <linux/gpio.h> | 38 | #include <linux/gpio.h> |
| 39 | #include <linux/regulator/consumer.h> | 39 | #include <linux/regulator/consumer.h> |
| 40 | #include <linux/component.h> | 40 | #include <linux/component.h> |
| 41 | #include <linux/of.h> | ||
| 41 | #include <video/omapdss.h> | 42 | #include <video/omapdss.h> |
| 42 | #include <sound/omap-hdmi-audio.h> | 43 | #include <sound/omap-hdmi-audio.h> |
| 43 | 44 | ||
| @@ -119,7 +120,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data) | |||
| 119 | 120 | ||
| 120 | static int hdmi_init_regulator(void) | 121 | static int hdmi_init_regulator(void) |
| 121 | { | 122 | { |
| 122 | int r; | ||
| 123 | struct regulator *reg; | 123 | struct regulator *reg; |
| 124 | 124 | ||
| 125 | if (hdmi.vdda_reg != NULL) | 125 | if (hdmi.vdda_reg != NULL) |
| @@ -131,15 +131,6 @@ static int hdmi_init_regulator(void) | |||
| 131 | return PTR_ERR(reg); | 131 | return PTR_ERR(reg); |
| 132 | } | 132 | } |
| 133 | 133 | ||
| 134 | if (regulator_can_change_voltage(reg)) { | ||
| 135 | r = regulator_set_voltage(reg, 1800000, 1800000); | ||
| 136 | if (r) { | ||
| 137 | devm_regulator_put(reg); | ||
| 138 | DSSWARN("can't set the regulator voltage\n"); | ||
| 139 | return r; | ||
| 140 | } | ||
| 141 | } | ||
| 142 | |||
| 143 | hdmi.vdda_reg = reg; | 134 | hdmi.vdda_reg = reg; |
| 144 | 135 | ||
| 145 | return 0; | 136 | return 0; |
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c index 6a397520cae5..8ab2093daa12 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c | |||
| @@ -51,8 +51,8 @@ static void hdmi_core_ddc_init(struct hdmi_core_data *core) | |||
| 51 | { | 51 | { |
| 52 | void __iomem *base = core->base; | 52 | void __iomem *base = core->base; |
| 53 | const unsigned long long iclk = 266000000; /* DSS L3 ICLK */ | 53 | const unsigned long long iclk = 266000000; /* DSS L3 ICLK */ |
| 54 | const unsigned ss_scl_high = 4000; /* ns */ | 54 | const unsigned ss_scl_high = 4600; /* ns */ |
| 55 | const unsigned ss_scl_low = 4700; /* ns */ | 55 | const unsigned ss_scl_low = 5400; /* ns */ |
| 56 | const unsigned fs_scl_high = 600; /* ns */ | 56 | const unsigned fs_scl_high = 600; /* ns */ |
| 57 | const unsigned fs_scl_low = 1300; /* ns */ | 57 | const unsigned fs_scl_low = 1300; /* ns */ |
| 58 | const unsigned sda_hold = 1000; /* ns */ | 58 | const unsigned sda_hold = 1000; /* ns */ |
| @@ -458,7 +458,7 @@ static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core, | |||
| 458 | 458 | ||
| 459 | c = (ptr[1] >> 6) & 0x3; | 459 | c = (ptr[1] >> 6) & 0x3; |
| 460 | m = (ptr[1] >> 4) & 0x3; | 460 | m = (ptr[1] >> 4) & 0x3; |
| 461 | r = (ptr[1] >> 0) & 0x3; | 461 | r = (ptr[1] >> 0) & 0xf; |
| 462 | 462 | ||
| 463 | itc = (ptr[2] >> 7) & 0x1; | 463 | itc = (ptr[2] >> 7) & 0x1; |
| 464 | ec = (ptr[2] >> 4) & 0x7; | 464 | ec = (ptr[2] >> 4) & 0x7; |
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c index 1f5d19c119ce..f98b750fc499 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <linux/io.h> | 13 | #include <linux/io.h> |
| 14 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
| 15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
| 16 | #include <linux/seq_file.h> | ||
| 16 | #include <video/omapdss.h> | 17 | #include <video/omapdss.h> |
| 17 | 18 | ||
| 18 | #include "dss.h" | 19 | #include "dss.h" |
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c index 06e23a7c432c..f1015e8b8267 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <linux/io.h> | 16 | #include <linux/io.h> |
| 17 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
| 18 | #include <linux/clk.h> | 18 | #include <linux/clk.h> |
| 19 | #include <linux/seq_file.h> | ||
| 19 | 20 | ||
| 20 | #include <video/omapdss.h> | 21 | #include <video/omapdss.h> |
| 21 | 22 | ||
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c index 13442b9052d1..055f62fca5dc 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <linux/err.h> | 14 | #include <linux/err.h> |
| 15 | #include <linux/io.h> | 15 | #include <linux/io.h> |
| 16 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
| 17 | #include <linux/seq_file.h> | ||
| 17 | #include <video/omapdss.h> | 18 | #include <video/omapdss.h> |
| 18 | 19 | ||
| 19 | #include "dss.h" | 20 | #include "dss.h" |
diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c index 6f5fc14fc015..479bf24050f8 100644 --- a/drivers/gpu/drm/omapdrm/omap_debugfs.c +++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c | |||
| @@ -17,6 +17,8 @@ | |||
| 17 | * this program. If not, see <http://www.gnu.org/licenses/>. | 17 | * this program. If not, see <http://www.gnu.org/licenses/>. |
| 18 | */ | 18 | */ |
| 19 | 19 | ||
| 20 | #include <linux/seq_file.h> | ||
| 21 | |||
| 20 | #include <drm/drm_crtc.h> | 22 | #include <drm/drm_crtc.h> |
| 21 | #include <drm/drm_fb_helper.h> | 23 | #include <drm/drm_fb_helper.h> |
| 22 | 24 | ||
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index de275a5be1db..4ceed7a9762f 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
| 28 | #include <linux/platform_device.h> /* platform_device() */ | 28 | #include <linux/platform_device.h> /* platform_device() */ |
| 29 | #include <linux/sched.h> | 29 | #include <linux/sched.h> |
| 30 | #include <linux/seq_file.h> | ||
| 30 | #include <linux/slab.h> | 31 | #include <linux/slab.h> |
| 31 | #include <linux/time.h> | 32 | #include <linux/time.h> |
| 32 | #include <linux/vmalloc.h> | 33 | #include <linux/vmalloc.h> |
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c index 94ec06d3d737..f84570d1636c 100644 --- a/drivers/gpu/drm/omapdrm/omap_fb.c +++ b/drivers/gpu/drm/omapdrm/omap_fb.c | |||
| @@ -17,6 +17,8 @@ | |||
| 17 | * this program. If not, see <http://www.gnu.org/licenses/>. | 17 | * this program. If not, see <http://www.gnu.org/licenses/>. |
| 18 | */ | 18 | */ |
| 19 | 19 | ||
| 20 | #include <linux/seq_file.h> | ||
| 21 | |||
| 20 | #include <drm/drm_crtc.h> | 22 | #include <drm/drm_crtc.h> |
| 21 | #include <drm/drm_crtc_helper.h> | 23 | #include <drm/drm_crtc_helper.h> |
| 22 | 24 | ||
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index b97afc281778..03698b6c806c 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | * this program. If not, see <http://www.gnu.org/licenses/>. | 17 | * this program. If not, see <http://www.gnu.org/licenses/>. |
| 18 | */ | 18 | */ |
| 19 | 19 | ||
| 20 | #include <linux/seq_file.h> | ||
| 20 | #include <linux/shmem_fs.h> | 21 | #include <linux/shmem_fs.h> |
| 21 | #include <linux/spinlock.h> | 22 | #include <linux/spinlock.h> |
| 22 | #include <linux/pfn_t.h> | 23 | #include <linux/pfn_t.h> |
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c index 505620c7c2c8..e04deedabd4a 100644 --- a/drivers/gpu/drm/sti/sti_crtc.c +++ b/drivers/gpu/drm/sti/sti_crtc.c | |||
| @@ -51,15 +51,6 @@ static void sti_crtc_disabling(struct drm_crtc *crtc) | |||
| 51 | mixer->status = STI_MIXER_DISABLING; | 51 | mixer->status = STI_MIXER_DISABLING; |
| 52 | } | 52 | } |
| 53 | 53 | ||
| 54 | static bool sti_crtc_mode_fixup(struct drm_crtc *crtc, | ||
| 55 | const struct drm_display_mode *mode, | ||
| 56 | struct drm_display_mode *adjusted_mode) | ||
| 57 | { | ||
| 58 | /* accept the provided drm_display_mode, do not fix it up */ | ||
| 59 | drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); | ||
| 60 | return true; | ||
| 61 | } | ||
| 62 | |||
| 63 | static int | 54 | static int |
| 64 | sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode) | 55 | sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode) |
| 65 | { | 56 | { |
| @@ -230,7 +221,6 @@ static void sti_crtc_atomic_flush(struct drm_crtc *crtc, | |||
| 230 | static const struct drm_crtc_helper_funcs sti_crtc_helper_funcs = { | 221 | static const struct drm_crtc_helper_funcs sti_crtc_helper_funcs = { |
| 231 | .enable = sti_crtc_enable, | 222 | .enable = sti_crtc_enable, |
| 232 | .disable = sti_crtc_disabling, | 223 | .disable = sti_crtc_disabling, |
| 233 | .mode_fixup = sti_crtc_mode_fixup, | ||
| 234 | .mode_set = drm_helper_crtc_mode_set, | 224 | .mode_set = drm_helper_crtc_mode_set, |
| 235 | .mode_set_nofb = sti_crtc_mode_set_nofb, | 225 | .mode_set_nofb = sti_crtc_mode_set_nofb, |
| 236 | .mode_set_base = drm_helper_crtc_mode_set_base, | 226 | .mode_set_base = drm_helper_crtc_mode_set_base, |
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 904d0754ad78..0f18b76c7906 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c | |||
| @@ -456,14 +456,6 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc, | |||
| 456 | 456 | ||
| 457 | WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size); | 457 | WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size); |
| 458 | 458 | ||
| 459 | HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), | ||
| 460 | vc4_state->mm.start); | ||
| 461 | |||
| 462 | if (debug_dump_regs) { | ||
| 463 | DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc)); | ||
| 464 | vc4_hvs_dump_state(dev); | ||
| 465 | } | ||
| 466 | |||
| 467 | if (crtc->state->event) { | 459 | if (crtc->state->event) { |
| 468 | unsigned long flags; | 460 | unsigned long flags; |
| 469 | 461 | ||
| @@ -473,8 +465,20 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc, | |||
| 473 | 465 | ||
| 474 | spin_lock_irqsave(&dev->event_lock, flags); | 466 | spin_lock_irqsave(&dev->event_lock, flags); |
| 475 | vc4_crtc->event = crtc->state->event; | 467 | vc4_crtc->event = crtc->state->event; |
| 476 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
| 477 | crtc->state->event = NULL; | 468 | crtc->state->event = NULL; |
| 469 | |||
| 470 | HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), | ||
| 471 | vc4_state->mm.start); | ||
| 472 | |||
| 473 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
| 474 | } else { | ||
| 475 | HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), | ||
| 476 | vc4_state->mm.start); | ||
| 477 | } | ||
| 478 | |||
| 479 | if (debug_dump_regs) { | ||
| 480 | DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc)); | ||
| 481 | vc4_hvs_dump_state(dev); | ||
| 478 | } | 482 | } |
| 479 | } | 483 | } |
| 480 | 484 | ||
| @@ -500,12 +504,17 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc) | |||
| 500 | { | 504 | { |
| 501 | struct drm_crtc *crtc = &vc4_crtc->base; | 505 | struct drm_crtc *crtc = &vc4_crtc->base; |
| 502 | struct drm_device *dev = crtc->dev; | 506 | struct drm_device *dev = crtc->dev; |
| 507 | struct vc4_dev *vc4 = to_vc4_dev(dev); | ||
| 508 | struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); | ||
| 509 | u32 chan = vc4_crtc->channel; | ||
| 503 | unsigned long flags; | 510 | unsigned long flags; |
| 504 | 511 | ||
| 505 | spin_lock_irqsave(&dev->event_lock, flags); | 512 | spin_lock_irqsave(&dev->event_lock, flags); |
| 506 | if (vc4_crtc->event) { | 513 | if (vc4_crtc->event && |
| 514 | (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)))) { | ||
| 507 | drm_crtc_send_vblank_event(crtc, vc4_crtc->event); | 515 | drm_crtc_send_vblank_event(crtc, vc4_crtc->event); |
| 508 | vc4_crtc->event = NULL; | 516 | vc4_crtc->event = NULL; |
| 517 | drm_crtc_vblank_put(crtc); | ||
| 509 | } | 518 | } |
| 510 | spin_unlock_irqrestore(&dev->event_lock, flags); | 519 | spin_unlock_irqrestore(&dev->event_lock, flags); |
| 511 | } | 520 | } |
| @@ -556,6 +565,7 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb) | |||
| 556 | spin_unlock_irqrestore(&dev->event_lock, flags); | 565 | spin_unlock_irqrestore(&dev->event_lock, flags); |
| 557 | } | 566 | } |
| 558 | 567 | ||
| 568 | drm_crtc_vblank_put(crtc); | ||
| 559 | drm_framebuffer_unreference(flip_state->fb); | 569 | drm_framebuffer_unreference(flip_state->fb); |
| 560 | kfree(flip_state); | 570 | kfree(flip_state); |
| 561 | 571 | ||
| @@ -598,6 +608,8 @@ static int vc4_async_page_flip(struct drm_crtc *crtc, | |||
| 598 | return ret; | 608 | return ret; |
| 599 | } | 609 | } |
| 600 | 610 | ||
| 611 | WARN_ON(drm_crtc_vblank_get(crtc) != 0); | ||
| 612 | |||
| 601 | /* Immediately update the plane's legacy fb pointer, so that later | 613 | /* Immediately update the plane's legacy fb pointer, so that later |
| 602 | * modeset prep sees the state that will be present when the semaphore | 614 | * modeset prep sees the state that will be present when the semaphore |
| 603 | * is released. | 615 | * is released. |
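The last two hunks pair a drm_crtc_vblank_get() when the asynchronous flip is queued with a drm_crtc_vblank_put() once it completes, keeping vblank interrupts enabled for the whole lifetime of the flip so the completion check above can run. Rough shape of the pairing (queue_flip() is a hypothetical stand-in for the driver's async-flip path, error handling trimmed):

    static int queue_flip(struct drm_crtc *crtc)
    {
            int ret = drm_crtc_vblank_get(crtc);    /* keep vblank IRQs on */

            if (WARN_ON(ret))
                    return ret;
            /* ...queue the flip; the completion callback calls
             * drm_crtc_vblank_put(crtc) once the new FB is scanned out... */
            return 0;
    }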
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 3446ece21b4a..250ed7e3754c 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c | |||
| @@ -66,12 +66,12 @@ static const struct file_operations vc4_drm_fops = { | |||
| 66 | }; | 66 | }; |
| 67 | 67 | ||
| 68 | static const struct drm_ioctl_desc vc4_drm_ioctls[] = { | 68 | static const struct drm_ioctl_desc vc4_drm_ioctls[] = { |
| 69 | DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, 0), | 69 | DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, DRM_RENDER_ALLOW), |
| 70 | DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, 0), | 70 | DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, DRM_RENDER_ALLOW), |
| 71 | DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, 0), | 71 | DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, DRM_RENDER_ALLOW), |
| 72 | DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, 0), | 72 | DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, DRM_RENDER_ALLOW), |
| 73 | DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, 0), | 73 | DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, DRM_RENDER_ALLOW), |
| 74 | DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, 0), | 74 | DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, DRM_RENDER_ALLOW), |
| 75 | DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl, | 75 | DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl, |
| 76 | DRM_ROOT_ONLY), | 76 | DRM_ROOT_ONLY), |
| 77 | }; | 77 | }; |
| @@ -91,7 +91,7 @@ static struct drm_driver vc4_drm_driver = { | |||
| 91 | 91 | ||
| 92 | .enable_vblank = vc4_enable_vblank, | 92 | .enable_vblank = vc4_enable_vblank, |
| 93 | .disable_vblank = vc4_disable_vblank, | 93 | .disable_vblank = vc4_disable_vblank, |
| 94 | .get_vblank_counter = drm_vblank_count, | 94 | .get_vblank_counter = drm_vblank_no_hw_counter, |
| 95 | 95 | ||
| 96 | #if defined(CONFIG_DEBUG_FS) | 96 | #if defined(CONFIG_DEBUG_FS) |
| 97 | .debugfs_init = vc4_debugfs_init, | 97 | .debugfs_init = vc4_debugfs_init, |
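DRM_RENDER_ALLOW marks an ioctl as callable by unauthenticated render-node clients (/dev/dri/renderD*), while GET_HANG_STATE stays DRM_ROOT_ONLY; drm_vblank_no_hw_counter is the core's stock callback for hardware without a vblank counter register. A hedged sketch of how a driver typically wires this up, assuming it also advertises DRIVER_RENDER in its feature flags (not shown in this hunk):

    static struct drm_driver example_driver = {
            .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_RENDER,
            .get_vblank_counter = drm_vblank_no_hw_counter, /* software count */
            /* remaining hooks elided */
    };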
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index cb37751bc99f..861a623bc185 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c | |||
| @@ -117,10 +117,18 @@ static int vc4_atomic_commit(struct drm_device *dev, | |||
| 117 | return -ENOMEM; | 117 | return -ENOMEM; |
| 118 | 118 | ||
| 119 | /* Make sure that any outstanding modesets have finished. */ | 119 | /* Make sure that any outstanding modesets have finished. */ |
| 120 | ret = down_interruptible(&vc4->async_modeset); | 120 | if (nonblock) { |
| 121 | if (ret) { | 121 | ret = down_trylock(&vc4->async_modeset); |
| 122 | kfree(c); | 122 | if (ret) { |
| 123 | return ret; | 123 | kfree(c); |
| 124 | return -EBUSY; | ||
| 125 | } | ||
| 126 | } else { | ||
| 127 | ret = down_interruptible(&vc4->async_modeset); | ||
| 128 | if (ret) { | ||
| 129 | kfree(c); | ||
| 130 | return ret; | ||
| 131 | } | ||
| 124 | } | 132 | } |
| 125 | 133 | ||
| 126 | ret = drm_atomic_helper_prepare_planes(dev, state); | 134 | ret = drm_atomic_helper_prepare_planes(dev, state); |
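A nonblocking atomic commit must not sleep waiting for earlier modesets, so the semaphore acquisition is split: down_trylock() returns non-zero when the semaphore is contended and the commit backs off with -EBUSY, while the blocking path keeps the interruptible sleep. Minimal sketch of the split, assuming a semaphore named sem serializes outstanding commits:

    int ret;

    if (nonblock) {
            if (down_trylock(&sem))         /* non-zero: would have blocked */
                    return -EBUSY;
    } else {
            ret = down_interruptible(&sem); /* -EINTR if a signal arrives */
            if (ret)
                    return ret;
    }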
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h index 6163b95c5411..f99eece4cc97 100644 --- a/drivers/gpu/drm/vc4/vc4_regs.h +++ b/drivers/gpu/drm/vc4/vc4_regs.h | |||
| @@ -341,6 +341,10 @@ | |||
| 341 | #define SCALER_DISPLACT0 0x00000030 | 341 | #define SCALER_DISPLACT0 0x00000030 |
| 342 | #define SCALER_DISPLACT1 0x00000034 | 342 | #define SCALER_DISPLACT1 0x00000034 |
| 343 | #define SCALER_DISPLACT2 0x00000038 | 343 | #define SCALER_DISPLACT2 0x00000038 |
| 344 | #define SCALER_DISPLACTX(x) (SCALER_DISPLACT0 + \ | ||
| 345 | (x) * (SCALER_DISPLACT1 - \ | ||
| 346 | SCALER_DISPLACT0)) | ||
| 347 | |||
| 344 | #define SCALER_DISPCTRL0 0x00000040 | 348 | #define SCALER_DISPCTRL0 0x00000040 |
| 345 | # define SCALER_DISPCTRLX_ENABLE BIT(31) | 349 | # define SCALER_DISPCTRLX_ENABLE BIT(31) |
| 346 | # define SCALER_DISPCTRLX_RESET BIT(30) | 350 | # define SCALER_DISPCTRLX_RESET BIT(30) |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c index 6de283c8fa3e..f0374f9b56ca 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
| 29 | #include <linux/module.h> | 29 | #include <linux/module.h> |
| 30 | #include <linux/kernel.h> | 30 | #include <linux/kernel.h> |
| 31 | #include <linux/frame.h> | ||
| 31 | #include <asm/hypervisor.h> | 32 | #include <asm/hypervisor.h> |
| 32 | #include "drmP.h" | 33 | #include "drmP.h" |
| 33 | #include "vmwgfx_msg.h" | 34 | #include "vmwgfx_msg.h" |
| @@ -194,7 +195,7 @@ static int vmw_send_msg(struct rpc_channel *channel, const char *msg) | |||
| 194 | 195 | ||
| 195 | return -EINVAL; | 196 | return -EINVAL; |
| 196 | } | 197 | } |
| 197 | 198 | STACK_FRAME_NON_STANDARD(vmw_send_msg); | |
| 198 | 199 | ||
| 199 | 200 | ||
| 200 | /** | 201 | /** |
| @@ -304,6 +305,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, | |||
| 304 | 305 | ||
| 305 | return 0; | 306 | return 0; |
| 306 | } | 307 | } |
| 308 | STACK_FRAME_NON_STANDARD(vmw_recv_msg); | ||
| 307 | 309 | ||
| 308 | 310 | ||
| 309 | /** | 311 | /** |
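vmw_send_msg() and vmw_recv_msg() contain VMware backdoor hypercall inline assembly that manipulates the stack in a way objtool's frame-pointer validation cannot follow, so they are annotated with STACK_FRAME_NON_STANDARD from <linux/frame.h>, which whitelists a function from objtool's stack checks. Usage sketch (the body is a placeholder, not the driver's real hypercall):

    #include <linux/frame.h>

    static int do_backdoor_call(void)
    {
            /* imagine inline asm here that saves and restores registers
             * itself, defeating objtool's stack-frame validation */
            return 0;
    }
    STACK_FRAME_NON_STANDARD(do_backdoor_call); /* skip objtool checks */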
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c index eb97a9241d17..15aa49d082c4 100644 --- a/drivers/hwmon/fam15h_power.c +++ b/drivers/hwmon/fam15h_power.c | |||
| @@ -172,9 +172,9 @@ static void do_read_registers_on_cu(void *_data) | |||
| 172 | */ | 172 | */ |
| 173 | static int read_registers(struct fam15h_power_data *data) | 173 | static int read_registers(struct fam15h_power_data *data) |
| 174 | { | 174 | { |
| 175 | int this_cpu, ret, cpu; | ||
| 176 | int core, this_core; | 175 | int core, this_core; |
| 177 | cpumask_var_t mask; | 176 | cpumask_var_t mask; |
| 177 | int ret, cpu; | ||
| 178 | 178 | ||
| 179 | ret = zalloc_cpumask_var(&mask, GFP_KERNEL); | 179 | ret = zalloc_cpumask_var(&mask, GFP_KERNEL); |
| 180 | if (!ret) | 180 | if (!ret) |
| @@ -183,7 +183,6 @@ static int read_registers(struct fam15h_power_data *data) | |||
| 183 | memset(data->cu_on, 0, sizeof(int) * MAX_CUS); | 183 | memset(data->cu_on, 0, sizeof(int) * MAX_CUS); |
| 184 | 184 | ||
| 185 | get_online_cpus(); | 185 | get_online_cpus(); |
| 186 | this_cpu = smp_processor_id(); | ||
| 187 | 186 | ||
| 188 | /* | 187 | /* |
| 189 | * Choose the first online core of each compute unit, and then | 188 | * Choose the first online core of each compute unit, and then |
| @@ -205,12 +204,9 @@ static int read_registers(struct fam15h_power_data *data) | |||
| 205 | cpumask_set_cpu(cpumask_any(topology_sibling_cpumask(cpu)), mask); | 204 | cpumask_set_cpu(cpumask_any(topology_sibling_cpumask(cpu)), mask); |
| 206 | } | 205 | } |
| 207 | 206 | ||
| 208 | if (cpumask_test_cpu(this_cpu, mask)) | 207 | on_each_cpu_mask(mask, do_read_registers_on_cu, data, true); |
| 209 | do_read_registers_on_cu(data); | ||
| 210 | 208 | ||
| 211 | smp_call_function_many(mask, do_read_registers_on_cu, data, true); | ||
| 212 | put_online_cpus(); | 209 | put_online_cpus(); |
| 213 | |||
| 214 | free_cpumask_var(mask); | 210 | free_cpumask_var(mask); |
| 215 | 211 | ||
| 216 | return 0; | 212 | return 0; |
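smp_call_function_many() deliberately skips the calling CPU, which is why the old code read smp_processor_id() and invoked the helper locally when the current CPU was in the mask. on_each_cpu_mask() covers both local and remote CPUs in one call and, with wait set, returns only after every callback has run. Sketch, with do_read() standing in for the driver's per-CPU callback:

    get_online_cpus();
    on_each_cpu_mask(mask, do_read, data, true); /* local + remote CPUs, waits */
    put_online_cpus();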
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c index c9ff08dbe10c..e30a5939dc0d 100644 --- a/drivers/hwmon/lm90.c +++ b/drivers/hwmon/lm90.c | |||
| @@ -375,7 +375,7 @@ struct lm90_data { | |||
| 375 | int kind; | 375 | int kind; |
| 376 | u32 flags; | 376 | u32 flags; |
| 377 | 377 | ||
| 378 | int update_interval; /* in milliseconds */ | 378 | unsigned int update_interval; /* in milliseconds */ |
| 379 | 379 | ||
| 380 | u8 config_orig; /* Original configuration register value */ | 380 | u8 config_orig; /* Original configuration register value */ |
| 381 | u8 convrate_orig; /* Original conversion rate register value */ | 381 | u8 convrate_orig; /* Original conversion rate register value */ |
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 64b1208bca5e..4a60ad214747 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c | |||
| @@ -245,6 +245,13 @@ struct i801_priv { | |||
| 245 | struct platform_device *mux_pdev; | 245 | struct platform_device *mux_pdev; |
| 246 | #endif | 246 | #endif |
| 247 | struct platform_device *tco_pdev; | 247 | struct platform_device *tco_pdev; |
| 248 | |||
| 249 | /* | ||
| 250 | * If set to true the host controller registers are reserved for | ||
| 251 | * ACPI AML use. Protected by acpi_lock. | ||
| 252 | */ | ||
| 253 | bool acpi_reserved; | ||
| 254 | struct mutex acpi_lock; | ||
| 248 | }; | 255 | }; |
| 249 | 256 | ||
| 250 | #define FEATURE_SMBUS_PEC (1 << 0) | 257 | #define FEATURE_SMBUS_PEC (1 << 0) |
| @@ -718,6 +725,12 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, | |||
| 718 | int ret = 0, xact = 0; | 725 | int ret = 0, xact = 0; |
| 719 | struct i801_priv *priv = i2c_get_adapdata(adap); | 726 | struct i801_priv *priv = i2c_get_adapdata(adap); |
| 720 | 727 | ||
| 728 | mutex_lock(&priv->acpi_lock); | ||
| 729 | if (priv->acpi_reserved) { | ||
| 730 | mutex_unlock(&priv->acpi_lock); | ||
| 731 | return -EBUSY; | ||
| 732 | } | ||
| 733 | |||
| 721 | pm_runtime_get_sync(&priv->pci_dev->dev); | 734 | pm_runtime_get_sync(&priv->pci_dev->dev); |
| 722 | 735 | ||
| 723 | hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC) | 736 | hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC) |
| @@ -820,6 +833,7 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, | |||
| 820 | out: | 833 | out: |
| 821 | pm_runtime_mark_last_busy(&priv->pci_dev->dev); | 834 | pm_runtime_mark_last_busy(&priv->pci_dev->dev); |
| 822 | pm_runtime_put_autosuspend(&priv->pci_dev->dev); | 835 | pm_runtime_put_autosuspend(&priv->pci_dev->dev); |
| 836 | mutex_unlock(&priv->acpi_lock); | ||
| 823 | return ret; | 837 | return ret; |
| 824 | } | 838 | } |
| 825 | 839 | ||
| @@ -1257,6 +1271,83 @@ static void i801_add_tco(struct i801_priv *priv) | |||
| 1257 | priv->tco_pdev = pdev; | 1271 | priv->tco_pdev = pdev; |
| 1258 | } | 1272 | } |
| 1259 | 1273 | ||
| 1274 | #ifdef CONFIG_ACPI | ||
| 1275 | static acpi_status | ||
| 1276 | i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits, | ||
| 1277 | u64 *value, void *handler_context, void *region_context) | ||
| 1278 | { | ||
| 1279 | struct i801_priv *priv = handler_context; | ||
| 1280 | struct pci_dev *pdev = priv->pci_dev; | ||
| 1281 | acpi_status status; | ||
| 1282 | |||
| 1283 | /* | ||
| 1284 | * Once BIOS AML code touches the OpRegion we warn and inhibit any | ||
| 1285 | * further access from the driver itself. This device is now owned | ||
| 1286 | * by the system firmware. | ||
| 1287 | */ | ||
| 1288 | mutex_lock(&priv->acpi_lock); | ||
| 1289 | |||
| 1290 | if (!priv->acpi_reserved) { | ||
| 1291 | priv->acpi_reserved = true; | ||
| 1292 | |||
| 1293 | dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n"); | ||
| 1294 | dev_warn(&pdev->dev, "Driver SMBus register access inhibited\n"); | ||
| 1295 | |||
| 1296 | /* | ||
| 1297 | * BIOS is accessing the host controller so prevent it from | ||
| 1298 | * suspending automatically from now on. | ||
| 1299 | */ | ||
| 1300 | pm_runtime_get_sync(&pdev->dev); | ||
| 1301 | } | ||
| 1302 | |||
| 1303 | if ((function & ACPI_IO_MASK) == ACPI_READ) | ||
| 1304 | status = acpi_os_read_port(address, (u32 *)value, bits); | ||
| 1305 | else | ||
| 1306 | status = acpi_os_write_port(address, (u32)*value, bits); | ||
| 1307 | |||
| 1308 | mutex_unlock(&priv->acpi_lock); | ||
| 1309 | |||
| 1310 | return status; | ||
| 1311 | } | ||
| 1312 | |||
| 1313 | static int i801_acpi_probe(struct i801_priv *priv) | ||
| 1314 | { | ||
| 1315 | struct acpi_device *adev; | ||
| 1316 | acpi_status status; | ||
| 1317 | |||
| 1318 | adev = ACPI_COMPANION(&priv->pci_dev->dev); | ||
| 1319 | if (adev) { | ||
| 1320 | status = acpi_install_address_space_handler(adev->handle, | ||
| 1321 | ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler, | ||
| 1322 | NULL, priv); | ||
| 1323 | if (ACPI_SUCCESS(status)) | ||
| 1324 | return 0; | ||
| 1325 | } | ||
| 1326 | |||
| 1327 | return acpi_check_resource_conflict(&priv->pci_dev->resource[SMBBAR]); | ||
| 1328 | } | ||
| 1329 | |||
| 1330 | static void i801_acpi_remove(struct i801_priv *priv) | ||
| 1331 | { | ||
| 1332 | struct acpi_device *adev; | ||
| 1333 | |||
| 1334 | adev = ACPI_COMPANION(&priv->pci_dev->dev); | ||
| 1335 | if (!adev) | ||
| 1336 | return; | ||
| 1337 | |||
| 1338 | acpi_remove_address_space_handler(adev->handle, | ||
| 1339 | ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler); | ||
| 1340 | |||
| 1341 | mutex_lock(&priv->acpi_lock); | ||
| 1342 | if (priv->acpi_reserved) | ||
| 1343 | pm_runtime_put(&priv->pci_dev->dev); | ||
| 1344 | mutex_unlock(&priv->acpi_lock); | ||
| 1345 | } | ||
| 1346 | #else | ||
| 1347 | static inline int i801_acpi_probe(struct i801_priv *priv) { return 0; } | ||
| 1348 | static inline void i801_acpi_remove(struct i801_priv *priv) { } | ||
| 1349 | #endif | ||
| 1350 | |||
| 1260 | static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) | 1351 | static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) |
| 1261 | { | 1352 | { |
| 1262 | unsigned char temp; | 1353 | unsigned char temp; |
| @@ -1274,6 +1365,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 1274 | priv->adapter.dev.parent = &dev->dev; | 1365 | priv->adapter.dev.parent = &dev->dev; |
| 1275 | ACPI_COMPANION_SET(&priv->adapter.dev, ACPI_COMPANION(&dev->dev)); | 1366 | ACPI_COMPANION_SET(&priv->adapter.dev, ACPI_COMPANION(&dev->dev)); |
| 1276 | priv->adapter.retries = 3; | 1367 | priv->adapter.retries = 3; |
| 1368 | mutex_init(&priv->acpi_lock); | ||
| 1277 | 1369 | ||
| 1278 | priv->pci_dev = dev; | 1370 | priv->pci_dev = dev; |
| 1279 | switch (dev->device) { | 1371 | switch (dev->device) { |
| @@ -1336,10 +1428,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 1336 | return -ENODEV; | 1428 | return -ENODEV; |
| 1337 | } | 1429 | } |
| 1338 | 1430 | ||
| 1339 | err = acpi_check_resource_conflict(&dev->resource[SMBBAR]); | 1431 | if (i801_acpi_probe(priv)) |
| 1340 | if (err) { | ||
| 1341 | return -ENODEV; | 1432 | return -ENODEV; |
| 1342 | } | ||
| 1343 | 1433 | ||
| 1344 | err = pcim_iomap_regions(dev, 1 << SMBBAR, | 1434 | err = pcim_iomap_regions(dev, 1 << SMBBAR, |
| 1345 | dev_driver_string(&dev->dev)); | 1435 | dev_driver_string(&dev->dev)); |
| @@ -1348,6 +1438,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 1348 | "Failed to request SMBus region 0x%lx-0x%Lx\n", | 1438 | "Failed to request SMBus region 0x%lx-0x%Lx\n", |
| 1349 | priv->smba, | 1439 | priv->smba, |
| 1350 | (unsigned long long)pci_resource_end(dev, SMBBAR)); | 1440 | (unsigned long long)pci_resource_end(dev, SMBBAR)); |
| 1441 | i801_acpi_remove(priv); | ||
| 1351 | return err; | 1442 | return err; |
| 1352 | } | 1443 | } |
| 1353 | 1444 | ||
| @@ -1412,6 +1503,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 1412 | err = i2c_add_adapter(&priv->adapter); | 1503 | err = i2c_add_adapter(&priv->adapter); |
| 1413 | if (err) { | 1504 | if (err) { |
| 1414 | dev_err(&dev->dev, "Failed to add SMBus adapter\n"); | 1505 | dev_err(&dev->dev, "Failed to add SMBus adapter\n"); |
| 1506 | i801_acpi_remove(priv); | ||
| 1415 | return err; | 1507 | return err; |
| 1416 | } | 1508 | } |
| 1417 | 1509 | ||
| @@ -1438,6 +1530,7 @@ static void i801_remove(struct pci_dev *dev) | |||
| 1438 | 1530 | ||
| 1439 | i801_del_mux(priv); | 1531 | i801_del_mux(priv); |
| 1440 | i2c_del_adapter(&priv->adapter); | 1532 | i2c_del_adapter(&priv->adapter); |
| 1533 | i801_acpi_remove(priv); | ||
| 1441 | pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg); | 1534 | pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg); |
| 1442 | 1535 | ||
| 1443 | platform_device_unregister(priv->tco_pdev); | 1536 | platform_device_unregister(priv->tco_pdev); |
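The i801 change installs an ACPI SystemIO address-space handler on the device's companion object; the first time firmware AML touches the SMBus I/O range the handler sets acpi_reserved under acpi_lock, and from then on i801_access() refuses driver-initiated transfers with -EBUSY so AML and the driver never interleave register accesses. The handler callback's contract, roughly (function encodes the direction, bits the access width, value is in/out; a minimal sketch, not the driver's full handler):

    static acpi_status my_io_handler(u32 function, acpi_physical_address addr,
                                     u32 bits, u64 *value, void *handler_ctx,
                                     void *region_ctx)
    {
            if ((function & ACPI_IO_MASK) == ACPI_READ)
                    return acpi_os_read_port(addr, (u32 *)value, bits);
            return acpi_os_write_port(addr, (u32)*value, bits);
    }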
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c index aa5f01efd826..30ae35146723 100644 --- a/drivers/i2c/busses/i2c-octeon.c +++ b/drivers/i2c/busses/i2c-octeon.c | |||
| @@ -934,8 +934,15 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target, | |||
| 934 | return result; | 934 | return result; |
| 935 | 935 | ||
| 936 | for (i = 0; i < length; i++) { | 936 | for (i = 0; i < length; i++) { |
| 937 | /* for the last byte TWSI_CTL_AAK must not be set */ | 937 | /* |
| 938 | if (i + 1 == length) | 938 | * For the last byte to receive TWSI_CTL_AAK must not be set. |
| 939 | * | ||
| 940 | * A special case is I2C_M_RECV_LEN where we don't know the | ||
| 941 | * additional length yet. If recv_len is set we assume we're | ||
| 942 | * not reading the final byte and therefore need to set | ||
| 943 | * TWSI_CTL_AAK. | ||
| 944 | */ | ||
| 945 | if ((i + 1 == length) && !(recv_len && i == 0)) | ||
| 939 | final_read = true; | 946 | final_read = true; |
| 940 | 947 | ||
| 941 | /* clear iflg to allow next event */ | 948 | /* clear iflg to allow next event */ |
| @@ -950,12 +957,8 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target, | |||
| 950 | 957 | ||
| 951 | data[i] = octeon_i2c_data_read(i2c); | 958 | data[i] = octeon_i2c_data_read(i2c); |
| 952 | if (recv_len && i == 0) { | 959 | if (recv_len && i == 0) { |
| 953 | if (data[i] > I2C_SMBUS_BLOCK_MAX + 1) { | 960 | if (data[i] > I2C_SMBUS_BLOCK_MAX + 1) |
| 954 | dev_err(i2c->dev, | ||
| 955 | "%s: read len > I2C_SMBUS_BLOCK_MAX %d\n", | ||
| 956 | __func__, data[i]); | ||
| 957 | return -EPROTO; | 961 | return -EPROTO; |
| 958 | } | ||
| 959 | length += data[i]; | 962 | length += data[i]; |
| 960 | } | 963 | } |
| 961 | 964 | ||
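For I2C_M_RECV_LEN (SMBus block reads) the first received byte is the byte count, so the master cannot know which byte is last until that byte has arrived; it must therefore keep TWSI_CTL_AAK set (keep ACKing) while reading byte 0 and only withhold the ACK on the true final byte. A hedged sketch of the loop logic, with bus_read_byte() as a hypothetical helper taking an "ack" flag:

    for (i = 0; i < length; i++) {
            bool last = (i + 1 == length) && !(recv_len && i == 0);
            u8 byte = bus_read_byte(bus, !last);   /* ACK everything but the last */

            if (recv_len && i == 0) {
                    if (byte > I2C_SMBUS_BLOCK_MAX + 1)
                            return -EPROTO;        /* bogus length from the slave */
                    length += byte;                /* now the real length is known */
            }
            data[i] = byte;
    }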
diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c index 6773cadf7c9f..26e7c5187a58 100644 --- a/drivers/i2c/muxes/i2c-mux-reg.c +++ b/drivers/i2c/muxes/i2c-mux-reg.c | |||
| @@ -260,6 +260,7 @@ static struct platform_driver i2c_mux_reg_driver = { | |||
| 260 | .remove = i2c_mux_reg_remove, | 260 | .remove = i2c_mux_reg_remove, |
| 261 | .driver = { | 261 | .driver = { |
| 262 | .name = "i2c-mux-reg", | 262 | .name = "i2c-mux-reg", |
| 263 | .of_match_table = of_match_ptr(i2c_mux_reg_of_match), | ||
| 263 | }, | 264 | }, |
| 264 | }; | 265 | }; |
| 265 | 266 | ||
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index c2e257d97eff..040966775f40 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c | |||
| @@ -178,6 +178,7 @@ static int write_gid(struct ib_device *ib_dev, u8 port, | |||
| 178 | { | 178 | { |
| 179 | int ret = 0; | 179 | int ret = 0; |
| 180 | struct net_device *old_net_dev; | 180 | struct net_device *old_net_dev; |
| 181 | enum ib_gid_type old_gid_type; | ||
| 181 | 182 | ||
| 182 | /* in rdma_cap_roce_gid_table, this function should be protected by a | 183 | /* in rdma_cap_roce_gid_table, this function should be protected by a |
| 183 | * sleep-able lock. | 184 | * sleep-able lock. |
| @@ -199,6 +200,7 @@ static int write_gid(struct ib_device *ib_dev, u8 port, | |||
| 199 | } | 200 | } |
| 200 | 201 | ||
| 201 | old_net_dev = table->data_vec[ix].attr.ndev; | 202 | old_net_dev = table->data_vec[ix].attr.ndev; |
| 203 | old_gid_type = table->data_vec[ix].attr.gid_type; | ||
| 202 | if (old_net_dev && old_net_dev != attr->ndev) | 204 | if (old_net_dev && old_net_dev != attr->ndev) |
| 203 | dev_put(old_net_dev); | 205 | dev_put(old_net_dev); |
| 204 | /* if modify_gid failed, just delete the old gid */ | 206 | /* if modify_gid failed, just delete the old gid */ |
| @@ -207,10 +209,14 @@ static int write_gid(struct ib_device *ib_dev, u8 port, | |||
| 207 | attr = &zattr; | 209 | attr = &zattr; |
| 208 | table->data_vec[ix].context = NULL; | 210 | table->data_vec[ix].context = NULL; |
| 209 | } | 211 | } |
| 210 | if (default_gid) | 212 | |
| 211 | table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT; | ||
| 212 | memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid)); | 213 | memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid)); |
| 213 | memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr)); | 214 | memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr)); |
| 215 | if (default_gid) { | ||
| 216 | table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT; | ||
| 217 | if (action == GID_TABLE_WRITE_ACTION_DEL) | ||
| 218 | table->data_vec[ix].attr.gid_type = old_gid_type; | ||
| 219 | } | ||
| 214 | if (table->data_vec[ix].attr.ndev && | 220 | if (table->data_vec[ix].attr.ndev && |
| 215 | table->data_vec[ix].attr.ndev != old_net_dev) | 221 | table->data_vec[ix].attr.ndev != old_net_dev) |
| 216 | dev_hold(table->data_vec[ix].attr.ndev); | 222 | dev_hold(table->data_vec[ix].attr.ndev); |
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 1d92e091e22e..c99525512b34 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c | |||
| @@ -3452,14 +3452,14 @@ static int cm_establish(struct ib_cm_id *cm_id) | |||
| 3452 | work->cm_event.event = IB_CM_USER_ESTABLISHED; | 3452 | work->cm_event.event = IB_CM_USER_ESTABLISHED; |
| 3453 | 3453 | ||
| 3454 | /* Check if the device started its remove_one */ | 3454 | /* Check if the device started its remove_one */ |
| 3455 | spin_lock_irq(&cm.lock); | 3455 | spin_lock_irqsave(&cm.lock, flags); |
| 3456 | if (!cm_dev->going_down) { | 3456 | if (!cm_dev->going_down) { |
| 3457 | queue_delayed_work(cm.wq, &work->work, 0); | 3457 | queue_delayed_work(cm.wq, &work->work, 0); |
| 3458 | } else { | 3458 | } else { |
| 3459 | kfree(work); | 3459 | kfree(work); |
| 3460 | ret = -ENODEV; | 3460 | ret = -ENODEV; |
| 3461 | } | 3461 | } |
| 3462 | spin_unlock_irq(&cm.lock); | 3462 | spin_unlock_irqrestore(&cm.lock, flags); |
| 3463 | 3463 | ||
| 3464 | out: | 3464 | out: |
| 3465 | return ret; | 3465 | return ret; |
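cm_establish() can be reached from contexts where interrupts may already be disabled, so the plain spin_lock_irq()/spin_unlock_irq() pair (which unconditionally re-enables interrupts on unlock) is replaced with the save/restore variant. The general pattern:

    unsigned long flags;

    spin_lock_irqsave(&lock, flags);       /* remembers the caller's IRQ state */
    /* critical section */
    spin_unlock_irqrestore(&lock, flags);  /* restores exactly that state */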
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 5516fb070344..5c155fa91eec 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c | |||
| @@ -661,6 +661,9 @@ int ib_query_port(struct ib_device *device, | |||
| 661 | if (err || port_attr->subnet_prefix) | 661 | if (err || port_attr->subnet_prefix) |
| 662 | return err; | 662 | return err; |
| 663 | 663 | ||
| 664 | if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND) | ||
| 665 | return 0; | ||
| 666 | |||
| 664 | err = ib_query_gid(device, port_num, 0, &gid, NULL); | 667 | err = ib_query_gid(device, port_num, 0, &gid, NULL); |
| 665 | if (err) | 668 | if (err) |
| 666 | return err; | 669 | return err; |
| @@ -1024,7 +1027,8 @@ static int __init ib_core_init(void) | |||
| 1024 | goto err_mad; | 1027 | goto err_mad; |
| 1025 | } | 1028 | } |
| 1026 | 1029 | ||
| 1027 | if (ib_add_ibnl_clients()) { | 1030 | ret = ib_add_ibnl_clients(); |
| 1031 | if (ret) { | ||
| 1028 | pr_warn("Couldn't register ibnl clients\n"); | 1032 | pr_warn("Couldn't register ibnl clients\n"); |
| 1029 | goto err_sa; | 1033 | goto err_sa; |
| 1030 | } | 1034 | } |
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c index 43e3fa27102b..1c41b95cefec 100644 --- a/drivers/infiniband/core/iwpm_msg.c +++ b/drivers/infiniband/core/iwpm_msg.c | |||
| @@ -506,7 +506,7 @@ int iwpm_add_and_query_mapping_cb(struct sk_buff *skb, | |||
| 506 | if (!nlmsg_request) { | 506 | if (!nlmsg_request) { |
| 507 | pr_info("%s: Could not find a matching request (seq = %u)\n", | 507 | pr_info("%s: Could not find a matching request (seq = %u)\n", |
| 508 | __func__, msg_seq); | 508 | __func__, msg_seq); |
| 509 | return -EINVAL; | 509 | return -EINVAL; |
| 510 | } | 510 | } |
| 511 | pm_msg = nlmsg_request->req_buffer; | 511 | pm_msg = nlmsg_request->req_buffer; |
| 512 | local_sockaddr = (struct sockaddr_storage *) | 512 | local_sockaddr = (struct sockaddr_storage *) |
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 82fb511112da..2d49228f28b2 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c | |||
| @@ -1638,9 +1638,9 @@ static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv) | |||
| 1638 | /* Now, check to see if there are any methods still in use */ | 1638 | /* Now, check to see if there are any methods still in use */ |
| 1639 | if (!check_method_table(method)) { | 1639 | if (!check_method_table(method)) { |
| 1640 | /* If not, release management method table */ | 1640 | /* If not, release management method table */ |
| 1641 | kfree(method); | 1641 | kfree(method); |
| 1642 | class->method_table[mgmt_class] = NULL; | 1642 | class->method_table[mgmt_class] = NULL; |
| 1643 | /* Any management classes left ? */ | 1643 | /* Any management classes left ? */ |
| 1644 | if (!check_class_table(class)) { | 1644 | if (!check_class_table(class)) { |
| 1645 | /* If not, release management class table */ | 1645 | /* If not, release management class table */ |
| 1646 | kfree(class); | 1646 | kfree(class); |
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index 5e573bb18660..a5793c8f1590 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c | |||
| @@ -889,9 +889,9 @@ static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num) | |||
| 889 | static void setup_hw_stats(struct ib_device *device, struct ib_port *port, | 889 | static void setup_hw_stats(struct ib_device *device, struct ib_port *port, |
| 890 | u8 port_num) | 890 | u8 port_num) |
| 891 | { | 891 | { |
| 892 | struct attribute_group *hsag = NULL; | 892 | struct attribute_group *hsag; |
| 893 | struct rdma_hw_stats *stats; | 893 | struct rdma_hw_stats *stats; |
| 894 | int i = 0, ret; | 894 | int i, ret; |
| 895 | 895 | ||
| 896 | stats = device->alloc_hw_stats(device, port_num); | 896 | stats = device->alloc_hw_stats(device, port_num); |
| 897 | 897 | ||
| @@ -899,19 +899,22 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port, | |||
| 899 | return; | 899 | return; |
| 900 | 900 | ||
| 901 | if (!stats->names || stats->num_counters <= 0) | 901 | if (!stats->names || stats->num_counters <= 0) |
| 902 | goto err; | 902 | goto err_free_stats; |
| 903 | 903 | ||
| 904 | /* | ||
| 905 | * Two extra attribute elements here, one for the lifespan entry and | ||
| 906 | * one to NULL terminate the list for the sysfs core code | ||
| 907 | */ | ||
| 904 | hsag = kzalloc(sizeof(*hsag) + | 908 | hsag = kzalloc(sizeof(*hsag) + |
| 905 | // 1 extra for the lifespan config entry | 909 | sizeof(void *) * (stats->num_counters + 2), |
| 906 | sizeof(void *) * (stats->num_counters + 1), | ||
| 907 | GFP_KERNEL); | 910 | GFP_KERNEL); |
| 908 | if (!hsag) | 911 | if (!hsag) |
| 909 | return; | 912 | goto err_free_stats; |
| 910 | 913 | ||
| 911 | ret = device->get_hw_stats(device, stats, port_num, | 914 | ret = device->get_hw_stats(device, stats, port_num, |
| 912 | stats->num_counters); | 915 | stats->num_counters); |
| 913 | if (ret != stats->num_counters) | 916 | if (ret != stats->num_counters) |
| 914 | goto err; | 917 | goto err_free_hsag; |
| 915 | 918 | ||
| 916 | stats->timestamp = jiffies; | 919 | stats->timestamp = jiffies; |
| 917 | 920 | ||
| @@ -922,10 +925,13 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port, | |||
| 922 | hsag->attrs[i] = alloc_hsa(i, port_num, stats->names[i]); | 925 | hsag->attrs[i] = alloc_hsa(i, port_num, stats->names[i]); |
| 923 | if (!hsag->attrs[i]) | 926 | if (!hsag->attrs[i]) |
| 924 | goto err; | 927 | goto err; |
| 928 | sysfs_attr_init(hsag->attrs[i]); | ||
| 925 | } | 929 | } |
| 926 | 930 | ||
| 927 | /* treat an error here as non-fatal */ | 931 | /* treat an error here as non-fatal */ |
| 928 | hsag->attrs[i] = alloc_hsa_lifespan("lifespan", port_num); | 932 | hsag->attrs[i] = alloc_hsa_lifespan("lifespan", port_num); |
| 933 | if (hsag->attrs[i]) | ||
| 934 | sysfs_attr_init(hsag->attrs[i]); | ||
| 929 | 935 | ||
| 930 | if (port) { | 936 | if (port) { |
| 931 | struct kobject *kobj = &port->kobj; | 937 | struct kobject *kobj = &port->kobj; |
| @@ -946,10 +952,12 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port, | |||
| 946 | return; | 952 | return; |
| 947 | 953 | ||
| 948 | err: | 954 | err: |
| 949 | kfree(stats); | ||
| 950 | for (; i >= 0; i--) | 955 | for (; i >= 0; i--) |
| 951 | kfree(hsag->attrs[i]); | 956 | kfree(hsag->attrs[i]); |
| 957 | err_free_hsag: | ||
| 952 | kfree(hsag); | 958 | kfree(hsag); |
| 959 | err_free_stats: | ||
| 960 | kfree(stats); | ||
| 953 | return; | 961 | return; |
| 954 | } | 962 | } |
| 955 | 963 | ||
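The hw_stats attribute array needs num_counters + 2 slots (one extra for the "lifespan" attribute and one NULL terminator the sysfs core walks until), each dynamically allocated attribute needs sysfs_attr_init() so lockdep gets a valid key, and the error paths now free in reverse allocation order. A compressed sketch of the allocate/unwind shape, assuming N counters and a hypothetical make_attr() allocator:

    hsag = kzalloc(sizeof(*hsag) + sizeof(void *) * (N + 2), GFP_KERNEL);
    if (!hsag)
            goto err_free_stats;
    for (i = 0; i < N; i++) {
            hsag->attrs[i] = make_attr(i);
            if (!hsag->attrs[i])
                    goto err;               /* free what was allocated so far */
            sysfs_attr_init(hsag->attrs[i]);
    }
    /* attrs[N] = lifespan entry, attrs[N + 1] stays NULL as the terminator */

    err:
            for (; i >= 0; i--)
                    kfree(hsag->attrs[i]);  /* kfree(NULL) is a no-op */
    err_free_hsag:
            kfree(hsag);
    err_free_stats:
            kfree(stats);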
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c index 6e7050ab9e16..14d7eeb09be6 100644 --- a/drivers/infiniband/hw/hfi1/affinity.c +++ b/drivers/infiniband/hw/hfi1/affinity.c | |||
| @@ -300,16 +300,15 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node) | |||
| 300 | const struct cpumask *node_mask, | 300 | const struct cpumask *node_mask, |
| 301 | *proc_mask = tsk_cpus_allowed(current); | 301 | *proc_mask = tsk_cpus_allowed(current); |
| 302 | struct cpu_mask_set *set = &dd->affinity->proc; | 302 | struct cpu_mask_set *set = &dd->affinity->proc; |
| 303 | char buf[1024]; | ||
| 304 | 303 | ||
| 305 | /* | 304 | /* |
| 306 | * check whether process/context affinity has already | 305 | * check whether process/context affinity has already |
| 307 | * been set | 306 | * been set |
| 308 | */ | 307 | */ |
| 309 | if (cpumask_weight(proc_mask) == 1) { | 308 | if (cpumask_weight(proc_mask) == 1) { |
| 310 | scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask)); | 309 | hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl", |
| 311 | hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %s", | 310 | current->pid, current->comm, |
| 312 | current->pid, current->comm, buf); | 311 | cpumask_pr_args(proc_mask)); |
| 313 | /* | 312 | /* |
| 314 | * Mark the pre-set CPU as used. This is atomic so we don't | 313 | * Mark the pre-set CPU as used. This is atomic so we don't |
| 315 | * need the lock | 314 | * need the lock |
| @@ -318,9 +317,9 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node) | |||
| 318 | cpumask_set_cpu(cpu, &set->used); | 317 | cpumask_set_cpu(cpu, &set->used); |
| 319 | goto done; | 318 | goto done; |
| 320 | } else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) { | 319 | } else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) { |
| 321 | scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask)); | 320 | hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl", |
| 322 | hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %s", | 321 | current->pid, current->comm, |
| 323 | current->pid, current->comm, buf); | 322 | cpumask_pr_args(proc_mask)); |
| 324 | goto done; | 323 | goto done; |
| 325 | } | 324 | } |
| 326 | 325 | ||
| @@ -356,8 +355,8 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node) | |||
| 356 | cpumask_or(intrs, intrs, (dd->affinity->rcv_intr.gen ? | 355 | cpumask_or(intrs, intrs, (dd->affinity->rcv_intr.gen ? |
| 357 | &dd->affinity->rcv_intr.mask : | 356 | &dd->affinity->rcv_intr.mask : |
| 358 | &dd->affinity->rcv_intr.used)); | 357 | &dd->affinity->rcv_intr.used)); |
| 359 | scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(intrs)); | 358 | hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl", |
| 360 | hfi1_cdbg(PROC, "CPUs used by interrupts: %s", buf); | 359 | cpumask_pr_args(intrs)); |
| 361 | 360 | ||
| 362 | /* | 361 | /* |
| 363 | * If we don't have a NUMA node requested, preference is towards | 362 | * If we don't have a NUMA node requested, preference is towards |
| @@ -366,18 +365,16 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node) | |||
| 366 | if (node == -1) | 365 | if (node == -1) |
| 367 | node = dd->node; | 366 | node = dd->node; |
| 368 | node_mask = cpumask_of_node(node); | 367 | node_mask = cpumask_of_node(node); |
| 369 | scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(node_mask)); | 368 | hfi1_cdbg(PROC, "device on NUMA %u, CPUs %*pbl", node, |
| 370 | hfi1_cdbg(PROC, "device on NUMA %u, CPUs %s", node, buf); | 369 | cpumask_pr_args(node_mask)); |
| 371 | 370 | ||
| 372 | /* diff will hold all unused cpus */ | 371 | /* diff will hold all unused cpus */ |
| 373 | cpumask_andnot(diff, &set->mask, &set->used); | 372 | cpumask_andnot(diff, &set->mask, &set->used); |
| 374 | scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(diff)); | 373 | hfi1_cdbg(PROC, "unused CPUs (all) %*pbl", cpumask_pr_args(diff)); |
| 375 | hfi1_cdbg(PROC, "unused CPUs (all) %s", buf); | ||
| 376 | 374 | ||
| 377 | /* get cpumask of available CPUs on preferred NUMA */ | 375 | /* get cpumask of available CPUs on preferred NUMA */ |
| 378 | cpumask_and(mask, diff, node_mask); | 376 | cpumask_and(mask, diff, node_mask); |
| 379 | scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask)); | 377 | hfi1_cdbg(PROC, "available cpus on NUMA %*pbl", cpumask_pr_args(mask)); |
| 380 | hfi1_cdbg(PROC, "available cpus on NUMA %s", buf); | ||
| 381 | 378 | ||
| 382 | /* | 379 | /* |
| 383 | * At first, we don't want to place processes on the same | 380 | * At first, we don't want to place processes on the same |
| @@ -395,8 +392,8 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node) | |||
| 395 | cpumask_andnot(diff, &set->mask, &set->used); | 392 | cpumask_andnot(diff, &set->mask, &set->used); |
| 396 | cpumask_andnot(mask, diff, node_mask); | 393 | cpumask_andnot(mask, diff, node_mask); |
| 397 | } | 394 | } |
| 398 | scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask)); | 395 | hfi1_cdbg(PROC, "possible CPUs for process %*pbl", |
| 399 | hfi1_cdbg(PROC, "possible CPUs for process %s", buf); | 396 | cpumask_pr_args(mask)); |
| 400 | 397 | ||
| 401 | cpu = cpumask_first(mask); | 398 | cpu = cpumask_first(mask); |
| 402 | if (cpu >= nr_cpu_ids) /* empty */ | 399 | if (cpu >= nr_cpu_ids) /* empty */ |
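The hfi1 affinity changes drop the 1 KiB on-stack scratch buffer: the printk extension %*pbl prints a cpumask as a CPU list directly, and cpumask_pr_args() expands to the (nr_cpu_ids, cpumask_bits(mask)) pair that the %*pb/%*pbl specifiers expect. Example usage:

    pr_info("online CPUs: %*pbl\n", cpumask_pr_args(cpu_online_mask));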
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 3b876da745a1..81619fbb5842 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c | |||
| @@ -7832,8 +7832,8 @@ static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg) | |||
| 7832 | * save first 2 flits in the packet that caused | 7832 | * save first 2 flits in the packet that caused |
| 7833 | * the error | 7833 | * the error |
| 7834 | */ | 7834 | */ |
| 7835 | dd->err_info_rcvport.packet_flit1 = hdr0; | 7835 | dd->err_info_rcvport.packet_flit1 = hdr0; |
| 7836 | dd->err_info_rcvport.packet_flit2 = hdr1; | 7836 | dd->err_info_rcvport.packet_flit2 = hdr1; |
| 7837 | } | 7837 | } |
| 7838 | switch (info) { | 7838 | switch (info) { |
| 7839 | case 1: | 7839 | case 1: |
| @@ -11906,7 +11906,7 @@ static void update_synth_timer(unsigned long opaque) | |||
| 11906 | hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit); | 11906 | hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit); |
| 11907 | } | 11907 | } |
| 11908 | 11908 | ||
| 11909 | mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); | 11909 | mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); |
| 11910 | } | 11910 | } |
| 11911 | 11911 | ||
| 11912 | #define C_MAX_NAME 13 /* 12 chars + one for /0 */ | 11912 | #define C_MAX_NAME 13 /* 12 chars + one for /0 */ |
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index 5cc492e5776d..0d28a5a40fae 100644 --- a/drivers/infiniband/hw/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c | |||
| @@ -1337,7 +1337,7 @@ static void cleanup_device_data(struct hfi1_devdata *dd) | |||
| 1337 | dma_free_coherent(&dd->pcidev->dev, sizeof(u64), | 1337 | dma_free_coherent(&dd->pcidev->dev, sizeof(u64), |
| 1338 | (void *)dd->rcvhdrtail_dummy_kvaddr, | 1338 | (void *)dd->rcvhdrtail_dummy_kvaddr, |
| 1339 | dd->rcvhdrtail_dummy_physaddr); | 1339 | dd->rcvhdrtail_dummy_physaddr); |
| 1340 | dd->rcvhdrtail_dummy_kvaddr = NULL; | 1340 | dd->rcvhdrtail_dummy_kvaddr = NULL; |
| 1341 | } | 1341 | } |
| 1342 | 1342 | ||
| 1343 | for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) { | 1343 | for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) { |
diff --git a/drivers/infiniband/hw/hfi1/trace.c b/drivers/infiniband/hw/hfi1/trace.c index 79b2952c0dfb..4cfb13771897 100644 --- a/drivers/infiniband/hw/hfi1/trace.c +++ b/drivers/infiniband/hw/hfi1/trace.c | |||
| @@ -214,19 +214,6 @@ const char *print_u32_array( | |||
| 214 | return ret; | 214 | return ret; |
| 215 | } | 215 | } |
| 216 | 216 | ||
| 217 | const char *print_u64_array( | ||
| 218 | struct trace_seq *p, | ||
| 219 | u64 *arr, int len) | ||
| 220 | { | ||
| 221 | int i; | ||
| 222 | const char *ret = trace_seq_buffer_ptr(p); | ||
| 223 | |||
| 224 | for (i = 0; i < len; i++) | ||
| 225 | trace_seq_printf(p, "%s0x%016llx", i == 0 ? "" : " ", arr[i]); | ||
| 226 | trace_seq_putc(p, 0); | ||
| 227 | return ret; | ||
| 228 | } | ||
| 229 | |||
| 230 | __hfi1_trace_fn(PKT); | 217 | __hfi1_trace_fn(PKT); |
| 231 | __hfi1_trace_fn(PROC); | 218 | __hfi1_trace_fn(PROC); |
| 232 | __hfi1_trace_fn(SDMA); | 219 | __hfi1_trace_fn(SDMA); |
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index 29f4795f866c..47ffd273ecbd 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c | |||
| @@ -183,7 +183,7 @@ struct user_sdma_iovec { | |||
| 183 | struct sdma_mmu_node *node; | 183 | struct sdma_mmu_node *node; |
| 184 | }; | 184 | }; |
| 185 | 185 | ||
| 186 | #define SDMA_CACHE_NODE_EVICT BIT(0) | 186 | #define SDMA_CACHE_NODE_EVICT 0 |
| 187 | 187 | ||
| 188 | struct sdma_mmu_node { | 188 | struct sdma_mmu_node { |
| 189 | struct mmu_rb_node rb; | 189 | struct mmu_rb_node rb; |
| @@ -1355,11 +1355,11 @@ static int set_txreq_header(struct user_sdma_request *req, | |||
| 1355 | */ | 1355 | */ |
| 1356 | SDMA_DBG(req, "TID offset %ubytes %uunits om%u", | 1356 | SDMA_DBG(req, "TID offset %ubytes %uunits om%u", |
| 1357 | req->tidoffset, req->tidoffset / req->omfactor, | 1357 | req->tidoffset, req->tidoffset / req->omfactor, |
| 1358 | !!(req->omfactor - KDETH_OM_SMALL)); | 1358 | req->omfactor != KDETH_OM_SMALL); |
| 1359 | KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET, | 1359 | KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET, |
| 1360 | req->tidoffset / req->omfactor); | 1360 | req->tidoffset / req->omfactor); |
| 1361 | KDETH_SET(hdr->kdeth.ver_tid_offset, OM, | 1361 | KDETH_SET(hdr->kdeth.ver_tid_offset, OM, |
| 1362 | !!(req->omfactor - KDETH_OM_SMALL)); | 1362 | req->omfactor != KDETH_OM_SMALL); |
| 1363 | } | 1363 | } |
| 1364 | done: | 1364 | done: |
| 1365 | trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt, | 1365 | trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt, |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index b01ef6eee6e8..0eb09e104542 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
| @@ -505,9 +505,9 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, | |||
| 505 | props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B; | 505 | props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B; |
| 506 | else | 506 | else |
| 507 | props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A; | 507 | props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A; |
| 508 | if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) | ||
| 509 | props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING; | ||
| 510 | } | 508 | } |
| 509 | if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) | ||
| 510 | props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING; | ||
| 511 | 511 | ||
| 512 | props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM; | 512 | props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM; |
| 513 | 513 | ||
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index dabcc65bd65e..9c0e67bd2ba7 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c | |||
| @@ -822,7 +822,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, | |||
| 822 | int eqn; | 822 | int eqn; |
| 823 | int err; | 823 | int err; |
| 824 | 824 | ||
| 825 | if (entries < 0) | 825 | if (entries < 0 || |
| 826 | (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))) | ||
| 826 | return ERR_PTR(-EINVAL); | 827 | return ERR_PTR(-EINVAL); |
| 827 | 828 | ||
| 828 | if (check_cq_create_flags(attr->flags)) | 829 | if (check_cq_create_flags(attr->flags)) |
| @@ -1168,11 +1169,16 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) | |||
| 1168 | return -ENOSYS; | 1169 | return -ENOSYS; |
| 1169 | } | 1170 | } |
| 1170 | 1171 | ||
| 1171 | if (entries < 1) | 1172 | if (entries < 1 || |
| 1173 | entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) { | ||
| 1174 | mlx5_ib_warn(dev, "wrong entries number %d, max %d\n", | ||
| 1175 | entries, | ||
| 1176 | 1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)); | ||
| 1172 | return -EINVAL; | 1177 | return -EINVAL; |
| 1178 | } | ||
| 1173 | 1179 | ||
| 1174 | entries = roundup_pow_of_two(entries + 1); | 1180 | entries = roundup_pow_of_two(entries + 1); |
| 1175 | if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1) | 1181 | if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1) |
| 1176 | return -EINVAL; | 1182 | return -EINVAL; |
| 1177 | 1183 | ||
| 1178 | if (entries == ibcq->cqe + 1) | 1184 | if (entries == ibcq->cqe + 1) |
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index c72797cd9e4f..b48ad85315dc 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
| @@ -524,6 +524,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, | |||
| 524 | MLX5_CAP_ETH(dev->mdev, scatter_fcs)) | 524 | MLX5_CAP_ETH(dev->mdev, scatter_fcs)) |
| 525 | props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS; | 525 | props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS; |
| 526 | 526 | ||
| 527 | if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS)) | ||
| 528 | props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING; | ||
| 529 | |||
| 527 | props->vendor_part_id = mdev->pdev->device; | 530 | props->vendor_part_id = mdev->pdev->device; |
| 528 | props->hw_ver = mdev->pdev->revision; | 531 | props->hw_ver = mdev->pdev->revision; |
| 529 | 532 | ||
| @@ -915,7 +918,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, | |||
| 915 | num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE; | 918 | num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE; |
| 916 | gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE; | 919 | gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE; |
| 917 | resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp); | 920 | resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp); |
| 918 | resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size); | 921 | if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf)) |
| 922 | resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size); | ||
| 919 | resp.cache_line_size = L1_CACHE_BYTES; | 923 | resp.cache_line_size = L1_CACHE_BYTES; |
| 920 | resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq); | 924 | resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq); |
| 921 | resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq); | 925 | resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq); |
| @@ -988,7 +992,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, | |||
| 988 | if (field_avail(typeof(resp), cqe_version, udata->outlen)) | 992 | if (field_avail(typeof(resp), cqe_version, udata->outlen)) |
| 989 | resp.response_length += sizeof(resp.cqe_version); | 993 | resp.response_length += sizeof(resp.cqe_version); |
| 990 | 994 | ||
| 991 | if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) { | 995 | /* |
| 996 | * We don't want to expose information from the PCI bar that is located | ||
| 997 | * after 4096 bytes, so if the arch only supports larger pages, let's | ||
| 998 | * pretend we don't support reading the HCA's core clock. This is also | ||
| 999 | * forced by mmap function. | ||
| 1000 | */ | ||
| 1001 | if (PAGE_SIZE <= 4096 && | ||
| 1002 | field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) { | ||
| 992 | resp.comp_mask |= | 1003 | resp.comp_mask |= |
| 993 | MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET; | 1004 | MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET; |
| 994 | resp.hca_core_clock_offset = | 1005 | resp.hca_core_clock_offset = |
| @@ -1798,7 +1809,7 @@ static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr, | |||
| 1798 | { | 1809 | { |
| 1799 | struct mlx5_ib_dev *dev = | 1810 | struct mlx5_ib_dev *dev = |
| 1800 | container_of(device, struct mlx5_ib_dev, ib_dev.dev); | 1811 | container_of(device, struct mlx5_ib_dev, ib_dev.dev); |
| 1801 | return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(dev->mdev), | 1812 | return sprintf(buf, "%d.%d.%04d\n", fw_rev_maj(dev->mdev), |
| 1802 | fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev)); | 1813 | fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev)); |
| 1803 | } | 1814 | } |
| 1804 | 1815 | ||
| @@ -1866,14 +1877,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, | |||
| 1866 | break; | 1877 | break; |
| 1867 | 1878 | ||
| 1868 | case MLX5_DEV_EVENT_PORT_DOWN: | 1879 | case MLX5_DEV_EVENT_PORT_DOWN: |
| 1880 | case MLX5_DEV_EVENT_PORT_INITIALIZED: | ||
| 1869 | ibev.event = IB_EVENT_PORT_ERR; | 1881 | ibev.event = IB_EVENT_PORT_ERR; |
| 1870 | port = (u8)param; | 1882 | port = (u8)param; |
| 1871 | break; | 1883 | break; |
| 1872 | 1884 | ||
| 1873 | case MLX5_DEV_EVENT_PORT_INITIALIZED: | ||
| 1874 | /* not used by ULPs */ | ||
| 1875 | return; | ||
| 1876 | |||
| 1877 | case MLX5_DEV_EVENT_LID_CHANGE: | 1885 | case MLX5_DEV_EVENT_LID_CHANGE: |
| 1878 | ibev.event = IB_EVENT_LID_CHANGE; | 1886 | ibev.event = IB_EVENT_LID_CHANGE; |
| 1879 | port = (u8)param; | 1887 | port = (u8)param; |
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 504117657d41..ce434228a5ea 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
| @@ -235,6 +235,8 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap, | |||
| 235 | qp->rq.max_gs = 0; | 235 | qp->rq.max_gs = 0; |
| 236 | qp->rq.wqe_cnt = 0; | 236 | qp->rq.wqe_cnt = 0; |
| 237 | qp->rq.wqe_shift = 0; | 237 | qp->rq.wqe_shift = 0; |
| 238 | cap->max_recv_wr = 0; | ||
| 239 | cap->max_recv_sge = 0; | ||
| 238 | } else { | 240 | } else { |
| 239 | if (ucmd) { | 241 | if (ucmd) { |
| 240 | qp->rq.wqe_cnt = ucmd->rq_wqe_count; | 242 | qp->rq.wqe_cnt = ucmd->rq_wqe_count; |
| @@ -1851,13 +1853,15 @@ static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev, | |||
| 1851 | static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, | 1853 | static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, |
| 1852 | const struct ib_ah_attr *ah, | 1854 | const struct ib_ah_attr *ah, |
| 1853 | struct mlx5_qp_path *path, u8 port, int attr_mask, | 1855 | struct mlx5_qp_path *path, u8 port, int attr_mask, |
| 1854 | u32 path_flags, const struct ib_qp_attr *attr) | 1856 | u32 path_flags, const struct ib_qp_attr *attr, |
| 1857 | bool alt) | ||
| 1855 | { | 1858 | { |
| 1856 | enum rdma_link_layer ll = rdma_port_get_link_layer(&dev->ib_dev, port); | 1859 | enum rdma_link_layer ll = rdma_port_get_link_layer(&dev->ib_dev, port); |
| 1857 | int err; | 1860 | int err; |
| 1858 | 1861 | ||
| 1859 | if (attr_mask & IB_QP_PKEY_INDEX) | 1862 | if (attr_mask & IB_QP_PKEY_INDEX) |
| 1860 | path->pkey_index = attr->pkey_index; | 1863 | path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index : |
| 1864 | attr->pkey_index); | ||
| 1861 | 1865 | ||
| 1862 | if (ah->ah_flags & IB_AH_GRH) { | 1866 | if (ah->ah_flags & IB_AH_GRH) { |
| 1863 | if (ah->grh.sgid_index >= | 1867 | if (ah->grh.sgid_index >= |
| @@ -1877,9 +1881,9 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, | |||
| 1877 | ah->grh.sgid_index); | 1881 | ah->grh.sgid_index); |
| 1878 | path->dci_cfi_prio_sl = (ah->sl & 0x7) << 4; | 1882 | path->dci_cfi_prio_sl = (ah->sl & 0x7) << 4; |
| 1879 | } else { | 1883 | } else { |
| 1880 | path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0; | 1884 | path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0; |
| 1881 | path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : | 1885 | path->fl_free_ar |= |
| 1882 | 0; | 1886 | (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x40 : 0; |
| 1883 | path->rlid = cpu_to_be16(ah->dlid); | 1887 | path->rlid = cpu_to_be16(ah->dlid); |
| 1884 | path->grh_mlid = ah->src_path_bits & 0x7f; | 1888 | path->grh_mlid = ah->src_path_bits & 0x7f; |
| 1885 | if (ah->ah_flags & IB_AH_GRH) | 1889 | if (ah->ah_flags & IB_AH_GRH) |
| @@ -1903,7 +1907,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, | |||
| 1903 | path->port = port; | 1907 | path->port = port; |
| 1904 | 1908 | ||
| 1905 | if (attr_mask & IB_QP_TIMEOUT) | 1909 | if (attr_mask & IB_QP_TIMEOUT) |
| 1906 | path->ackto_lt = attr->timeout << 3; | 1910 | path->ackto_lt = (alt ? attr->alt_timeout : attr->timeout) << 3; |
| 1907 | 1911 | ||
| 1908 | if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt) | 1912 | if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt) |
| 1909 | return modify_raw_packet_eth_prio(dev->mdev, | 1913 | return modify_raw_packet_eth_prio(dev->mdev, |
| @@ -2264,7 +2268,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, | |||
| 2264 | context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num); | 2268 | context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num); |
| 2265 | 2269 | ||
| 2266 | if (attr_mask & IB_QP_PKEY_INDEX) | 2270 | if (attr_mask & IB_QP_PKEY_INDEX) |
| 2267 | context->pri_path.pkey_index = attr->pkey_index; | 2271 | context->pri_path.pkey_index = cpu_to_be16(attr->pkey_index); |
| 2268 | 2272 | ||
| 2269 | /* todo implement counter_index functionality */ | 2273 | /* todo implement counter_index functionality */ |
| 2270 | 2274 | ||
| @@ -2277,7 +2281,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, | |||
| 2277 | if (attr_mask & IB_QP_AV) { | 2281 | if (attr_mask & IB_QP_AV) { |
| 2278 | err = mlx5_set_path(dev, qp, &attr->ah_attr, &context->pri_path, | 2282 | err = mlx5_set_path(dev, qp, &attr->ah_attr, &context->pri_path, |
| 2279 | attr_mask & IB_QP_PORT ? attr->port_num : qp->port, | 2283 | attr_mask & IB_QP_PORT ? attr->port_num : qp->port, |
| 2280 | attr_mask, 0, attr); | 2284 | attr_mask, 0, attr, false); |
| 2281 | if (err) | 2285 | if (err) |
| 2282 | goto out; | 2286 | goto out; |
| 2283 | } | 2287 | } |
| @@ -2288,7 +2292,9 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, | |||
| 2288 | if (attr_mask & IB_QP_ALT_PATH) { | 2292 | if (attr_mask & IB_QP_ALT_PATH) { |
| 2289 | err = mlx5_set_path(dev, qp, &attr->alt_ah_attr, | 2293 | err = mlx5_set_path(dev, qp, &attr->alt_ah_attr, |
| 2290 | &context->alt_path, | 2294 | &context->alt_path, |
| 2291 | attr->alt_port_num, attr_mask, 0, attr); | 2295 | attr->alt_port_num, |
| 2296 | attr_mask | IB_QP_PKEY_INDEX | IB_QP_TIMEOUT, | ||
| 2297 | 0, attr, true); | ||
| 2292 | if (err) | 2298 | if (err) |
| 2293 | goto out; | 2299 | goto out; |
| 2294 | } | 2300 | } |
| @@ -4013,11 +4019,12 @@ static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, | |||
| 4013 | if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) { | 4019 | if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) { |
| 4014 | to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path); | 4020 | to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path); |
| 4015 | to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path); | 4021 | to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path); |
| 4016 | qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f; | 4022 | qp_attr->alt_pkey_index = |
| 4023 | be16_to_cpu(context->alt_path.pkey_index); | ||
| 4017 | qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num; | 4024 | qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num; |
| 4018 | } | 4025 | } |
| 4019 | 4026 | ||
| 4020 | qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f; | 4027 | qp_attr->pkey_index = be16_to_cpu(context->pri_path.pkey_index); |
| 4021 | qp_attr->port_num = context->pri_path.port; | 4028 | qp_attr->port_num = context->pri_path.port; |
| 4022 | 4029 | ||
| 4023 | /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ | 4030 | /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ |
| @@ -4079,17 +4086,19 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, | |||
| 4079 | qp_attr->cap.max_recv_sge = qp->rq.max_gs; | 4086 | qp_attr->cap.max_recv_sge = qp->rq.max_gs; |
| 4080 | 4087 | ||
| 4081 | if (!ibqp->uobject) { | 4088 | if (!ibqp->uobject) { |
| 4082 | qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; | 4089 | qp_attr->cap.max_send_wr = qp->sq.max_post; |
| 4083 | qp_attr->cap.max_send_sge = qp->sq.max_gs; | 4090 | qp_attr->cap.max_send_sge = qp->sq.max_gs; |
| 4091 | qp_init_attr->qp_context = ibqp->qp_context; | ||
| 4084 | } else { | 4092 | } else { |
| 4085 | qp_attr->cap.max_send_wr = 0; | 4093 | qp_attr->cap.max_send_wr = 0; |
| 4086 | qp_attr->cap.max_send_sge = 0; | 4094 | qp_attr->cap.max_send_sge = 0; |
| 4087 | } | 4095 | } |
| 4088 | 4096 | ||
| 4089 | /* We don't support inline sends for kernel QPs (yet), and we | 4097 | qp_init_attr->qp_type = ibqp->qp_type; |
| 4090 | * don't know what userspace's value should be. | 4098 | qp_init_attr->recv_cq = ibqp->recv_cq; |
| 4091 | */ | 4099 | qp_init_attr->send_cq = ibqp->send_cq; |
| 4092 | qp_attr->cap.max_inline_data = 0; | 4100 | qp_init_attr->srq = ibqp->srq; |
| 4101 | qp_attr->cap.max_inline_data = qp->max_inline_data; | ||
| 4093 | 4102 | ||
| 4094 | qp_init_attr->cap = qp_attr->cap; | 4103 | qp_init_attr->cap = qp_attr->cap; |
| 4095 | 4104 | ||
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c index 7209fbc03ccb..a0b6ebee4d8a 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom.c | |||
| @@ -36,7 +36,6 @@ | |||
| 36 | #include <linux/dma-mapping.h> | 36 | #include <linux/dma-mapping.h> |
| 37 | #include <linux/sched.h> | 37 | #include <linux/sched.h> |
| 38 | #include <linux/hugetlb.h> | 38 | #include <linux/hugetlb.h> |
| 39 | #include <linux/dma-attrs.h> | ||
| 40 | #include <linux/iommu.h> | 39 | #include <linux/iommu.h> |
| 41 | #include <linux/workqueue.h> | 40 | #include <linux/workqueue.h> |
| 42 | #include <linux/list.h> | 41 | #include <linux/list.h> |
| @@ -112,10 +111,6 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable, | |||
| 112 | int i; | 111 | int i; |
| 113 | int flags; | 112 | int flags; |
| 114 | dma_addr_t pa; | 113 | dma_addr_t pa; |
| 115 | DEFINE_DMA_ATTRS(attrs); | ||
| 116 | |||
| 117 | if (dmasync) | ||
| 118 | dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs); | ||
| 119 | 114 | ||
| 120 | if (!can_do_mlock()) | 115 | if (!can_do_mlock()) |
| 121 | return -EPERM; | 116 | return -EPERM; |
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 5fa4d4d81ee0..7de5134bec85 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c | |||
| @@ -502,6 +502,12 @@ static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp) | |||
| 502 | */ | 502 | */ |
| 503 | static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, | 503 | static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, |
| 504 | enum ib_qp_type type) | 504 | enum ib_qp_type type) |
| 505 | __releases(&qp->s_lock) | ||
| 506 | __releases(&qp->s_hlock) | ||
| 507 | __releases(&qp->r_lock) | ||
| 508 | __acquires(&qp->r_lock) | ||
| 509 | __acquires(&qp->s_hlock) | ||
| 510 | __acquires(&qp->s_lock) | ||
| 505 | { | 511 | { |
| 506 | if (qp->state != IB_QPS_RESET) { | 512 | if (qp->state != IB_QPS_RESET) { |
| 507 | qp->state = IB_QPS_RESET; | 513 | qp->state = IB_QPS_RESET; |
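
The six annotations added to rvt_reset_qp() are sparse lock-context hints: the function is entered with r_lock, s_hlock and s_lock held, drops them internally, and retakes them before returning. A stripped-down sketch of the same annotation pattern, with a made-up lock and function name:

#include <linux/compiler.h>
#include <linux/spinlock.h>

/* Hypothetical example: entered with *lock held, temporarily drops it. */
static void example_drop_and_retake(spinlock_t *lock)
	__releases(lock)
	__acquires(lock)
{
	spin_unlock(lock);
	/* ... work that must run without the lock held ... */
	spin_lock(lock);
}
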
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index bab7db6fa9ab..4f7d9b48df64 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h | |||
| @@ -94,6 +94,7 @@ enum { | |||
| 94 | IPOIB_NEIGH_TBL_FLUSH = 12, | 94 | IPOIB_NEIGH_TBL_FLUSH = 12, |
| 95 | IPOIB_FLAG_DEV_ADDR_SET = 13, | 95 | IPOIB_FLAG_DEV_ADDR_SET = 13, |
| 96 | IPOIB_FLAG_DEV_ADDR_CTRL = 14, | 96 | IPOIB_FLAG_DEV_ADDR_CTRL = 14, |
| 97 | IPOIB_FLAG_GOING_DOWN = 15, | ||
| 97 | 98 | ||
| 98 | IPOIB_MAX_BACKOFF_SECONDS = 16, | 99 | IPOIB_MAX_BACKOFF_SECONDS = 16, |
| 99 | 100 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index b2f42835d76d..951d9abcca8b 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c | |||
| @@ -1486,6 +1486,10 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr, | |||
| 1486 | { | 1486 | { |
| 1487 | struct net_device *dev = to_net_dev(d); | 1487 | struct net_device *dev = to_net_dev(d); |
| 1488 | int ret; | 1488 | int ret; |
| 1489 | struct ipoib_dev_priv *priv = netdev_priv(dev); | ||
| 1490 | |||
| 1491 | if (test_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags)) | ||
| 1492 | return -EPERM; | ||
| 1489 | 1493 | ||
| 1490 | if (!rtnl_trylock()) | 1494 | if (!rtnl_trylock()) |
| 1491 | return restart_syscall(); | 1495 | return restart_syscall(); |
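
This check, together with the new IPOIB_FLAG_GOING_DOWN bit and the matching tests in ipoib_vlan.c further down, guards user-triggered configuration paths against racing with device removal. A condensed sketch of the pattern, assuming the usual driver-local ipoib.h definitions:

#include <linux/bitops.h>
#include <linux/errno.h>
#include "ipoib.h"	/* struct ipoib_dev_priv, IPOIB_FLAG_GOING_DOWN */

/* Illustrative entry point: refuse configuration once removal has begun. */
static int example_config_op(struct ipoib_dev_priv *priv)
{
	if (test_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags))
		return -EPERM;

	/* ... normal sysfs/netlink-triggered work ... */
	return 0;
}
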
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 45c40a17d6a6..dc6d241b9406 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c | |||
| @@ -1015,7 +1015,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv) | |||
| 1015 | if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL)) | 1015 | if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL)) |
| 1016 | return false; | 1016 | return false; |
| 1017 | 1017 | ||
| 1018 | netif_addr_lock(priv->dev); | 1018 | netif_addr_lock_bh(priv->dev); |
| 1019 | 1019 | ||
| 1020 | /* The subnet prefix may have changed, update it now so we won't have | 1020 | /* The subnet prefix may have changed, update it now so we won't have |
| 1021 | * to do it later | 1021 | * to do it later |
| @@ -1026,12 +1026,12 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv) | |||
| 1026 | 1026 | ||
| 1027 | search_gid.global.interface_id = priv->local_gid.global.interface_id; | 1027 | search_gid.global.interface_id = priv->local_gid.global.interface_id; |
| 1028 | 1028 | ||
| 1029 | netif_addr_unlock(priv->dev); | 1029 | netif_addr_unlock_bh(priv->dev); |
| 1030 | 1030 | ||
| 1031 | err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB, | 1031 | err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB, |
| 1032 | priv->dev, &port, &index); | 1032 | priv->dev, &port, &index); |
| 1033 | 1033 | ||
| 1034 | netif_addr_lock(priv->dev); | 1034 | netif_addr_lock_bh(priv->dev); |
| 1035 | 1035 | ||
| 1036 | if (search_gid.global.interface_id != | 1036 | if (search_gid.global.interface_id != |
| 1037 | priv->local_gid.global.interface_id) | 1037 | priv->local_gid.global.interface_id) |
| @@ -1092,7 +1092,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv) | |||
| 1092 | } | 1092 | } |
| 1093 | 1093 | ||
| 1094 | out: | 1094 | out: |
| 1095 | netif_addr_unlock(priv->dev); | 1095 | netif_addr_unlock_bh(priv->dev); |
| 1096 | 1096 | ||
| 1097 | return ret; | 1097 | return ret; |
| 1098 | } | 1098 | } |
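
The switch from netif_addr_lock() to netif_addr_lock_bh() here (and in ipoib_main.c and ipoib_multicast.c below) matters because the address lock is also taken from softirq context; process-context users must disable bottom halves or they can deadlock on the same CPU. A minimal sketch of the convention:

#include <linux/netdevice.h>
#include <linux/string.h>

/* Illustrative only: process-context code copying the device address. */
static void example_copy_dev_addr(struct net_device *dev, u8 *buf)
{
	netif_addr_lock_bh(dev);	/* also blocks softirq users on this CPU */
	memcpy(buf, dev->dev_addr, dev->addr_len);
	netif_addr_unlock_bh(dev);
}
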
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 2d7c16346648..5f58c41ef787 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
| @@ -1206,7 +1206,9 @@ struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr) | |||
| 1206 | neigh = NULL; | 1206 | neigh = NULL; |
| 1207 | goto out_unlock; | 1207 | goto out_unlock; |
| 1208 | } | 1208 | } |
| 1209 | neigh->alive = jiffies; | 1209 | |
| 1210 | if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)) | ||
| 1211 | neigh->alive = jiffies; | ||
| 1210 | goto out_unlock; | 1212 | goto out_unlock; |
| 1211 | } | 1213 | } |
| 1212 | } | 1214 | } |
| @@ -1851,7 +1853,7 @@ static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid) | |||
| 1851 | struct ipoib_dev_priv *child_priv; | 1853 | struct ipoib_dev_priv *child_priv; |
| 1852 | struct net_device *netdev = priv->dev; | 1854 | struct net_device *netdev = priv->dev; |
| 1853 | 1855 | ||
| 1854 | netif_addr_lock(netdev); | 1856 | netif_addr_lock_bh(netdev); |
| 1855 | 1857 | ||
| 1856 | memcpy(&priv->local_gid.global.interface_id, | 1858 | memcpy(&priv->local_gid.global.interface_id, |
| 1857 | &gid->global.interface_id, | 1859 | &gid->global.interface_id, |
| @@ -1859,7 +1861,7 @@ static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid) | |||
| 1859 | memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid)); | 1861 | memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid)); |
| 1860 | clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); | 1862 | clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); |
| 1861 | 1863 | ||
| 1862 | netif_addr_unlock(netdev); | 1864 | netif_addr_unlock_bh(netdev); |
| 1863 | 1865 | ||
| 1864 | if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { | 1866 | if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { |
| 1865 | down_read(&priv->vlan_rwsem); | 1867 | down_read(&priv->vlan_rwsem); |
| @@ -1875,7 +1877,7 @@ static int ipoib_check_lladdr(struct net_device *dev, | |||
| 1875 | union ib_gid *gid = (union ib_gid *)(ss->__data + 4); | 1877 | union ib_gid *gid = (union ib_gid *)(ss->__data + 4); |
| 1876 | int ret = 0; | 1878 | int ret = 0; |
| 1877 | 1879 | ||
| 1878 | netif_addr_lock(dev); | 1880 | netif_addr_lock_bh(dev); |
| 1879 | 1881 | ||
| 1880 | /* Make sure the QPN, reserved and subnet prefix match the current | 1882 | /* Make sure the QPN, reserved and subnet prefix match the current |
| 1881 | * lladdr, it also makes sure the lladdr is unicast. | 1883 | * lladdr, it also makes sure the lladdr is unicast. |
| @@ -1885,7 +1887,7 @@ static int ipoib_check_lladdr(struct net_device *dev, | |||
| 1885 | gid->global.interface_id == 0) | 1887 | gid->global.interface_id == 0) |
| 1886 | ret = -EINVAL; | 1888 | ret = -EINVAL; |
| 1887 | 1889 | ||
| 1888 | netif_addr_unlock(dev); | 1890 | netif_addr_unlock_bh(dev); |
| 1889 | 1891 | ||
| 1890 | return ret; | 1892 | return ret; |
| 1891 | } | 1893 | } |
| @@ -2141,6 +2143,9 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data) | |||
| 2141 | ib_unregister_event_handler(&priv->event_handler); | 2143 | ib_unregister_event_handler(&priv->event_handler); |
| 2142 | flush_workqueue(ipoib_workqueue); | 2144 | flush_workqueue(ipoib_workqueue); |
| 2143 | 2145 | ||
| 2146 | /* mark interface in the middle of destruction */ | ||
| 2147 | set_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags); | ||
| 2148 | |||
| 2144 | rtnl_lock(); | 2149 | rtnl_lock(); |
| 2145 | dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP); | 2150 | dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP); |
| 2146 | rtnl_unlock(); | 2151 | rtnl_unlock(); |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 82fbc9442608..d3394b6add24 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c | |||
| @@ -582,13 +582,13 @@ void ipoib_mcast_join_task(struct work_struct *work) | |||
| 582 | return; | 582 | return; |
| 583 | } | 583 | } |
| 584 | priv->local_lid = port_attr.lid; | 584 | priv->local_lid = port_attr.lid; |
| 585 | netif_addr_lock(dev); | 585 | netif_addr_lock_bh(dev); |
| 586 | 586 | ||
| 587 | if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) { | 587 | if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) { |
| 588 | netif_addr_unlock(dev); | 588 | netif_addr_unlock_bh(dev); |
| 589 | return; | 589 | return; |
| 590 | } | 590 | } |
| 591 | netif_addr_unlock(dev); | 591 | netif_addr_unlock_bh(dev); |
| 592 | 592 | ||
| 593 | spin_lock_irq(&priv->lock); | 593 | spin_lock_irq(&priv->lock); |
| 594 | if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) | 594 | if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index 64a35595eab8..a2f9f29c6ab5 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c | |||
| @@ -131,6 +131,9 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) | |||
| 131 | 131 | ||
| 132 | ppriv = netdev_priv(pdev); | 132 | ppriv = netdev_priv(pdev); |
| 133 | 133 | ||
| 134 | if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags)) | ||
| 135 | return -EPERM; | ||
| 136 | |||
| 134 | snprintf(intf_name, sizeof intf_name, "%s.%04x", | 137 | snprintf(intf_name, sizeof intf_name, "%s.%04x", |
| 135 | ppriv->dev->name, pkey); | 138 | ppriv->dev->name, pkey); |
| 136 | priv = ipoib_intf_alloc(intf_name); | 139 | priv = ipoib_intf_alloc(intf_name); |
| @@ -183,6 +186,9 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) | |||
| 183 | 186 | ||
| 184 | ppriv = netdev_priv(pdev); | 187 | ppriv = netdev_priv(pdev); |
| 185 | 188 | ||
| 189 | if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags)) | ||
| 190 | return -EPERM; | ||
| 191 | |||
| 186 | if (!rtnl_trylock()) | 192 | if (!rtnl_trylock()) |
| 187 | return restart_syscall(); | 193 | return restart_syscall(); |
| 188 | 194 | ||
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 646de170ec12..3322ed750172 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c | |||
| @@ -1457,7 +1457,6 @@ static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch, | |||
| 1457 | { | 1457 | { |
| 1458 | unsigned int sg_offset = 0; | 1458 | unsigned int sg_offset = 0; |
| 1459 | 1459 | ||
| 1460 | state->desc = req->indirect_desc; | ||
| 1461 | state->fr.next = req->fr_list; | 1460 | state->fr.next = req->fr_list; |
| 1462 | state->fr.end = req->fr_list + ch->target->mr_per_cmd; | 1461 | state->fr.end = req->fr_list + ch->target->mr_per_cmd; |
| 1463 | state->sg = scat; | 1462 | state->sg = scat; |
| @@ -1489,7 +1488,6 @@ static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch, | |||
| 1489 | struct scatterlist *sg; | 1488 | struct scatterlist *sg; |
| 1490 | int i; | 1489 | int i; |
| 1491 | 1490 | ||
| 1492 | state->desc = req->indirect_desc; | ||
| 1493 | for_each_sg(scat, sg, count, i) { | 1491 | for_each_sg(scat, sg, count, i) { |
| 1494 | srp_map_desc(state, ib_sg_dma_address(dev->dev, sg), | 1492 | srp_map_desc(state, ib_sg_dma_address(dev->dev, sg), |
| 1495 | ib_sg_dma_len(dev->dev, sg), | 1493 | ib_sg_dma_len(dev->dev, sg), |
| @@ -1655,6 +1653,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch, | |||
| 1655 | target->indirect_size, DMA_TO_DEVICE); | 1653 | target->indirect_size, DMA_TO_DEVICE); |
| 1656 | 1654 | ||
| 1657 | memset(&state, 0, sizeof(state)); | 1655 | memset(&state, 0, sizeof(state)); |
| 1656 | state.desc = req->indirect_desc; | ||
| 1658 | if (dev->use_fast_reg) | 1657 | if (dev->use_fast_reg) |
| 1659 | ret = srp_map_sg_fr(&state, ch, req, scat, count); | 1658 | ret = srp_map_sg_fr(&state, ch, req, scat, count); |
| 1660 | else if (dev->use_fmr) | 1659 | else if (dev->use_fmr) |
| @@ -3526,7 +3525,7 @@ static void srp_add_one(struct ib_device *device) | |||
| 3526 | int mr_page_shift, p; | 3525 | int mr_page_shift, p; |
| 3527 | u64 max_pages_per_mr; | 3526 | u64 max_pages_per_mr; |
| 3528 | 3527 | ||
| 3529 | srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL); | 3528 | srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL); |
| 3530 | if (!srp_dev) | 3529 | if (!srp_dev) |
| 3531 | return; | 3530 | return; |
| 3532 | 3531 | ||
| @@ -3586,8 +3585,6 @@ static void srp_add_one(struct ib_device *device) | |||
| 3586 | IB_ACCESS_REMOTE_WRITE); | 3585 | IB_ACCESS_REMOTE_WRITE); |
| 3587 | if (IS_ERR(srp_dev->global_mr)) | 3586 | if (IS_ERR(srp_dev->global_mr)) |
| 3588 | goto err_pd; | 3587 | goto err_pd; |
| 3589 | } else { | ||
| 3590 | srp_dev->global_mr = NULL; | ||
| 3591 | } | 3588 | } |
| 3592 | 3589 | ||
| 3593 | for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) { | 3590 | for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) { |
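
Two independent cleanups are visible in the srp hunks: the state.desc initialization moves into the single caller, srp_map_data(), and srp_add_one() switches to kzalloc() so fields such as global_mr start out NULL without an explicit else branch. A tiny sketch of the allocation change, with stand-in struct names:

#include <linux/slab.h>

struct ib_mr;			/* opaque here; real type lives in rdma/ib_verbs.h */

struct example_srp_device {
	struct ib_mr *global_mr;
	/* ... other fields ... */
};

/* kzalloc() zeroes the whole structure, so no "else global_mr = NULL". */
static struct example_srp_device *example_alloc_device(void)
{
	return kzalloc(sizeof(struct example_srp_device), GFP_KERNEL);
}
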
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 6bd881be24ea..5eb1f9e17a98 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
| @@ -41,6 +41,7 @@ | |||
| 41 | 41 | ||
| 42 | #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) | 42 | #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) |
| 43 | #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1) | 43 | #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1) |
| 44 | #define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2) | ||
| 44 | 45 | ||
| 45 | #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) | 46 | #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) |
| 46 | 47 | ||
| @@ -82,6 +83,7 @@ struct its_node { | |||
| 82 | u64 flags; | 83 | u64 flags; |
| 83 | u32 ite_size; | 84 | u32 ite_size; |
| 84 | u32 device_ids; | 85 | u32 device_ids; |
| 86 | int numa_node; | ||
| 85 | }; | 87 | }; |
| 86 | 88 | ||
| 87 | #define ITS_ITT_ALIGN SZ_256 | 89 | #define ITS_ITT_ALIGN SZ_256 |
| @@ -613,11 +615,23 @@ static void its_unmask_irq(struct irq_data *d) | |||
| 613 | static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, | 615 | static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, |
| 614 | bool force) | 616 | bool force) |
| 615 | { | 617 | { |
| 616 | unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask); | 618 | unsigned int cpu; |
| 619 | const struct cpumask *cpu_mask = cpu_online_mask; | ||
| 617 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | 620 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); |
| 618 | struct its_collection *target_col; | 621 | struct its_collection *target_col; |
| 619 | u32 id = its_get_event_id(d); | 622 | u32 id = its_get_event_id(d); |
| 620 | 623 | ||
| 624 | /* lpi cannot be routed to a redistributor that is on a foreign node */ | ||
| 625 | if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { | ||
| 626 | if (its_dev->its->numa_node >= 0) { | ||
| 627 | cpu_mask = cpumask_of_node(its_dev->its->numa_node); | ||
| 628 | if (!cpumask_intersects(mask_val, cpu_mask)) | ||
| 629 | return -EINVAL; | ||
| 630 | } | ||
| 631 | } | ||
| 632 | |||
| 633 | cpu = cpumask_any_and(mask_val, cpu_mask); | ||
| 634 | |||
| 621 | if (cpu >= nr_cpu_ids) | 635 | if (cpu >= nr_cpu_ids) |
| 622 | return -EINVAL; | 636 | return -EINVAL; |
| 623 | 637 | ||
| @@ -1101,6 +1115,16 @@ static void its_cpu_init_collection(void) | |||
| 1101 | list_for_each_entry(its, &its_nodes, entry) { | 1115 | list_for_each_entry(its, &its_nodes, entry) { |
| 1102 | u64 target; | 1116 | u64 target; |
| 1103 | 1117 | ||
| 1118 | /* avoid cross node collections and its mapping */ | ||
| 1119 | if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { | ||
| 1120 | struct device_node *cpu_node; | ||
| 1121 | |||
| 1122 | cpu_node = of_get_cpu_node(cpu, NULL); | ||
| 1123 | if (its->numa_node != NUMA_NO_NODE && | ||
| 1124 | its->numa_node != of_node_to_nid(cpu_node)) | ||
| 1125 | continue; | ||
| 1126 | } | ||
| 1127 | |||
| 1104 | /* | 1128 | /* |
| 1105 | * We now have to bind each collection to its target | 1129 | * We now have to bind each collection to its target |
| 1106 | * redistributor. | 1130 | * redistributor. |
| @@ -1351,9 +1375,14 @@ static void its_irq_domain_activate(struct irq_domain *domain, | |||
| 1351 | { | 1375 | { |
| 1352 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | 1376 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); |
| 1353 | u32 event = its_get_event_id(d); | 1377 | u32 event = its_get_event_id(d); |
| 1378 | const struct cpumask *cpu_mask = cpu_online_mask; | ||
| 1379 | |||
| 1380 | /* get the cpu_mask of local node */ | ||
| 1381 | if (its_dev->its->numa_node >= 0) | ||
| 1382 | cpu_mask = cpumask_of_node(its_dev->its->numa_node); | ||
| 1354 | 1383 | ||
| 1355 | /* Bind the LPI to the first possible CPU */ | 1384 | /* Bind the LPI to the first possible CPU */ |
| 1356 | its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask); | 1385 | its_dev->event_map.col_map[event] = cpumask_first(cpu_mask); |
| 1357 | 1386 | ||
| 1358 | /* Map the GIC IRQ and event to the device */ | 1387 | /* Map the GIC IRQ and event to the device */ |
| 1359 | its_send_mapvi(its_dev, d->hwirq, event); | 1388 | its_send_mapvi(its_dev, d->hwirq, event); |
| @@ -1443,6 +1472,13 @@ static void __maybe_unused its_enable_quirk_cavium_22375(void *data) | |||
| 1443 | its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; | 1472 | its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; |
| 1444 | } | 1473 | } |
| 1445 | 1474 | ||
| 1475 | static void __maybe_unused its_enable_quirk_cavium_23144(void *data) | ||
| 1476 | { | ||
| 1477 | struct its_node *its = data; | ||
| 1478 | |||
| 1479 | its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; | ||
| 1480 | } | ||
| 1481 | |||
| 1446 | static const struct gic_quirk its_quirks[] = { | 1482 | static const struct gic_quirk its_quirks[] = { |
| 1447 | #ifdef CONFIG_CAVIUM_ERRATUM_22375 | 1483 | #ifdef CONFIG_CAVIUM_ERRATUM_22375 |
| 1448 | { | 1484 | { |
| @@ -1452,6 +1488,14 @@ static const struct gic_quirk its_quirks[] = { | |||
| 1452 | .init = its_enable_quirk_cavium_22375, | 1488 | .init = its_enable_quirk_cavium_22375, |
| 1453 | }, | 1489 | }, |
| 1454 | #endif | 1490 | #endif |
| 1491 | #ifdef CONFIG_CAVIUM_ERRATUM_23144 | ||
| 1492 | { | ||
| 1493 | .desc = "ITS: Cavium erratum 23144", | ||
| 1494 | .iidr = 0xa100034c, /* ThunderX pass 1.x */ | ||
| 1495 | .mask = 0xffff0fff, | ||
| 1496 | .init = its_enable_quirk_cavium_23144, | ||
| 1497 | }, | ||
| 1498 | #endif | ||
| 1455 | { | 1499 | { |
| 1456 | } | 1500 | } |
| 1457 | }; | 1501 | }; |
| @@ -1514,6 +1558,7 @@ static int __init its_probe(struct device_node *node, | |||
| 1514 | its->base = its_base; | 1558 | its->base = its_base; |
| 1515 | its->phys_base = res.start; | 1559 | its->phys_base = res.start; |
| 1516 | its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1; | 1560 | its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1; |
| 1561 | its->numa_node = of_node_to_nid(node); | ||
| 1517 | 1562 | ||
| 1518 | its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL); | 1563 | its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL); |
| 1519 | if (!its->cmd_base) { | 1564 | if (!its->cmd_base) { |
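
The Cavium 23144 workaround added above restricts LPI routing to the ITS's own NUMA node: set_affinity rejects masks that do not intersect the node, collection init skips foreign CPUs, and activation binds to the first CPU of the local node. A reduced sketch of the affinity check, with the quirk flag and node passed in explicitly:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/topology.h>
#include <linux/types.h>

/* Illustrative helper: pick a target CPU, honouring the per-node restriction. */
static int example_pick_target_cpu(const struct cpumask *requested,
				   int numa_node, bool quirk_23144)
{
	const struct cpumask *allowed = cpu_online_mask;
	unsigned int cpu;

	if (quirk_23144 && numa_node >= 0) {
		allowed = cpumask_of_node(numa_node);
		if (!cpumask_intersects(requested, allowed))
			return -EINVAL;
	}

	cpu = cpumask_any_and(requested, allowed);
	return cpu < nr_cpu_ids ? cpu : -EINVAL;
}
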
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index fb042ba9a3db..2c5ba0e704bf 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c | |||
| @@ -155,7 +155,7 @@ static void gic_enable_redist(bool enable) | |||
| 155 | 155 | ||
| 156 | while (count--) { | 156 | while (count--) { |
| 157 | val = readl_relaxed(rbase + GICR_WAKER); | 157 | val = readl_relaxed(rbase + GICR_WAKER); |
| 158 | if (enable ^ (val & GICR_WAKER_ChildrenAsleep)) | 158 | if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep)) |
| 159 | break; | 159 | break; |
| 160 | cpu_relax(); | 160 | cpu_relax(); |
| 161 | udelay(1); | 161 | udelay(1); |
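
The one-character gic-v3 fix above is easy to miss: without the (bool) cast, a true 'enable' XOR-ed against the raw masked register value (bit 2, value 4) yields a non-zero result, so the wait loop exited while the children were still asleep. A standalone C snippet showing the difference (values illustrative):

#include <stdbool.h>
#include <stdio.h>

#define GICR_WAKER_ChildrenAsleep (1u << 2)

int main(void)
{
	bool enable = true;
	unsigned int val = GICR_WAKER_ChildrenAsleep;	/* bit still set */

	/* buggy: 1 ^ 4 == 5, loop would break before the bit clears */
	printf("without cast: %u\n", enable ^ (val & GICR_WAKER_ChildrenAsleep));

	/* fixed: 1 ^ 1 == 0, keep polling until the state really matches */
	printf("with cast:    %u\n",
	       (unsigned int)(enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep)));
	return 0;
}
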
diff --git a/drivers/irqchip/irq-pic32-evic.c b/drivers/irqchip/irq-pic32-evic.c index e7155db01d55..73addb4b625b 100644 --- a/drivers/irqchip/irq-pic32-evic.c +++ b/drivers/irqchip/irq-pic32-evic.c | |||
| @@ -91,7 +91,7 @@ static int pic32_set_type_edge(struct irq_data *data, | |||
| 91 | /* set polarity for external interrupts only */ | 91 | /* set polarity for external interrupts only */ |
| 92 | for (i = 0; i < ARRAY_SIZE(priv->ext_irqs); i++) { | 92 | for (i = 0; i < ARRAY_SIZE(priv->ext_irqs); i++) { |
| 93 | if (priv->ext_irqs[i] == data->hwirq) { | 93 | if (priv->ext_irqs[i] == data->hwirq) { |
| 94 | ret = pic32_set_ext_polarity(i + 1, flow_type); | 94 | ret = pic32_set_ext_polarity(i, flow_type); |
| 95 | if (ret) | 95 | if (ret) |
| 96 | return ret; | 96 | return ret; |
| 97 | } | 97 | } |
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c index d7723ce772b3..c04bc6afb965 100644 --- a/drivers/media/usb/uvc/uvc_v4l2.c +++ b/drivers/media/usb/uvc/uvc_v4l2.c | |||
| @@ -1274,8 +1274,6 @@ struct uvc_xu_control_mapping32 { | |||
| 1274 | static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp, | 1274 | static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp, |
| 1275 | const struct uvc_xu_control_mapping32 __user *up) | 1275 | const struct uvc_xu_control_mapping32 __user *up) |
| 1276 | { | 1276 | { |
| 1277 | struct uvc_menu_info __user *umenus; | ||
| 1278 | struct uvc_menu_info __user *kmenus; | ||
| 1279 | compat_caddr_t p; | 1277 | compat_caddr_t p; |
| 1280 | 1278 | ||
| 1281 | if (!access_ok(VERIFY_READ, up, sizeof(*up)) || | 1279 | if (!access_ok(VERIFY_READ, up, sizeof(*up)) || |
| @@ -1292,17 +1290,7 @@ static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp, | |||
| 1292 | 1290 | ||
| 1293 | if (__get_user(p, &up->menu_info)) | 1291 | if (__get_user(p, &up->menu_info)) |
| 1294 | return -EFAULT; | 1292 | return -EFAULT; |
| 1295 | umenus = compat_ptr(p); | 1293 | kp->menu_info = compat_ptr(p); |
| 1296 | if (!access_ok(VERIFY_READ, umenus, kp->menu_count * sizeof(*umenus))) | ||
| 1297 | return -EFAULT; | ||
| 1298 | |||
| 1299 | kmenus = compat_alloc_user_space(kp->menu_count * sizeof(*kmenus)); | ||
| 1300 | if (kmenus == NULL) | ||
| 1301 | return -EFAULT; | ||
| 1302 | kp->menu_info = kmenus; | ||
| 1303 | |||
| 1304 | if (copy_in_user(kmenus, umenus, kp->menu_count * sizeof(*umenus))) | ||
| 1305 | return -EFAULT; | ||
| 1306 | 1294 | ||
| 1307 | return 0; | 1295 | return 0; |
| 1308 | } | 1296 | } |
| @@ -1310,10 +1298,6 @@ static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp, | |||
| 1310 | static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp, | 1298 | static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp, |
| 1311 | struct uvc_xu_control_mapping32 __user *up) | 1299 | struct uvc_xu_control_mapping32 __user *up) |
| 1312 | { | 1300 | { |
| 1313 | struct uvc_menu_info __user *umenus; | ||
| 1314 | struct uvc_menu_info __user *kmenus = kp->menu_info; | ||
| 1315 | compat_caddr_t p; | ||
| 1316 | |||
| 1317 | if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || | 1301 | if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || |
| 1318 | __copy_to_user(up, kp, offsetof(typeof(*up), menu_info)) || | 1302 | __copy_to_user(up, kp, offsetof(typeof(*up), menu_info)) || |
| 1319 | __put_user(kp->menu_count, &up->menu_count)) | 1303 | __put_user(kp->menu_count, &up->menu_count)) |
| @@ -1322,16 +1306,6 @@ static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp, | |||
| 1322 | if (__clear_user(up->reserved, sizeof(up->reserved))) | 1306 | if (__clear_user(up->reserved, sizeof(up->reserved))) |
| 1323 | return -EFAULT; | 1307 | return -EFAULT; |
| 1324 | 1308 | ||
| 1325 | if (kp->menu_count == 0) | ||
| 1326 | return 0; | ||
| 1327 | |||
| 1328 | if (get_user(p, &up->menu_info)) | ||
| 1329 | return -EFAULT; | ||
| 1330 | umenus = compat_ptr(p); | ||
| 1331 | |||
| 1332 | if (copy_in_user(umenus, kmenus, kp->menu_count * sizeof(*umenus))) | ||
| 1333 | return -EFAULT; | ||
| 1334 | |||
| 1335 | return 0; | 1309 | return 0; |
| 1336 | } | 1310 | } |
| 1337 | 1311 | ||
| @@ -1346,8 +1320,6 @@ struct uvc_xu_control_query32 { | |||
| 1346 | static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp, | 1320 | static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp, |
| 1347 | const struct uvc_xu_control_query32 __user *up) | 1321 | const struct uvc_xu_control_query32 __user *up) |
| 1348 | { | 1322 | { |
| 1349 | u8 __user *udata; | ||
| 1350 | u8 __user *kdata; | ||
| 1351 | compat_caddr_t p; | 1323 | compat_caddr_t p; |
| 1352 | 1324 | ||
| 1353 | if (!access_ok(VERIFY_READ, up, sizeof(*up)) || | 1325 | if (!access_ok(VERIFY_READ, up, sizeof(*up)) || |
| @@ -1361,17 +1333,7 @@ static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp, | |||
| 1361 | 1333 | ||
| 1362 | if (__get_user(p, &up->data)) | 1334 | if (__get_user(p, &up->data)) |
| 1363 | return -EFAULT; | 1335 | return -EFAULT; |
| 1364 | udata = compat_ptr(p); | 1336 | kp->data = compat_ptr(p); |
| 1365 | if (!access_ok(VERIFY_READ, udata, kp->size)) | ||
| 1366 | return -EFAULT; | ||
| 1367 | |||
| 1368 | kdata = compat_alloc_user_space(kp->size); | ||
| 1369 | if (kdata == NULL) | ||
| 1370 | return -EFAULT; | ||
| 1371 | kp->data = kdata; | ||
| 1372 | |||
| 1373 | if (copy_in_user(kdata, udata, kp->size)) | ||
| 1374 | return -EFAULT; | ||
| 1375 | 1337 | ||
| 1376 | return 0; | 1338 | return 0; |
| 1377 | } | 1339 | } |
| @@ -1379,26 +1341,10 @@ static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp, | |||
| 1379 | static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp, | 1341 | static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp, |
| 1380 | struct uvc_xu_control_query32 __user *up) | 1342 | struct uvc_xu_control_query32 __user *up) |
| 1381 | { | 1343 | { |
| 1382 | u8 __user *udata; | ||
| 1383 | u8 __user *kdata = kp->data; | ||
| 1384 | compat_caddr_t p; | ||
| 1385 | |||
| 1386 | if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || | 1344 | if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || |
| 1387 | __copy_to_user(up, kp, offsetof(typeof(*up), data))) | 1345 | __copy_to_user(up, kp, offsetof(typeof(*up), data))) |
| 1388 | return -EFAULT; | 1346 | return -EFAULT; |
| 1389 | 1347 | ||
| 1390 | if (kp->size == 0) | ||
| 1391 | return 0; | ||
| 1392 | |||
| 1393 | if (get_user(p, &up->data)) | ||
| 1394 | return -EFAULT; | ||
| 1395 | udata = compat_ptr(p); | ||
| 1396 | if (!access_ok(VERIFY_READ, udata, kp->size)) | ||
| 1397 | return -EFAULT; | ||
| 1398 | |||
| 1399 | if (copy_in_user(udata, kdata, kp->size)) | ||
| 1400 | return -EFAULT; | ||
| 1401 | |||
| 1402 | return 0; | 1348 | return 0; |
| 1403 | } | 1349 | } |
| 1404 | 1350 | ||
| @@ -1408,47 +1354,44 @@ static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp, | |||
| 1408 | static long uvc_v4l2_compat_ioctl32(struct file *file, | 1354 | static long uvc_v4l2_compat_ioctl32(struct file *file, |
| 1409 | unsigned int cmd, unsigned long arg) | 1355 | unsigned int cmd, unsigned long arg) |
| 1410 | { | 1356 | { |
| 1357 | struct uvc_fh *handle = file->private_data; | ||
| 1411 | union { | 1358 | union { |
| 1412 | struct uvc_xu_control_mapping xmap; | 1359 | struct uvc_xu_control_mapping xmap; |
| 1413 | struct uvc_xu_control_query xqry; | 1360 | struct uvc_xu_control_query xqry; |
| 1414 | } karg; | 1361 | } karg; |
| 1415 | void __user *up = compat_ptr(arg); | 1362 | void __user *up = compat_ptr(arg); |
| 1416 | mm_segment_t old_fs; | ||
| 1417 | long ret; | 1363 | long ret; |
| 1418 | 1364 | ||
| 1419 | switch (cmd) { | 1365 | switch (cmd) { |
| 1420 | case UVCIOC_CTRL_MAP32: | 1366 | case UVCIOC_CTRL_MAP32: |
| 1421 | cmd = UVCIOC_CTRL_MAP; | ||
| 1422 | ret = uvc_v4l2_get_xu_mapping(&karg.xmap, up); | 1367 | ret = uvc_v4l2_get_xu_mapping(&karg.xmap, up); |
| 1368 | if (ret) | ||
| 1369 | return ret; | ||
| 1370 | ret = uvc_ioctl_ctrl_map(handle->chain, &karg.xmap); | ||
| 1371 | if (ret) | ||
| 1372 | return ret; | ||
| 1373 | ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up); | ||
| 1374 | if (ret) | ||
| 1375 | return ret; | ||
| 1376 | |||
| 1423 | break; | 1377 | break; |
| 1424 | 1378 | ||
| 1425 | case UVCIOC_CTRL_QUERY32: | 1379 | case UVCIOC_CTRL_QUERY32: |
| 1426 | cmd = UVCIOC_CTRL_QUERY; | ||
| 1427 | ret = uvc_v4l2_get_xu_query(&karg.xqry, up); | 1380 | ret = uvc_v4l2_get_xu_query(&karg.xqry, up); |
| 1381 | if (ret) | ||
| 1382 | return ret; | ||
| 1383 | ret = uvc_xu_ctrl_query(handle->chain, &karg.xqry); | ||
| 1384 | if (ret) | ||
| 1385 | return ret; | ||
| 1386 | ret = uvc_v4l2_put_xu_query(&karg.xqry, up); | ||
| 1387 | if (ret) | ||
| 1388 | return ret; | ||
| 1428 | break; | 1389 | break; |
| 1429 | 1390 | ||
| 1430 | default: | 1391 | default: |
| 1431 | return -ENOIOCTLCMD; | 1392 | return -ENOIOCTLCMD; |
| 1432 | } | 1393 | } |
| 1433 | 1394 | ||
| 1434 | old_fs = get_fs(); | ||
| 1435 | set_fs(KERNEL_DS); | ||
| 1436 | ret = video_ioctl2(file, cmd, (unsigned long)&karg); | ||
| 1437 | set_fs(old_fs); | ||
| 1438 | |||
| 1439 | if (ret < 0) | ||
| 1440 | return ret; | ||
| 1441 | |||
| 1442 | switch (cmd) { | ||
| 1443 | case UVCIOC_CTRL_MAP: | ||
| 1444 | ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up); | ||
| 1445 | break; | ||
| 1446 | |||
| 1447 | case UVCIOC_CTRL_QUERY: | ||
| 1448 | ret = uvc_v4l2_put_xu_query(&karg.xqry, up); | ||
| 1449 | break; | ||
| 1450 | } | ||
| 1451 | |||
| 1452 | return ret; | 1395 | return ret; |
| 1453 | } | 1396 | } |
| 1454 | #endif | 1397 | #endif |
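
The uvc compat_ioctl32 rewrite above drops the set_fs(KERNEL_DS) trick: the 32-bit layout is translated into a kernel structure, the native handler is called directly, and the result is translated back. Reduced to its shape (handler names from the hunk, the wrapper name is made up):

/* Sketch only; relies on the driver-local helpers shown in the diff. */
static long example_compat_query(struct uvc_fh *handle, void __user *up)
{
	struct uvc_xu_control_query xqry;
	long ret;

	ret = uvc_v4l2_get_xu_query(&xqry, up);		/* user32 -> kernel */
	if (ret)
		return ret;
	ret = uvc_xu_ctrl_query(handle->chain, &xqry);	/* native handler */
	if (ret)
		return ret;
	return uvc_v4l2_put_xu_query(&xqry, up);	/* kernel -> user32 */
}
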
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index c984321d1881..5d438ad3ee32 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c | |||
| @@ -1276,7 +1276,7 @@ static int mmc_select_hs200(struct mmc_card *card) | |||
| 1276 | * switch to HS200 mode if bus width is set successfully. | 1276 | * switch to HS200 mode if bus width is set successfully. |
| 1277 | */ | 1277 | */ |
| 1278 | err = mmc_select_bus_width(card); | 1278 | err = mmc_select_bus_width(card); |
| 1279 | if (!err) { | 1279 | if (err >= 0) { |
| 1280 | val = EXT_CSD_TIMING_HS200 | | 1280 | val = EXT_CSD_TIMING_HS200 | |
| 1281 | card->drive_strength << EXT_CSD_DRV_STR_SHIFT; | 1281 | card->drive_strength << EXT_CSD_DRV_STR_SHIFT; |
| 1282 | err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | 1282 | err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, |
| @@ -1583,7 +1583,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, | |||
| 1583 | } else if (mmc_card_hs(card)) { | 1583 | } else if (mmc_card_hs(card)) { |
| 1584 | /* Select the desired bus width optionally */ | 1584 | /* Select the desired bus width optionally */ |
| 1585 | err = mmc_select_bus_width(card); | 1585 | err = mmc_select_bus_width(card); |
| 1586 | if (!err) { | 1586 | if (err >= 0) { |
| 1587 | err = mmc_select_hs_ddr(card); | 1587 | err = mmc_select_hs_ddr(card); |
| 1588 | if (err) | 1588 | if (err) |
| 1589 | goto free_card; | 1589 | goto free_card; |
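
Both mmc.c hunks apply the same correction: mmc_select_bus_width() returns the bus width it actually selected (a non-negative value) or a negative errno, so treating any non-zero return as failure wrongly skipped HS200/HS-DDR setup on wide buses. A hedged sketch of that calling convention with made-up helpers:

#include <linux/errno.h>
#include <linux/types.h>

/* Stand-in for mmc_select_bus_width(): width on success, -errno on failure. */
static int example_select_width(bool four_bit_ok)
{
	return four_bit_ok ? 4 : -EINVAL;
}

static int example_setup_hs200(void)
{
	int err = example_select_width(true);

	if (err < 0)		/* old "if (!err)" treated width 4 as failure */
		return err;

	/* err holds the selected width; proceed with the HS200 switch */
	return 0;
}
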
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index 7fc8b7aa83f0..2ee4c21ec55e 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c | |||
| @@ -970,8 +970,8 @@ static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = { | |||
| 970 | [SDXC_CLK_400K] = { .output = 180, .sample = 180 }, | 970 | [SDXC_CLK_400K] = { .output = 180, .sample = 180 }, |
| 971 | [SDXC_CLK_25M] = { .output = 180, .sample = 75 }, | 971 | [SDXC_CLK_25M] = { .output = 180, .sample = 75 }, |
| 972 | [SDXC_CLK_50M] = { .output = 150, .sample = 120 }, | 972 | [SDXC_CLK_50M] = { .output = 150, .sample = 120 }, |
| 973 | [SDXC_CLK_50M_DDR] = { .output = 90, .sample = 120 }, | 973 | [SDXC_CLK_50M_DDR] = { .output = 54, .sample = 36 }, |
| 974 | [SDXC_CLK_50M_DDR_8BIT] = { .output = 90, .sample = 120 }, | 974 | [SDXC_CLK_50M_DDR_8BIT] = { .output = 72, .sample = 72 }, |
| 975 | }; | 975 | }; |
| 976 | 976 | ||
| 977 | static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, | 977 | static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, |
| @@ -1129,11 +1129,6 @@ static int sunxi_mmc_probe(struct platform_device *pdev) | |||
| 1129 | MMC_CAP_1_8V_DDR | | 1129 | MMC_CAP_1_8V_DDR | |
| 1130 | MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ; | 1130 | MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ; |
| 1131 | 1131 | ||
| 1132 | /* TODO MMC DDR is not working on A80 */ | ||
| 1133 | if (of_device_is_compatible(pdev->dev.of_node, | ||
| 1134 | "allwinner,sun9i-a80-mmc")) | ||
| 1135 | mmc->caps &= ~MMC_CAP_1_8V_DDR; | ||
| 1136 | |||
| 1137 | ret = mmc_of_parse(mmc); | 1132 | ret = mmc_of_parse(mmc); |
| 1138 | if (ret) | 1133 | if (ret) |
| 1139 | goto error_free_dma; | 1134 | goto error_free_dma; |
diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c index 16419f550eff..058460bdd5a6 100644 --- a/drivers/net/ethernet/arc/emac_mdio.c +++ b/drivers/net/ethernet/arc/emac_mdio.c | |||
| @@ -141,7 +141,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv) | |||
| 141 | priv->bus = bus; | 141 | priv->bus = bus; |
| 142 | bus->priv = priv; | 142 | bus->priv = priv; |
| 143 | bus->parent = priv->dev; | 143 | bus->parent = priv->dev; |
| 144 | bus->name = "Synopsys MII Bus", | 144 | bus->name = "Synopsys MII Bus"; |
| 145 | bus->read = &arc_mdio_read; | 145 | bus->read = &arc_mdio_read; |
| 146 | bus->write = &arc_mdio_write; | 146 | bus->write = &arc_mdio_write; |
| 147 | bus->reset = &arc_mdio_reset; | 147 | bus->reset = &arc_mdio_reset; |
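
The arc_mdio change above replaces a stray comma with a semicolon. Thanks to the comma operator the original still assigned both fields, so this is a readability fix rather than a behaviour change; a standalone illustration:

#include <stdio.h>

int main(void)
{
	int a, b;

	a = 1,		/* comma operator: this and the next line are ONE statement */
	b = 2;

	a = 3;		/* semicolon: two separate statements, as intended */
	b = 4;

	printf("%d %d\n", a, b);
	return 0;
}
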
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h index 8fc93c5f6abc..d02c4240b7df 100644 --- a/drivers/net/ethernet/atheros/alx/alx.h +++ b/drivers/net/ethernet/atheros/alx/alx.h | |||
| @@ -96,6 +96,10 @@ struct alx_priv { | |||
| 96 | unsigned int rx_ringsz; | 96 | unsigned int rx_ringsz; |
| 97 | unsigned int rxbuf_size; | 97 | unsigned int rxbuf_size; |
| 98 | 98 | ||
| 99 | struct page *rx_page; | ||
| 100 | unsigned int rx_page_offset; | ||
| 101 | unsigned int rx_frag_size; | ||
| 102 | |||
| 99 | struct napi_struct napi; | 103 | struct napi_struct napi; |
| 100 | struct alx_tx_queue txq; | 104 | struct alx_tx_queue txq; |
| 101 | struct alx_rx_queue rxq; | 105 | struct alx_rx_queue rxq; |
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 9fe8b5e310d1..c98acdc0d14f 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c | |||
| @@ -70,6 +70,35 @@ static void alx_free_txbuf(struct alx_priv *alx, int entry) | |||
| 70 | } | 70 | } |
| 71 | } | 71 | } |
| 72 | 72 | ||
| 73 | static struct sk_buff *alx_alloc_skb(struct alx_priv *alx, gfp_t gfp) | ||
| 74 | { | ||
| 75 | struct sk_buff *skb; | ||
| 76 | struct page *page; | ||
| 77 | |||
| 78 | if (alx->rx_frag_size > PAGE_SIZE) | ||
| 79 | return __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp); | ||
| 80 | |||
| 81 | page = alx->rx_page; | ||
| 82 | if (!page) { | ||
| 83 | alx->rx_page = page = alloc_page(gfp); | ||
| 84 | if (unlikely(!page)) | ||
| 85 | return NULL; | ||
| 86 | alx->rx_page_offset = 0; | ||
| 87 | } | ||
| 88 | |||
| 89 | skb = build_skb(page_address(page) + alx->rx_page_offset, | ||
| 90 | alx->rx_frag_size); | ||
| 91 | if (likely(skb)) { | ||
| 92 | alx->rx_page_offset += alx->rx_frag_size; | ||
| 93 | if (alx->rx_page_offset >= PAGE_SIZE) | ||
| 94 | alx->rx_page = NULL; | ||
| 95 | else | ||
| 96 | get_page(page); | ||
| 97 | } | ||
| 98 | return skb; | ||
| 99 | } | ||
| 100 | |||
| 101 | |||
| 73 | static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp) | 102 | static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp) |
| 74 | { | 103 | { |
| 75 | struct alx_rx_queue *rxq = &alx->rxq; | 104 | struct alx_rx_queue *rxq = &alx->rxq; |
| @@ -86,7 +115,7 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp) | |||
| 86 | while (!cur_buf->skb && next != rxq->read_idx) { | 115 | while (!cur_buf->skb && next != rxq->read_idx) { |
| 87 | struct alx_rfd *rfd = &rxq->rfd[cur]; | 116 | struct alx_rfd *rfd = &rxq->rfd[cur]; |
| 88 | 117 | ||
| 89 | skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp); | 118 | skb = alx_alloc_skb(alx, gfp); |
| 90 | if (!skb) | 119 | if (!skb) |
| 91 | break; | 120 | break; |
| 92 | dma = dma_map_single(&alx->hw.pdev->dev, | 121 | dma = dma_map_single(&alx->hw.pdev->dev, |
| @@ -124,6 +153,7 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp) | |||
| 124 | alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur); | 153 | alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur); |
| 125 | } | 154 | } |
| 126 | 155 | ||
| 156 | |||
| 127 | return count; | 157 | return count; |
| 128 | } | 158 | } |
| 129 | 159 | ||
| @@ -592,6 +622,11 @@ static void alx_free_rings(struct alx_priv *alx) | |||
| 592 | kfree(alx->txq.bufs); | 622 | kfree(alx->txq.bufs); |
| 593 | kfree(alx->rxq.bufs); | 623 | kfree(alx->rxq.bufs); |
| 594 | 624 | ||
| 625 | if (alx->rx_page) { | ||
| 626 | put_page(alx->rx_page); | ||
| 627 | alx->rx_page = NULL; | ||
| 628 | } | ||
| 629 | |||
| 595 | dma_free_coherent(&alx->hw.pdev->dev, | 630 | dma_free_coherent(&alx->hw.pdev->dev, |
| 596 | alx->descmem.size, | 631 | alx->descmem.size, |
| 597 | alx->descmem.virt, | 632 | alx->descmem.virt, |
| @@ -646,6 +681,7 @@ static int alx_request_irq(struct alx_priv *alx) | |||
| 646 | alx->dev->name, alx); | 681 | alx->dev->name, alx); |
| 647 | if (!err) | 682 | if (!err) |
| 648 | goto out; | 683 | goto out; |
| 684 | |||
| 649 | /* fall back to legacy interrupt */ | 685 | /* fall back to legacy interrupt */ |
| 650 | pci_disable_msi(alx->hw.pdev); | 686 | pci_disable_msi(alx->hw.pdev); |
| 651 | } | 687 | } |
| @@ -689,6 +725,7 @@ static int alx_init_sw(struct alx_priv *alx) | |||
| 689 | struct pci_dev *pdev = alx->hw.pdev; | 725 | struct pci_dev *pdev = alx->hw.pdev; |
| 690 | struct alx_hw *hw = &alx->hw; | 726 | struct alx_hw *hw = &alx->hw; |
| 691 | int err; | 727 | int err; |
| 728 | unsigned int head_size; | ||
| 692 | 729 | ||
| 693 | err = alx_identify_hw(alx); | 730 | err = alx_identify_hw(alx); |
| 694 | if (err) { | 731 | if (err) { |
| @@ -704,7 +741,12 @@ static int alx_init_sw(struct alx_priv *alx) | |||
| 704 | 741 | ||
| 705 | hw->smb_timer = 400; | 742 | hw->smb_timer = 400; |
| 706 | hw->mtu = alx->dev->mtu; | 743 | hw->mtu = alx->dev->mtu; |
| 744 | |||
| 707 | alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu); | 745 | alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu); |
| 746 | head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) + | ||
| 747 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | ||
| 748 | alx->rx_frag_size = roundup_pow_of_two(head_size); | ||
| 749 | |||
| 708 | alx->tx_ringsz = 256; | 750 | alx->tx_ringsz = 256; |
| 709 | alx->rx_ringsz = 512; | 751 | alx->rx_ringsz = 512; |
| 710 | hw->imt = 200; | 752 | hw->imt = 200; |
| @@ -806,6 +848,7 @@ static int alx_change_mtu(struct net_device *netdev, int mtu) | |||
| 806 | { | 848 | { |
| 807 | struct alx_priv *alx = netdev_priv(netdev); | 849 | struct alx_priv *alx = netdev_priv(netdev); |
| 808 | int max_frame = ALX_MAX_FRAME_LEN(mtu); | 850 | int max_frame = ALX_MAX_FRAME_LEN(mtu); |
| 851 | unsigned int head_size; | ||
| 809 | 852 | ||
| 810 | if ((max_frame < ALX_MIN_FRAME_SIZE) || | 853 | if ((max_frame < ALX_MIN_FRAME_SIZE) || |
| 811 | (max_frame > ALX_MAX_FRAME_SIZE)) | 854 | (max_frame > ALX_MAX_FRAME_SIZE)) |
| @@ -817,6 +860,9 @@ static int alx_change_mtu(struct net_device *netdev, int mtu) | |||
| 817 | netdev->mtu = mtu; | 860 | netdev->mtu = mtu; |
| 818 | alx->hw.mtu = mtu; | 861 | alx->hw.mtu = mtu; |
| 819 | alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE); | 862 | alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE); |
| 863 | head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) + | ||
| 864 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | ||
| 865 | alx->rx_frag_size = roundup_pow_of_two(head_size); | ||
| 820 | netdev_update_features(netdev); | 866 | netdev_update_features(netdev); |
| 821 | if (netif_running(netdev)) | 867 | if (netif_running(netdev)) |
| 822 | alx_reinit(alx); | 868 | alx_reinit(alx); |
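
The alx changes above switch RX buffers to page fragments: rx_frag_size is the skb head size rounded up to a power of two so fragments tile a page cleanly, build_skb() wraps each fragment, and anything larger than a page falls back to __netdev_alloc_skb(). A sketch of the size computation used in both alx_init_sw() and alx_change_mtu():

#include <linux/log2.h>
#include <linux/skbuff.h>

/* Illustrative helper mirroring the head-size math from the hunk. */
static unsigned int example_rx_frag_size(unsigned int rxbuf_size)
{
	unsigned int head_size;

	head_size = SKB_DATA_ALIGN(rxbuf_size + NET_SKB_PAD) +
		    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	return roundup_pow_of_two(head_size);
}
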
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 0a5b770cefaa..a59d55e25d5f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
| @@ -12895,52 +12895,71 @@ static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add) | |||
| 12895 | return rc; | 12895 | return rc; |
| 12896 | } | 12896 | } |
| 12897 | 12897 | ||
| 12898 | int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp) | 12898 | static int bnx2x_vlan_configure_vid_list(struct bnx2x *bp) |
| 12899 | { | 12899 | { |
| 12900 | struct bnx2x_vlan_entry *vlan; | 12900 | struct bnx2x_vlan_entry *vlan; |
| 12901 | int rc = 0; | 12901 | int rc = 0; |
| 12902 | 12902 | ||
| 12903 | if (!bp->vlan_cnt) { | 12903 | /* Configure all non-configured entries */ |
| 12904 | DP(NETIF_MSG_IFUP, "No need to re-configure vlan filters\n"); | ||
| 12905 | return 0; | ||
| 12906 | } | ||
| 12907 | |||
| 12908 | list_for_each_entry(vlan, &bp->vlan_reg, link) { | 12904 | list_for_each_entry(vlan, &bp->vlan_reg, link) { |
| 12909 | /* Prepare for cleanup in case of errors */ | 12905 | if (vlan->hw) |
| 12910 | if (rc) { | ||
| 12911 | vlan->hw = false; | ||
| 12912 | continue; | ||
| 12913 | } | ||
| 12914 | |||
| 12915 | if (!vlan->hw) | ||
| 12916 | continue; | 12906 | continue; |
| 12917 | 12907 | ||
| 12918 | DP(NETIF_MSG_IFUP, "Re-configuring vlan 0x%04x\n", vlan->vid); | 12908 | if (bp->vlan_cnt >= bp->vlan_credit) |
| 12909 | return -ENOBUFS; | ||
| 12919 | 12910 | ||
| 12920 | rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true); | 12911 | rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true); |
| 12921 | if (rc) { | 12912 | if (rc) { |
| 12922 | BNX2X_ERR("Unable to configure VLAN %d\n", vlan->vid); | 12913 | BNX2X_ERR("Unable to config VLAN %d\n", vlan->vid); |
| 12923 | vlan->hw = false; | 12914 | return rc; |
| 12924 | rc = -EINVAL; | ||
| 12925 | continue; | ||
| 12926 | } | 12915 | } |
| 12916 | |||
| 12917 | DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", vlan->vid); | ||
| 12918 | vlan->hw = true; | ||
| 12919 | bp->vlan_cnt++; | ||
| 12927 | } | 12920 | } |
| 12928 | 12921 | ||
| 12929 | return rc; | 12922 | return 0; |
| 12923 | } | ||
| 12924 | |||
| 12925 | static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode) | ||
| 12926 | { | ||
| 12927 | bool need_accept_any_vlan; | ||
| 12928 | |||
| 12929 | need_accept_any_vlan = !!bnx2x_vlan_configure_vid_list(bp); | ||
| 12930 | |||
| 12931 | if (bp->accept_any_vlan != need_accept_any_vlan) { | ||
| 12932 | bp->accept_any_vlan = need_accept_any_vlan; | ||
| 12933 | DP(NETIF_MSG_IFUP, "Accept all VLAN %s\n", | ||
| 12934 | bp->accept_any_vlan ? "raised" : "cleared"); | ||
| 12935 | if (set_rx_mode) { | ||
| 12936 | if (IS_PF(bp)) | ||
| 12937 | bnx2x_set_rx_mode_inner(bp); | ||
| 12938 | else | ||
| 12939 | bnx2x_vfpf_storm_rx_mode(bp); | ||
| 12940 | } | ||
| 12941 | } | ||
| 12942 | } | ||
| 12943 | |||
| 12944 | int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp) | ||
| 12945 | { | ||
| 12946 | struct bnx2x_vlan_entry *vlan; | ||
| 12947 | |||
| 12948 | /* The hw forgot all entries after reload */ | ||
| 12949 | list_for_each_entry(vlan, &bp->vlan_reg, link) | ||
| 12950 | vlan->hw = false; | ||
| 12951 | bp->vlan_cnt = 0; | ||
| 12952 | |||
| 12953 | /* Don't set rx mode here. Our caller will do it. */ | ||
| 12954 | bnx2x_vlan_configure(bp, false); | ||
| 12955 | |||
| 12956 | return 0; | ||
| 12930 | } | 12957 | } |
| 12931 | 12958 | ||
| 12932 | static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) | 12959 | static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) |
| 12933 | { | 12960 | { |
| 12934 | struct bnx2x *bp = netdev_priv(dev); | 12961 | struct bnx2x *bp = netdev_priv(dev); |
| 12935 | struct bnx2x_vlan_entry *vlan; | 12962 | struct bnx2x_vlan_entry *vlan; |
| 12936 | bool hw = false; | ||
| 12937 | int rc = 0; | ||
| 12938 | |||
| 12939 | if (!netif_running(bp->dev)) { | ||
| 12940 | DP(NETIF_MSG_IFUP, | ||
| 12941 | "Ignoring VLAN configuration the interface is down\n"); | ||
| 12942 | return -EFAULT; | ||
| 12943 | } | ||
| 12944 | 12963 | ||
| 12945 | DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid); | 12964 | DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid); |
| 12946 | 12965 | ||
| @@ -12948,93 +12967,47 @@ static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) | |||
| 12948 | if (!vlan) | 12967 | if (!vlan) |
| 12949 | return -ENOMEM; | 12968 | return -ENOMEM; |
| 12950 | 12969 | ||
| 12951 | bp->vlan_cnt++; | ||
| 12952 | if (bp->vlan_cnt > bp->vlan_credit && !bp->accept_any_vlan) { | ||
| 12953 | DP(NETIF_MSG_IFUP, "Accept all VLAN raised\n"); | ||
| 12954 | bp->accept_any_vlan = true; | ||
| 12955 | if (IS_PF(bp)) | ||
| 12956 | bnx2x_set_rx_mode_inner(bp); | ||
| 12957 | else | ||
| 12958 | bnx2x_vfpf_storm_rx_mode(bp); | ||
| 12959 | } else if (bp->vlan_cnt <= bp->vlan_credit) { | ||
| 12960 | rc = __bnx2x_vlan_configure_vid(bp, vid, true); | ||
| 12961 | hw = true; | ||
| 12962 | } | ||
| 12963 | |||
| 12964 | vlan->vid = vid; | 12970 | vlan->vid = vid; |
| 12965 | vlan->hw = hw; | 12971 | vlan->hw = false; |
| 12972 | list_add_tail(&vlan->link, &bp->vlan_reg); | ||
| 12966 | 12973 | ||
| 12967 | if (!rc) { | 12974 | if (netif_running(dev)) |
| 12968 | list_add(&vlan->link, &bp->vlan_reg); | 12975 | bnx2x_vlan_configure(bp, true); |
| 12969 | } else { | ||
| 12970 | bp->vlan_cnt--; | ||
| 12971 | kfree(vlan); | ||
| 12972 | } | ||
| 12973 | |||
| 12974 | DP(NETIF_MSG_IFUP, "Adding VLAN result %d\n", rc); | ||
| 12975 | 12976 | ||
| 12976 | return rc; | 12977 | return 0; |
| 12977 | } | 12978 | } |
| 12978 | 12979 | ||
| 12979 | static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) | 12980 | static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) |
| 12980 | { | 12981 | { |
| 12981 | struct bnx2x *bp = netdev_priv(dev); | 12982 | struct bnx2x *bp = netdev_priv(dev); |
| 12982 | struct bnx2x_vlan_entry *vlan; | 12983 | struct bnx2x_vlan_entry *vlan; |
| 12984 | bool found = false; | ||
| 12983 | int rc = 0; | 12985 | int rc = 0; |
| 12984 | 12986 | ||
| 12985 | if (!netif_running(bp->dev)) { | ||
| 12986 | DP(NETIF_MSG_IFUP, | ||
| 12987 | "Ignoring VLAN configuration the interface is down\n"); | ||
| 12988 | return -EFAULT; | ||
| 12989 | } | ||
| 12990 | |||
| 12991 | DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid); | 12987 | DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid); |
| 12992 | 12988 | ||
| 12993 | if (!bp->vlan_cnt) { | ||
| 12994 | BNX2X_ERR("Unable to kill VLAN %d\n", vid); | ||
| 12995 | return -EINVAL; | ||
| 12996 | } | ||
| 12997 | |||
| 12998 | list_for_each_entry(vlan, &bp->vlan_reg, link) | 12989 | list_for_each_entry(vlan, &bp->vlan_reg, link) |
| 12999 | if (vlan->vid == vid) | 12990 | if (vlan->vid == vid) { |
| 12991 | found = true; | ||
| 13000 | break; | 12992 | break; |
| 12993 | } | ||
| 13001 | 12994 | ||
| 13002 | if (vlan->vid != vid) { | 12995 | if (!found) { |
| 13003 | BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid); | 12996 | BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid); |
| 13004 | return -EINVAL; | 12997 | return -EINVAL; |
| 13005 | } | 12998 | } |
| 13006 | 12999 | ||
| 13007 | if (vlan->hw) | 13000 | if (netif_running(dev) && vlan->hw) { |
| 13008 | rc = __bnx2x_vlan_configure_vid(bp, vid, false); | 13001 | rc = __bnx2x_vlan_configure_vid(bp, vid, false); |
| 13002 | DP(NETIF_MSG_IFUP, "HW deconfigured for VLAN %d\n", vid); | ||
| 13003 | bp->vlan_cnt--; | ||
| 13004 | } | ||
| 13009 | 13005 | ||
| 13010 | list_del(&vlan->link); | 13006 | list_del(&vlan->link); |
| 13011 | kfree(vlan); | 13007 | kfree(vlan); |
| 13012 | 13008 | ||
| 13013 | bp->vlan_cnt--; | 13009 | if (netif_running(dev)) |
| 13014 | 13010 | bnx2x_vlan_configure(bp, true); | |
| 13015 | if (bp->vlan_cnt <= bp->vlan_credit && bp->accept_any_vlan) { | ||
| 13016 | /* Configure all non-configured entries */ | ||
| 13017 | list_for_each_entry(vlan, &bp->vlan_reg, link) { | ||
| 13018 | if (vlan->hw) | ||
| 13019 | continue; | ||
| 13020 | |||
| 13021 | rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true); | ||
| 13022 | if (rc) { | ||
| 13023 | BNX2X_ERR("Unable to config VLAN %d\n", | ||
| 13024 | vlan->vid); | ||
| 13025 | continue; | ||
| 13026 | } | ||
| 13027 | DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", | ||
| 13028 | vlan->vid); | ||
| 13029 | vlan->hw = true; | ||
| 13030 | } | ||
| 13031 | DP(NETIF_MSG_IFUP, "Accept all VLAN Removed\n"); | ||
| 13032 | bp->accept_any_vlan = false; | ||
| 13033 | if (IS_PF(bp)) | ||
| 13034 | bnx2x_set_rx_mode_inner(bp); | ||
| 13035 | else | ||
| 13036 | bnx2x_vfpf_storm_rx_mode(bp); | ||
| 13037 | } | ||
| 13038 | 13011 | ||
| 13039 | DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc); | 13012 | DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc); |
| 13040 | 13013 | ||
| @@ -13941,14 +13914,14 @@ static int bnx2x_init_one(struct pci_dev *pdev, | |||
| 13941 | bp->doorbells = bnx2x_vf_doorbells(bp); | 13914 | bp->doorbells = bnx2x_vf_doorbells(bp); |
| 13942 | rc = bnx2x_vf_pci_alloc(bp); | 13915 | rc = bnx2x_vf_pci_alloc(bp); |
| 13943 | if (rc) | 13916 | if (rc) |
| 13944 | goto init_one_exit; | 13917 | goto init_one_freemem; |
| 13945 | } else { | 13918 | } else { |
| 13946 | doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT); | 13919 | doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT); |
| 13947 | if (doorbell_size > pci_resource_len(pdev, 2)) { | 13920 | if (doorbell_size > pci_resource_len(pdev, 2)) { |
| 13948 | dev_err(&bp->pdev->dev, | 13921 | dev_err(&bp->pdev->dev, |
| 13949 | "Cannot map doorbells, bar size too small, aborting\n"); | 13922 | "Cannot map doorbells, bar size too small, aborting\n"); |
| 13950 | rc = -ENOMEM; | 13923 | rc = -ENOMEM; |
| 13951 | goto init_one_exit; | 13924 | goto init_one_freemem; |
| 13952 | } | 13925 | } |
| 13953 | bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), | 13926 | bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), |
| 13954 | doorbell_size); | 13927 | doorbell_size); |
| @@ -13957,19 +13930,19 @@ static int bnx2x_init_one(struct pci_dev *pdev, | |||
| 13957 | dev_err(&bp->pdev->dev, | 13930 | dev_err(&bp->pdev->dev, |
| 13958 | "Cannot map doorbell space, aborting\n"); | 13931 | "Cannot map doorbell space, aborting\n"); |
| 13959 | rc = -ENOMEM; | 13932 | rc = -ENOMEM; |
| 13960 | goto init_one_exit; | 13933 | goto init_one_freemem; |
| 13961 | } | 13934 | } |
| 13962 | 13935 | ||
| 13963 | if (IS_VF(bp)) { | 13936 | if (IS_VF(bp)) { |
| 13964 | rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count); | 13937 | rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count); |
| 13965 | if (rc) | 13938 | if (rc) |
| 13966 | goto init_one_exit; | 13939 | goto init_one_freemem; |
| 13967 | } | 13940 | } |
| 13968 | 13941 | ||
| 13969 | /* Enable SRIOV if capability found in configuration space */ | 13942 | /* Enable SRIOV if capability found in configuration space */ |
| 13970 | rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS); | 13943 | rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS); |
| 13971 | if (rc) | 13944 | if (rc) |
| 13972 | goto init_one_exit; | 13945 | goto init_one_freemem; |
| 13973 | 13946 | ||
| 13974 | /* calc qm_cid_count */ | 13947 | /* calc qm_cid_count */ |
| 13975 | bp->qm_cid_count = bnx2x_set_qm_cid_count(bp); | 13948 | bp->qm_cid_count = bnx2x_set_qm_cid_count(bp); |
| @@ -13988,7 +13961,7 @@ static int bnx2x_init_one(struct pci_dev *pdev, | |||
| 13988 | rc = bnx2x_set_int_mode(bp); | 13961 | rc = bnx2x_set_int_mode(bp); |
| 13989 | if (rc) { | 13962 | if (rc) { |
| 13990 | dev_err(&pdev->dev, "Cannot set interrupts\n"); | 13963 | dev_err(&pdev->dev, "Cannot set interrupts\n"); |
| 13991 | goto init_one_exit; | 13964 | goto init_one_freemem; |
| 13992 | } | 13965 | } |
| 13993 | BNX2X_DEV_INFO("set interrupts successfully\n"); | 13966 | BNX2X_DEV_INFO("set interrupts successfully\n"); |
| 13994 | 13967 | ||
| @@ -13996,7 +13969,7 @@ static int bnx2x_init_one(struct pci_dev *pdev, | |||
| 13996 | rc = register_netdev(dev); | 13969 | rc = register_netdev(dev); |
| 13997 | if (rc) { | 13970 | if (rc) { |
| 13998 | dev_err(&pdev->dev, "Cannot register net device\n"); | 13971 | dev_err(&pdev->dev, "Cannot register net device\n"); |
| 13999 | goto init_one_exit; | 13972 | goto init_one_freemem; |
| 14000 | } | 13973 | } |
| 14001 | BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name); | 13974 | BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name); |
| 14002 | 13975 | ||
| @@ -14029,6 +14002,9 @@ static int bnx2x_init_one(struct pci_dev *pdev, | |||
| 14029 | 14002 | ||
| 14030 | return 0; | 14003 | return 0; |
| 14031 | 14004 | ||
| 14005 | init_one_freemem: | ||
| 14006 | bnx2x_free_mem_bp(bp); | ||
| 14007 | |||
| 14032 | init_one_exit: | 14008 | init_one_exit: |
| 14033 | bnx2x_disable_pcie_error_reporting(bp); | 14009 | bnx2x_disable_pcie_error_reporting(bp); |
| 14034 | 14010 | ||
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 72a2efff8e49..c777cde85ce4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
| @@ -286,7 +286,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 286 | cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod); | 286 | cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod); |
| 287 | txr->tx_prod = prod; | 287 | txr->tx_prod = prod; |
| 288 | 288 | ||
| 289 | tx_buf->is_push = 1; | ||
| 289 | netdev_tx_sent_queue(txq, skb->len); | 290 | netdev_tx_sent_queue(txq, skb->len); |
| 291 | wmb(); /* Sync is_push and byte queue before pushing data */ | ||
| 290 | 292 | ||
| 291 | push_len = (length + sizeof(*tx_push) + 7) / 8; | 293 | push_len = (length + sizeof(*tx_push) + 7) / 8; |
| 292 | if (push_len > 16) { | 294 | if (push_len > 16) { |
| @@ -298,7 +300,6 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 298 | push_len); | 300 | push_len); |
| 299 | } | 301 | } |
| 300 | 302 | ||
| 301 | tx_buf->is_push = 1; | ||
| 302 | goto tx_done; | 303 | goto tx_done; |
| 303 | } | 304 | } |
| 304 | 305 | ||
| @@ -1112,19 +1113,13 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, | |||
| 1112 | if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) | 1113 | if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) |
| 1113 | skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); | 1114 | skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); |
| 1114 | 1115 | ||
| 1115 | if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) { | 1116 | if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) && |
| 1116 | netdev_features_t features = skb->dev->features; | 1117 | (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { |
| 1117 | u16 vlan_proto = tpa_info->metadata >> | 1118 | u16 vlan_proto = tpa_info->metadata >> |
| 1118 | RX_CMP_FLAGS2_METADATA_TPID_SFT; | 1119 | RX_CMP_FLAGS2_METADATA_TPID_SFT; |
| 1120 | u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK; | ||
| 1119 | 1121 | ||
| 1120 | if (((features & NETIF_F_HW_VLAN_CTAG_RX) && | 1122 | __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); |
| 1121 | vlan_proto == ETH_P_8021Q) || | ||
| 1122 | ((features & NETIF_F_HW_VLAN_STAG_RX) && | ||
| 1123 | vlan_proto == ETH_P_8021AD)) { | ||
| 1124 | __vlan_hwaccel_put_tag(skb, htons(vlan_proto), | ||
| 1125 | tpa_info->metadata & | ||
| 1126 | RX_CMP_FLAGS2_METADATA_VID_MASK); | ||
| 1127 | } | ||
| 1128 | } | 1123 | } |
| 1129 | 1124 | ||
| 1130 | skb_checksum_none_assert(skb); | 1125 | skb_checksum_none_assert(skb); |
| @@ -1277,19 +1272,14 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, | |||
| 1277 | 1272 | ||
| 1278 | skb->protocol = eth_type_trans(skb, dev); | 1273 | skb->protocol = eth_type_trans(skb, dev); |
| 1279 | 1274 | ||
| 1280 | if (rxcmp1->rx_cmp_flags2 & | 1275 | if ((rxcmp1->rx_cmp_flags2 & |
| 1281 | cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) { | 1276 | cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) && |
| 1282 | netdev_features_t features = skb->dev->features; | 1277 | (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { |
| 1283 | u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); | 1278 | u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); |
| 1279 | u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK; | ||
| 1284 | u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; | 1280 | u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; |
| 1285 | 1281 | ||
| 1286 | if (((features & NETIF_F_HW_VLAN_CTAG_RX) && | 1282 | __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); |
| 1287 | vlan_proto == ETH_P_8021Q) || | ||
| 1288 | ((features & NETIF_F_HW_VLAN_STAG_RX) && | ||
| 1289 | vlan_proto == ETH_P_8021AD)) | ||
| 1290 | __vlan_hwaccel_put_tag(skb, htons(vlan_proto), | ||
| 1291 | meta_data & | ||
| 1292 | RX_CMP_FLAGS2_METADATA_VID_MASK); | ||
| 1293 | } | 1283 | } |
| 1294 | 1284 | ||
| 1295 | skb_checksum_none_assert(skb); | 1285 | skb_checksum_none_assert(skb); |
| @@ -5466,6 +5456,20 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev, | |||
| 5466 | 5456 | ||
| 5467 | if (!bnxt_rfs_capable(bp)) | 5457 | if (!bnxt_rfs_capable(bp)) |
| 5468 | features &= ~NETIF_F_NTUPLE; | 5458 | features &= ~NETIF_F_NTUPLE; |
| 5459 | |||
| 5460 | /* Both CTAG and STAG VLAN acceleration on the RX side have to be | ||
| 5461 | * turned on or off together. | ||
| 5462 | */ | ||
| 5463 | if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) != | ||
| 5464 | (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) { | ||
| 5465 | if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) | ||
| 5466 | features &= ~(NETIF_F_HW_VLAN_CTAG_RX | | ||
| 5467 | NETIF_F_HW_VLAN_STAG_RX); | ||
| 5468 | else | ||
| 5469 | features |= NETIF_F_HW_VLAN_CTAG_RX | | ||
| 5470 | NETIF_F_HW_VLAN_STAG_RX; | ||
| 5471 | } | ||
| 5472 | |||
| 5469 | return features; | 5473 | return features; |
| 5470 | } | 5474 | } |
| 5471 | 5475 | ||
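
The bnxt hunks above do three things: set tx_buf->is_push and publish the byte-queue count before the push, with a wmb() so the completion path sees is_push in order; collapse the TPA and normal RX VLAN paths into a single CTAG check; and force the CTAG and STAG RX offloads to toggle as a pair in bnxt_fix_features(). The pairing rule is small enough to show standalone; the sketch below is a user-space illustration with placeholder bit values, not the driver function.

#include <stdio.h>
#include <stdint.h>

#define VLAN_CTAG_RX (1u << 0)	/* stand-in for NETIF_F_HW_VLAN_CTAG_RX */
#define VLAN_STAG_RX (1u << 1)	/* stand-in for NETIF_F_HW_VLAN_STAG_RX */

static uint32_t fix_vlan_rx(uint32_t requested, uint32_t current_features)
{
	uint32_t both = VLAN_CTAG_RX | VLAN_STAG_RX;

	/* If the request leaves the pair out of sync, follow the device's
	 * current CTAG setting: clear both or set both, never just one. */
	if ((requested & both) != both) {
		if (current_features & VLAN_CTAG_RX)
			requested &= ~both;
		else
			requested |= both;
	}
	return requested;
}

int main(void)
{
	/* User disables only STAG strip while both are on: both go off. */
	printf("%#x\n", (unsigned)fix_vlan_rx(VLAN_CTAG_RX,
					      VLAN_CTAG_RX | VLAN_STAG_RX));
	/* User enables only STAG strip while both are off: both go on. */
	printf("%#x\n", (unsigned)fix_vlan_rx(VLAN_STAG_RX, 0));
	return 0;
}

With the device's current CTAG state as the tie-breaker, a request that touches only one of the two flags ends up enabling or disabling both, which is what the new hunk enforces.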
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index a2cdfc1261dc..50812a1d67bd 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | |||
| @@ -144,6 +144,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN | |||
| 144 | CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */ | 144 | CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */ |
| 145 | CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */ | 145 | CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */ |
| 146 | CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */ | 146 | CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */ |
| 147 | CH_PCI_ID_TABLE_FENTRY(0x5018), /* T540-BT */ | ||
| 147 | CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */ | 148 | CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */ |
| 148 | CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */ | 149 | CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */ |
| 149 | CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */ | 150 | CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */ |
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index 41b010645100..4edb98c3c6c7 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c | |||
| @@ -1195,7 +1195,7 @@ static int ethoc_probe(struct platform_device *pdev) | |||
| 1195 | priv->mdio = mdiobus_alloc(); | 1195 | priv->mdio = mdiobus_alloc(); |
| 1196 | if (!priv->mdio) { | 1196 | if (!priv->mdio) { |
| 1197 | ret = -ENOMEM; | 1197 | ret = -ENOMEM; |
| 1198 | goto free; | 1198 | goto free2; |
| 1199 | } | 1199 | } |
| 1200 | 1200 | ||
| 1201 | priv->mdio->name = "ethoc-mdio"; | 1201 | priv->mdio->name = "ethoc-mdio"; |
| @@ -1208,7 +1208,7 @@ static int ethoc_probe(struct platform_device *pdev) | |||
| 1208 | ret = mdiobus_register(priv->mdio); | 1208 | ret = mdiobus_register(priv->mdio); |
| 1209 | if (ret) { | 1209 | if (ret) { |
| 1210 | dev_err(&netdev->dev, "failed to register MDIO bus\n"); | 1210 | dev_err(&netdev->dev, "failed to register MDIO bus\n"); |
| 1211 | goto free; | 1211 | goto free2; |
| 1212 | } | 1212 | } |
| 1213 | 1213 | ||
| 1214 | ret = ethoc_mdio_probe(netdev); | 1214 | ret = ethoc_mdio_probe(netdev); |
| @@ -1241,9 +1241,10 @@ error2: | |||
| 1241 | error: | 1241 | error: |
| 1242 | mdiobus_unregister(priv->mdio); | 1242 | mdiobus_unregister(priv->mdio); |
| 1243 | mdiobus_free(priv->mdio); | 1243 | mdiobus_free(priv->mdio); |
| 1244 | free: | 1244 | free2: |
| 1245 | if (priv->clk) | 1245 | if (priv->clk) |
| 1246 | clk_disable_unprepare(priv->clk); | 1246 | clk_disable_unprepare(priv->clk); |
| 1247 | free: | ||
| 1247 | free_netdev(netdev); | 1248 | free_netdev(netdev); |
| 1248 | out: | 1249 | out: |
| 1249 | return ret; | 1250 | return ret; |
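
The ethoc relabelling above splits the old free: label in two: the MDIO failures, which occur after the clock has been prepared, now jump to free2: and still disable the clock, while the remaining users of free: (presumably failure paths taken before the clock is prepared) skip clk_disable_unprepare() and only free the netdev. A minimal user-space sketch of that split-label unwind, with stand-in resources rather than the driver's objects:

#include <stdio.h>
#include <stdlib.h>

struct fake_priv { void *netdev; void *clk; void *mdio; };

static int fake_probe(struct fake_priv *p)
{
	int ret = 0;

	p->netdev = malloc(16);			/* like alloc_etherdev() */
	if (!p->netdev)
		return -1;

	p->clk = malloc(16);			/* like clk_prepare_enable() */
	if (!p->clk) {
		ret = -1;
		goto err_free_netdev;		/* clock never came up */
	}

	p->mdio = malloc(16);			/* like mdiobus_alloc() */
	if (!p->mdio) {
		ret = -1;
		goto err_disable_clk;		/* clock is live: undo it too */
	}

	return 0;

err_disable_clk:				/* corresponds to the new free2: */
	free(p->clk);
err_free_netdev:				/* corresponds to free: */
	free(p->netdev);
	return ret;
}

int main(void)
{
	struct fake_priv p = { NULL, NULL, NULL };

	printf("probe: %d\n", fake_probe(&p));
	free(p.mdio);
	free(p.clk);
	free(p.netdev);
	return 0;
}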
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index 085f9125cf42..06f031715b57 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c | |||
| @@ -205,8 +205,10 @@ static int nps_enet_poll(struct napi_struct *napi, int budget) | |||
| 205 | * re-adding ourselves to the poll list. | 205 | * re-adding ourselves to the poll list. |
| 206 | */ | 206 | */ |
| 207 | 207 | ||
| 208 | if (priv->tx_skb && !tx_ctrl_ct) | 208 | if (priv->tx_skb && !tx_ctrl_ct) { |
| 209 | nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0); | ||
| 209 | napi_reschedule(napi); | 210 | napi_reschedule(napi); |
| 211 | } | ||
| 210 | } | 212 | } |
| 211 | 213 | ||
| 212 | return work_done; | 214 | return work_done; |
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index ca2cccc594fd..fea0f330ddbd 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
| @@ -1197,10 +1197,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) | |||
| 1197 | fec16_to_cpu(bdp->cbd_datlen), | 1197 | fec16_to_cpu(bdp->cbd_datlen), |
| 1198 | DMA_TO_DEVICE); | 1198 | DMA_TO_DEVICE); |
| 1199 | bdp->cbd_bufaddr = cpu_to_fec32(0); | 1199 | bdp->cbd_bufaddr = cpu_to_fec32(0); |
| 1200 | if (!skb) { | 1200 | if (!skb) |
| 1201 | bdp = fec_enet_get_nextdesc(bdp, &txq->bd); | 1201 | goto skb_done; |
| 1202 | continue; | ||
| 1203 | } | ||
| 1204 | 1202 | ||
| 1205 | /* Check for errors. */ | 1203 | /* Check for errors. */ |
| 1206 | if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | | 1204 | if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | |
| @@ -1239,7 +1237,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) | |||
| 1239 | 1237 | ||
| 1240 | /* Free the sk buffer associated with this last transmit */ | 1238 | /* Free the sk buffer associated with this last transmit */ |
| 1241 | dev_kfree_skb_any(skb); | 1239 | dev_kfree_skb_any(skb); |
| 1242 | 1240 | skb_done: | |
| 1243 | /* Make sure the update to bdp and tx_skbuff are performed | 1241 | /* Make sure the update to bdp and tx_skbuff are performed |
| 1244 | * before dirty_tx | 1242 | * before dirty_tx |
| 1245 | */ | 1243 | */ |
| @@ -2418,24 +2416,24 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) | |||
| 2418 | return -EOPNOTSUPP; | 2416 | return -EOPNOTSUPP; |
| 2419 | 2417 | ||
| 2420 | if (ec->rx_max_coalesced_frames > 255) { | 2418 | if (ec->rx_max_coalesced_frames > 255) { |
| 2421 | pr_err("Rx coalesced frames exceed hardware limiation"); | 2419 | pr_err("Rx coalesced frames exceed hardware limitation\n"); |
| 2422 | return -EINVAL; | 2420 | return -EINVAL; |
| 2423 | } | 2421 | } |
| 2424 | 2422 | ||
| 2425 | if (ec->tx_max_coalesced_frames > 255) { | 2423 | if (ec->tx_max_coalesced_frames > 255) { |
| 2426 | pr_err("Tx coalesced frame exceed hardware limiation"); | 2424 | pr_err("Tx coalesced frame exceed hardware limitation\n"); |
| 2427 | return -EINVAL; | 2425 | return -EINVAL; |
| 2428 | } | 2426 | } |
| 2429 | 2427 | ||
| 2430 | cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr); | 2428 | cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr); |
| 2431 | if (cycle > 0xFFFF) { | 2429 | if (cycle > 0xFFFF) { |
| 2432 | pr_err("Rx coalesed usec exceeed hardware limiation"); | 2430 | pr_err("Rx coalesced usec exceed hardware limitation\n"); |
| 2433 | return -EINVAL; | 2431 | return -EINVAL; |
| 2434 | } | 2432 | } |
| 2435 | 2433 | ||
| 2436 | cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr); | 2434 | cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr); |
| 2437 | if (cycle > 0xFFFF) { | 2435 | if (cycle > 0xFFFF) { |
| 2438 | pr_err("Rx coalesed usec exceeed hardware limiation"); | 2436 | pr_err("Rx coalesced usec exceed hardware limitation\n"); |
| 2439 | return -EINVAL; | 2437 | return -EINVAL; |
| 2440 | } | 2438 | } |
| 2441 | 2439 | ||
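
The first fec hunk introduces a skb_done label so the descriptor advance and dirty_tx ordering below it are shared with the no-skb case instead of being duplicated; the later hunks fix the wording of the coalesce error messages. The limits those messages guard are plain 8-bit and 16-bit hardware fields, illustrated by this standalone check (the clock ratio is a made-up stand-in for fec_enet_us_to_itr_clock()):

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

static int check_coalesce(unsigned int max_frames, unsigned int usecs)
{
	uint64_t cycles = (uint64_t)usecs * 64;	/* placeholder clock ratio */

	if (max_frames > 255) {			/* 8-bit frame-count field */
		fprintf(stderr, "coalesced frames exceed hardware limitation\n");
		return -EINVAL;
	}
	if (cycles > 0xFFFF) {			/* 16-bit timer field */
		fprintf(stderr, "coalesced usecs exceed hardware limitation\n");
		return -EINVAL;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", check_coalesce(200, 500));	/* within limits */
	printf("%d\n", check_coalesce(300, 500));	/* too many frames */
	return 0;
}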
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 7615e0668acb..2e6785b6e8be 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c | |||
| @@ -2440,7 +2440,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 2440 | tx_queue->tx_ring_size); | 2440 | tx_queue->tx_ring_size); |
| 2441 | 2441 | ||
| 2442 | if (likely(!nr_frags)) { | 2442 | if (likely(!nr_frags)) { |
| 2443 | lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); | 2443 | if (likely(!do_tstamp)) |
| 2444 | lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); | ||
| 2444 | } else { | 2445 | } else { |
| 2445 | u32 lstatus_start = lstatus; | 2446 | u32 lstatus_start = lstatus; |
| 2446 | 2447 | ||
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 3d746c887873..67a648c7d3a9 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | |||
| @@ -46,7 +46,6 @@ static u32 hns_nic_get_link(struct net_device *net_dev) | |||
| 46 | u32 link_stat = priv->link; | 46 | u32 link_stat = priv->link; |
| 47 | struct hnae_handle *h; | 47 | struct hnae_handle *h; |
| 48 | 48 | ||
| 49 | assert(priv && priv->ae_handle); | ||
| 50 | h = priv->ae_handle; | 49 | h = priv->ae_handle; |
| 51 | 50 | ||
| 52 | if (priv->phy) { | 51 | if (priv->phy) { |
| @@ -646,8 +645,6 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev, | |||
| 646 | { | 645 | { |
| 647 | struct hns_nic_priv *priv = netdev_priv(net_dev); | 646 | struct hns_nic_priv *priv = netdev_priv(net_dev); |
| 648 | 647 | ||
| 649 | assert(priv); | ||
| 650 | |||
| 651 | strncpy(drvinfo->version, HNAE_DRIVER_VERSION, | 648 | strncpy(drvinfo->version, HNAE_DRIVER_VERSION, |
| 652 | sizeof(drvinfo->version)); | 649 | sizeof(drvinfo->version)); |
| 653 | drvinfo->version[sizeof(drvinfo->version) - 1] = '\0'; | 650 | drvinfo->version[sizeof(drvinfo->version) - 1] = '\0'; |
| @@ -720,8 +717,6 @@ static int hns_set_pauseparam(struct net_device *net_dev, | |||
| 720 | struct hnae_handle *h; | 717 | struct hnae_handle *h; |
| 721 | struct hnae_ae_ops *ops; | 718 | struct hnae_ae_ops *ops; |
| 722 | 719 | ||
| 723 | assert(priv || priv->ae_handle); | ||
| 724 | |||
| 725 | h = priv->ae_handle; | 720 | h = priv->ae_handle; |
| 726 | ops = h->dev->ops; | 721 | ops = h->dev->ops; |
| 727 | 722 | ||
| @@ -780,8 +775,6 @@ static int hns_set_coalesce(struct net_device *net_dev, | |||
| 780 | struct hnae_ae_ops *ops; | 775 | struct hnae_ae_ops *ops; |
| 781 | int ret; | 776 | int ret; |
| 782 | 777 | ||
| 783 | assert(priv || priv->ae_handle); | ||
| 784 | |||
| 785 | ops = priv->ae_handle->dev->ops; | 778 | ops = priv->ae_handle->dev->ops; |
| 786 | 779 | ||
| 787 | if (ec->tx_coalesce_usecs != ec->rx_coalesce_usecs) | 780 | if (ec->tx_coalesce_usecs != ec->rx_coalesce_usecs) |
| @@ -1111,8 +1104,6 @@ void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd, | |||
| 1111 | struct hns_nic_priv *priv = netdev_priv(net_dev); | 1104 | struct hns_nic_priv *priv = netdev_priv(net_dev); |
| 1112 | struct hnae_ae_ops *ops; | 1105 | struct hnae_ae_ops *ops; |
| 1113 | 1106 | ||
| 1114 | assert(priv || priv->ae_handle); | ||
| 1115 | |||
| 1116 | ops = priv->ae_handle->dev->ops; | 1107 | ops = priv->ae_handle->dev->ops; |
| 1117 | 1108 | ||
| 1118 | cmd->version = HNS_CHIP_VERSION; | 1109 | cmd->version = HNS_CHIP_VERSION; |
| @@ -1135,8 +1126,6 @@ static int hns_get_regs_len(struct net_device *net_dev) | |||
| 1135 | struct hns_nic_priv *priv = netdev_priv(net_dev); | 1126 | struct hns_nic_priv *priv = netdev_priv(net_dev); |
| 1136 | struct hnae_ae_ops *ops; | 1127 | struct hnae_ae_ops *ops; |
| 1137 | 1128 | ||
| 1138 | assert(priv || priv->ae_handle); | ||
| 1139 | |||
| 1140 | ops = priv->ae_handle->dev->ops; | 1129 | ops = priv->ae_handle->dev->ops; |
| 1141 | if (!ops->get_regs_len) { | 1130 | if (!ops->get_regs_len) { |
| 1142 | netdev_err(net_dev, "ops->get_regs_len is null!\n"); | 1131 | netdev_err(net_dev, "ops->get_regs_len is null!\n"); |
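
Several of the checks removed from hns_ethtool.c had the form assert(priv || priv->ae_handle): with ||, the right-hand side is evaluated only when priv is NULL, so the expression either passes trivially or dereferences a NULL pointer before the assert can report anything, which is why dropping them loses nothing. A tiny standalone contrast (names are placeholders):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct fake_priv { void *ae_handle; };

static void check(const struct fake_priv *priv)
{
	/* The guard one would actually want: pointer and member both valid.
	 * The removed form, assert(priv || priv->ae_handle), short-circuits
	 * for any non-NULL priv and crashes on a NULL priv before the
	 * assert itself can fire. */
	assert(priv && priv->ae_handle);
}

int main(void)
{
	struct fake_priv good = { .ae_handle = &good };

	check(&good);
	printf("ok\n");
	return 0;
}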
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c index 01fccec632ec..466939f8f0cf 100644 --- a/drivers/net/ethernet/marvell/mvneta_bm.c +++ b/drivers/net/ethernet/marvell/mvneta_bm.c | |||
| @@ -189,6 +189,7 @@ struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id, | |||
| 189 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | 189 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); |
| 190 | hwbm_pool->construct = mvneta_bm_construct; | 190 | hwbm_pool->construct = mvneta_bm_construct; |
| 191 | hwbm_pool->priv = new_pool; | 191 | hwbm_pool->priv = new_pool; |
| 192 | spin_lock_init(&hwbm_pool->lock); | ||
| 192 | 193 | ||
| 193 | /* Create new pool */ | 194 | /* Create new pool */ |
| 194 | err = mvneta_bm_pool_create(priv, new_pool); | 195 | err = mvneta_bm_pool_create(priv, new_pool); |
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index c984462fad2a..4763252bbf85 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c | |||
| @@ -133,6 +133,8 @@ static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg) | |||
| 133 | static void mtk_phy_link_adjust(struct net_device *dev) | 133 | static void mtk_phy_link_adjust(struct net_device *dev) |
| 134 | { | 134 | { |
| 135 | struct mtk_mac *mac = netdev_priv(dev); | 135 | struct mtk_mac *mac = netdev_priv(dev); |
| 136 | u16 lcl_adv = 0, rmt_adv = 0; | ||
| 137 | u8 flowctrl; | ||
| 136 | u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | | 138 | u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | |
| 137 | MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | | 139 | MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | |
| 138 | MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | | 140 | MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | |
| @@ -150,11 +152,30 @@ static void mtk_phy_link_adjust(struct net_device *dev) | |||
| 150 | if (mac->phy_dev->link) | 152 | if (mac->phy_dev->link) |
| 151 | mcr |= MAC_MCR_FORCE_LINK; | 153 | mcr |= MAC_MCR_FORCE_LINK; |
| 152 | 154 | ||
| 153 | if (mac->phy_dev->duplex) | 155 | if (mac->phy_dev->duplex) { |
| 154 | mcr |= MAC_MCR_FORCE_DPX; | 156 | mcr |= MAC_MCR_FORCE_DPX; |
| 155 | 157 | ||
| 156 | if (mac->phy_dev->pause) | 158 | if (mac->phy_dev->pause) |
| 157 | mcr |= MAC_MCR_FORCE_RX_FC | MAC_MCR_FORCE_TX_FC; | 159 | rmt_adv = LPA_PAUSE_CAP; |
| 160 | if (mac->phy_dev->asym_pause) | ||
| 161 | rmt_adv |= LPA_PAUSE_ASYM; | ||
| 162 | |||
| 163 | if (mac->phy_dev->advertising & ADVERTISED_Pause) | ||
| 164 | lcl_adv |= ADVERTISE_PAUSE_CAP; | ||
| 165 | if (mac->phy_dev->advertising & ADVERTISED_Asym_Pause) | ||
| 166 | lcl_adv |= ADVERTISE_PAUSE_ASYM; | ||
| 167 | |||
| 168 | flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); | ||
| 169 | |||
| 170 | if (flowctrl & FLOW_CTRL_TX) | ||
| 171 | mcr |= MAC_MCR_FORCE_TX_FC; | ||
| 172 | if (flowctrl & FLOW_CTRL_RX) | ||
| 173 | mcr |= MAC_MCR_FORCE_RX_FC; | ||
| 174 | |||
| 175 | netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n", | ||
| 176 | flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled", | ||
| 177 | flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled"); | ||
| 178 | } | ||
| 158 | 179 | ||
| 159 | mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); | 180 | mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); |
| 160 | 181 | ||
| @@ -208,10 +229,16 @@ static int mtk_phy_connect(struct mtk_mac *mac) | |||
| 208 | u32 val, ge_mode; | 229 | u32 val, ge_mode; |
| 209 | 230 | ||
| 210 | np = of_parse_phandle(mac->of_node, "phy-handle", 0); | 231 | np = of_parse_phandle(mac->of_node, "phy-handle", 0); |
| 232 | if (!np && of_phy_is_fixed_link(mac->of_node)) | ||
| 233 | if (!of_phy_register_fixed_link(mac->of_node)) | ||
| 234 | np = of_node_get(mac->of_node); | ||
| 211 | if (!np) | 235 | if (!np) |
| 212 | return -ENODEV; | 236 | return -ENODEV; |
| 213 | 237 | ||
| 214 | switch (of_get_phy_mode(np)) { | 238 | switch (of_get_phy_mode(np)) { |
| 239 | case PHY_INTERFACE_MODE_RGMII_TXID: | ||
| 240 | case PHY_INTERFACE_MODE_RGMII_RXID: | ||
| 241 | case PHY_INTERFACE_MODE_RGMII_ID: | ||
| 215 | case PHY_INTERFACE_MODE_RGMII: | 242 | case PHY_INTERFACE_MODE_RGMII: |
| 216 | ge_mode = 0; | 243 | ge_mode = 0; |
| 217 | break; | 244 | break; |
| @@ -236,7 +263,8 @@ static int mtk_phy_connect(struct mtk_mac *mac) | |||
| 236 | mac->phy_dev->autoneg = AUTONEG_ENABLE; | 263 | mac->phy_dev->autoneg = AUTONEG_ENABLE; |
| 237 | mac->phy_dev->speed = 0; | 264 | mac->phy_dev->speed = 0; |
| 238 | mac->phy_dev->duplex = 0; | 265 | mac->phy_dev->duplex = 0; |
| 239 | mac->phy_dev->supported &= PHY_BASIC_FEATURES; | 266 | mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause | |
| 267 | SUPPORTED_Asym_Pause; | ||
| 240 | mac->phy_dev->advertising = mac->phy_dev->supported | | 268 | mac->phy_dev->advertising = mac->phy_dev->supported | |
| 241 | ADVERTISED_Autoneg; | 269 | ADVERTISED_Autoneg; |
| 242 | phy_start_aneg(mac->phy_dev); | 270 | phy_start_aneg(mac->phy_dev); |
| @@ -280,7 +308,7 @@ static int mtk_mdio_init(struct mtk_eth *eth) | |||
| 280 | return 0; | 308 | return 0; |
| 281 | 309 | ||
| 282 | err_free_bus: | 310 | err_free_bus: |
| 283 | kfree(eth->mii_bus); | 311 | mdiobus_free(eth->mii_bus); |
| 284 | 312 | ||
| 285 | err_put_node: | 313 | err_put_node: |
| 286 | of_node_put(mii_np); | 314 | of_node_put(mii_np); |
| @@ -295,7 +323,7 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth) | |||
| 295 | 323 | ||
| 296 | mdiobus_unregister(eth->mii_bus); | 324 | mdiobus_unregister(eth->mii_bus); |
| 297 | of_node_put(eth->mii_bus->dev.of_node); | 325 | of_node_put(eth->mii_bus->dev.of_node); |
| 298 | kfree(eth->mii_bus); | 326 | mdiobus_free(eth->mii_bus); |
| 299 | } | 327 | } |
| 300 | 328 | ||
| 301 | static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask) | 329 | static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask) |
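
The mtk_eth_soc changes add fixed-link and RGMII-delay PHY modes, free the MDIO bus with mdiobus_free() instead of kfree(), widen the supported mask to gigabit plus pause, and replace the blunt "force both directions on pause" logic with a proper resolution of the advertised pause bits via mii_resolve_flowctrl_fdx(). The sketch below re-expresses that resolution rule (802.3 Annex 28B style) with placeholder bit values; it is an illustration, not the kernel helper itself.

#include <stdio.h>
#include <stdint.h>

#define PAUSE_CAP  (1u << 0)	/* stand-in for ADVERTISE/LPA_PAUSE_CAP  */
#define PAUSE_ASYM (1u << 1)	/* stand-in for ADVERTISE/LPA_PAUSE_ASYM */
#define FC_TX      (1u << 0)
#define FC_RX      (1u << 1)

static uint8_t resolve_flowctrl(uint16_t lcl, uint16_t rmt)
{
	if (lcl & rmt & PAUSE_CAP)		/* both sides symmetric */
		return FC_TX | FC_RX;
	if (lcl & rmt & PAUSE_ASYM) {		/* asymmetric negotiation */
		if (lcl & PAUSE_CAP)
			return FC_RX;		/* receive-direction only */
		if (rmt & PAUSE_CAP)
			return FC_TX;		/* transmit-direction only */
	}
	return 0;
}

int main(void)
{
	printf("sym:  %#x\n",
	       (unsigned)resolve_flowctrl(PAUSE_CAP, PAUSE_CAP | PAUSE_ASYM));
	printf("asym: %#x\n",
	       (unsigned)resolve_flowctrl(PAUSE_CAP | PAUSE_ASYM, PAUSE_ASYM));
	return 0;
}

Symmetric pause on both sides enables both directions; otherwise asymmetric pause picks a single direction depending on which side also advertised the symmetric capability.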
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index c761194bb323..fc95affaf76b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | |||
| @@ -362,7 +362,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev, | |||
| 362 | 362 | ||
| 363 | for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it)) | 363 | for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it)) |
| 364 | if (bitmap_iterator_test(&it)) | 364 | if (bitmap_iterator_test(&it)) |
| 365 | data[index++] = ((unsigned long *)&priv->stats)[i]; | 365 | data[index++] = ((unsigned long *)&dev->stats)[i]; |
| 366 | 366 | ||
| 367 | for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it)) | 367 | for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it)) |
| 368 | if (bitmap_iterator_test(&it)) | 368 | if (bitmap_iterator_test(&it)) |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 92e0624f4cf0..19ceced6736c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
| @@ -1296,15 +1296,16 @@ static void mlx4_en_tx_timeout(struct net_device *dev) | |||
| 1296 | } | 1296 | } |
| 1297 | 1297 | ||
| 1298 | 1298 | ||
| 1299 | static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev) | 1299 | static struct rtnl_link_stats64 * |
| 1300 | mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) | ||
| 1300 | { | 1301 | { |
| 1301 | struct mlx4_en_priv *priv = netdev_priv(dev); | 1302 | struct mlx4_en_priv *priv = netdev_priv(dev); |
| 1302 | 1303 | ||
| 1303 | spin_lock_bh(&priv->stats_lock); | 1304 | spin_lock_bh(&priv->stats_lock); |
| 1304 | memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats)); | 1305 | netdev_stats_to_stats64(stats, &dev->stats); |
| 1305 | spin_unlock_bh(&priv->stats_lock); | 1306 | spin_unlock_bh(&priv->stats_lock); |
| 1306 | 1307 | ||
| 1307 | return &priv->ret_stats; | 1308 | return stats; |
| 1308 | } | 1309 | } |
| 1309 | 1310 | ||
| 1310 | static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) | 1311 | static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) |
| @@ -1876,7 +1877,6 @@ static void mlx4_en_clear_stats(struct net_device *dev) | |||
| 1876 | if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) | 1877 | if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) |
| 1877 | en_dbg(HW, priv, "Failed dumping statistics\n"); | 1878 | en_dbg(HW, priv, "Failed dumping statistics\n"); |
| 1878 | 1879 | ||
| 1879 | memset(&priv->stats, 0, sizeof(priv->stats)); | ||
| 1880 | memset(&priv->pstats, 0, sizeof(priv->pstats)); | 1880 | memset(&priv->pstats, 0, sizeof(priv->pstats)); |
| 1881 | memset(&priv->pkstats, 0, sizeof(priv->pkstats)); | 1881 | memset(&priv->pkstats, 0, sizeof(priv->pkstats)); |
| 1882 | memset(&priv->port_stats, 0, sizeof(priv->port_stats)); | 1882 | memset(&priv->port_stats, 0, sizeof(priv->port_stats)); |
| @@ -1892,6 +1892,11 @@ static void mlx4_en_clear_stats(struct net_device *dev) | |||
| 1892 | priv->tx_ring[i]->bytes = 0; | 1892 | priv->tx_ring[i]->bytes = 0; |
| 1893 | priv->tx_ring[i]->packets = 0; | 1893 | priv->tx_ring[i]->packets = 0; |
| 1894 | priv->tx_ring[i]->tx_csum = 0; | 1894 | priv->tx_ring[i]->tx_csum = 0; |
| 1895 | priv->tx_ring[i]->tx_dropped = 0; | ||
| 1896 | priv->tx_ring[i]->queue_stopped = 0; | ||
| 1897 | priv->tx_ring[i]->wake_queue = 0; | ||
| 1898 | priv->tx_ring[i]->tso_packets = 0; | ||
| 1899 | priv->tx_ring[i]->xmit_more = 0; | ||
| 1895 | } | 1900 | } |
| 1896 | for (i = 0; i < priv->rx_ring_num; i++) { | 1901 | for (i = 0; i < priv->rx_ring_num; i++) { |
| 1897 | priv->rx_ring[i]->bytes = 0; | 1902 | priv->rx_ring[i]->bytes = 0; |
| @@ -2482,7 +2487,7 @@ static const struct net_device_ops mlx4_netdev_ops = { | |||
| 2482 | .ndo_stop = mlx4_en_close, | 2487 | .ndo_stop = mlx4_en_close, |
| 2483 | .ndo_start_xmit = mlx4_en_xmit, | 2488 | .ndo_start_xmit = mlx4_en_xmit, |
| 2484 | .ndo_select_queue = mlx4_en_select_queue, | 2489 | .ndo_select_queue = mlx4_en_select_queue, |
| 2485 | .ndo_get_stats = mlx4_en_get_stats, | 2490 | .ndo_get_stats64 = mlx4_en_get_stats64, |
| 2486 | .ndo_set_rx_mode = mlx4_en_set_rx_mode, | 2491 | .ndo_set_rx_mode = mlx4_en_set_rx_mode, |
| 2487 | .ndo_set_mac_address = mlx4_en_set_mac, | 2492 | .ndo_set_mac_address = mlx4_en_set_mac, |
| 2488 | .ndo_validate_addr = eth_validate_addr, | 2493 | .ndo_validate_addr = eth_validate_addr, |
| @@ -2514,7 +2519,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = { | |||
| 2514 | .ndo_stop = mlx4_en_close, | 2519 | .ndo_stop = mlx4_en_close, |
| 2515 | .ndo_start_xmit = mlx4_en_xmit, | 2520 | .ndo_start_xmit = mlx4_en_xmit, |
| 2516 | .ndo_select_queue = mlx4_en_select_queue, | 2521 | .ndo_select_queue = mlx4_en_select_queue, |
| 2517 | .ndo_get_stats = mlx4_en_get_stats, | 2522 | .ndo_get_stats64 = mlx4_en_get_stats64, |
| 2518 | .ndo_set_rx_mode = mlx4_en_set_rx_mode, | 2523 | .ndo_set_rx_mode = mlx4_en_set_rx_mode, |
| 2519 | .ndo_set_mac_address = mlx4_en_set_mac, | 2524 | .ndo_set_mac_address = mlx4_en_set_mac, |
| 2520 | .ndo_validate_addr = eth_validate_addr, | 2525 | .ndo_validate_addr = eth_validate_addr, |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c index 20b6c2e678b8..5aa8b751f417 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c | |||
| @@ -152,8 +152,9 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) | |||
| 152 | struct mlx4_counter tmp_counter_stats; | 152 | struct mlx4_counter tmp_counter_stats; |
| 153 | struct mlx4_en_stat_out_mbox *mlx4_en_stats; | 153 | struct mlx4_en_stat_out_mbox *mlx4_en_stats; |
| 154 | struct mlx4_en_stat_out_flow_control_mbox *flowstats; | 154 | struct mlx4_en_stat_out_flow_control_mbox *flowstats; |
| 155 | struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]); | 155 | struct net_device *dev = mdev->pndev[port]; |
| 156 | struct net_device_stats *stats = &priv->stats; | 156 | struct mlx4_en_priv *priv = netdev_priv(dev); |
| 157 | struct net_device_stats *stats = &dev->stats; | ||
| 157 | struct mlx4_cmd_mailbox *mailbox; | 158 | struct mlx4_cmd_mailbox *mailbox; |
| 158 | u64 in_mod = reset << 8 | port; | 159 | u64 in_mod = reset << 8 | port; |
| 159 | int err; | 160 | int err; |
| @@ -188,6 +189,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) | |||
| 188 | } | 189 | } |
| 189 | stats->tx_packets = 0; | 190 | stats->tx_packets = 0; |
| 190 | stats->tx_bytes = 0; | 191 | stats->tx_bytes = 0; |
| 192 | stats->tx_dropped = 0; | ||
| 191 | priv->port_stats.tx_chksum_offload = 0; | 193 | priv->port_stats.tx_chksum_offload = 0; |
| 192 | priv->port_stats.queue_stopped = 0; | 194 | priv->port_stats.queue_stopped = 0; |
| 193 | priv->port_stats.wake_queue = 0; | 195 | priv->port_stats.wake_queue = 0; |
| @@ -199,6 +201,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) | |||
| 199 | 201 | ||
| 200 | stats->tx_packets += ring->packets; | 202 | stats->tx_packets += ring->packets; |
| 201 | stats->tx_bytes += ring->bytes; | 203 | stats->tx_bytes += ring->bytes; |
| 204 | stats->tx_dropped += ring->tx_dropped; | ||
| 202 | priv->port_stats.tx_chksum_offload += ring->tx_csum; | 205 | priv->port_stats.tx_chksum_offload += ring->tx_csum; |
| 203 | priv->port_stats.queue_stopped += ring->queue_stopped; | 206 | priv->port_stats.queue_stopped += ring->queue_stopped; |
| 204 | priv->port_stats.wake_queue += ring->wake_queue; | 207 | priv->port_stats.wake_queue += ring->wake_queue; |
| @@ -237,21 +240,12 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) | |||
| 237 | stats->multicast = en_stats_adder(&mlx4_en_stats->MCAST_prio_0, | 240 | stats->multicast = en_stats_adder(&mlx4_en_stats->MCAST_prio_0, |
| 238 | &mlx4_en_stats->MCAST_prio_1, | 241 | &mlx4_en_stats->MCAST_prio_1, |
| 239 | NUM_PRIORITIES); | 242 | NUM_PRIORITIES); |
| 240 | stats->collisions = 0; | ||
| 241 | stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) + | 243 | stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) + |
| 242 | sw_rx_dropped; | 244 | sw_rx_dropped; |
| 243 | stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength); | 245 | stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength); |
| 244 | stats->rx_over_errors = 0; | ||
| 245 | stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC); | 246 | stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC); |
| 246 | stats->rx_frame_errors = 0; | ||
| 247 | stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); | 247 | stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); |
| 248 | stats->rx_missed_errors = 0; | 248 | stats->tx_dropped += be32_to_cpu(mlx4_en_stats->TDROP); |
| 249 | stats->tx_aborted_errors = 0; | ||
| 250 | stats->tx_carrier_errors = 0; | ||
| 251 | stats->tx_fifo_errors = 0; | ||
| 252 | stats->tx_heartbeat_errors = 0; | ||
| 253 | stats->tx_window_errors = 0; | ||
| 254 | stats->tx_dropped = be32_to_cpu(mlx4_en_stats->TDROP); | ||
| 255 | 249 | ||
| 256 | /* RX stats */ | 250 | /* RX stats */ |
| 257 | priv->pkstats.rx_multicast_packets = stats->multicast; | 251 | priv->pkstats.rx_multicast_packets = stats->multicast; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index f6e61570cb2c..76aa4d27183c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c | |||
| @@ -726,12 +726,12 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 726 | bool inline_ok; | 726 | bool inline_ok; |
| 727 | u32 ring_cons; | 727 | u32 ring_cons; |
| 728 | 728 | ||
| 729 | if (!priv->port_up) | ||
| 730 | goto tx_drop; | ||
| 731 | |||
| 732 | tx_ind = skb_get_queue_mapping(skb); | 729 | tx_ind = skb_get_queue_mapping(skb); |
| 733 | ring = priv->tx_ring[tx_ind]; | 730 | ring = priv->tx_ring[tx_ind]; |
| 734 | 731 | ||
| 732 | if (!priv->port_up) | ||
| 733 | goto tx_drop; | ||
| 734 | |||
| 735 | /* fetch ring->cons far ahead before needing it to avoid stall */ | 735 | /* fetch ring->cons far ahead before needing it to avoid stall */ |
| 736 | ring_cons = ACCESS_ONCE(ring->cons); | 736 | ring_cons = ACCESS_ONCE(ring->cons); |
| 737 | 737 | ||
| @@ -1030,7 +1030,7 @@ tx_drop_unmap: | |||
| 1030 | 1030 | ||
| 1031 | tx_drop: | 1031 | tx_drop: |
| 1032 | dev_kfree_skb_any(skb); | 1032 | dev_kfree_skb_any(skb); |
| 1033 | priv->stats.tx_dropped++; | 1033 | ring->tx_dropped++; |
| 1034 | return NETDEV_TX_OK; | 1034 | return NETDEV_TX_OK; |
| 1035 | } | 1035 | } |
| 1036 | 1036 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index cc84e09f324a..467d47ed2c39 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | |||
| @@ -270,6 +270,7 @@ struct mlx4_en_tx_ring { | |||
| 270 | unsigned long tx_csum; | 270 | unsigned long tx_csum; |
| 271 | unsigned long tso_packets; | 271 | unsigned long tso_packets; |
| 272 | unsigned long xmit_more; | 272 | unsigned long xmit_more; |
| 273 | unsigned int tx_dropped; | ||
| 273 | struct mlx4_bf bf; | 274 | struct mlx4_bf bf; |
| 274 | unsigned long queue_stopped; | 275 | unsigned long queue_stopped; |
| 275 | 276 | ||
| @@ -482,8 +483,6 @@ struct mlx4_en_priv { | |||
| 482 | struct mlx4_en_port_profile *prof; | 483 | struct mlx4_en_port_profile *prof; |
| 483 | struct net_device *dev; | 484 | struct net_device *dev; |
| 484 | unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; | 485 | unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; |
| 485 | struct net_device_stats stats; | ||
| 486 | struct net_device_stats ret_stats; | ||
| 487 | struct mlx4_en_port_state port_state; | 486 | struct mlx4_en_port_state port_state; |
| 488 | spinlock_t stats_lock; | 487 | spinlock_t stats_lock; |
| 489 | struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES]; | 488 | struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES]; |
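
Taken together, the mlx4 hunks drop the driver-private net_device_stats copies, report through dev->stats and a 64-bit ndo_get_stats64 handler, and move TX drop accounting into a per-ring tx_dropped counter that the statistics dump and mlx4_en_clear_stats() now handle explicitly. A simplified, self-contained sketch of that per-ring aggregation; names and types are stand-ins for the driver's structures:

#include <stdio.h>
#include <stdint.h>

#define NUM_TX_RINGS 4

struct tx_ring { unsigned long packets, bytes; unsigned int tx_dropped; };
struct dev_stats { uint64_t tx_packets, tx_bytes, tx_dropped; };

static void refresh_stats(struct dev_stats *s,
			  const struct tx_ring rings[], int n)
{
	s->tx_packets = s->tx_bytes = s->tx_dropped = 0;
	for (int i = 0; i < n; i++) {
		s->tx_packets += rings[i].packets;
		s->tx_bytes   += rings[i].bytes;
		s->tx_dropped += rings[i].tx_dropped;	/* new per-ring counter */
	}
}

int main(void)
{
	const struct tx_ring rings[NUM_TX_RINGS] = {
		{ 100, 64000, 1 }, { 50, 32000, 0 }, { 10, 6400, 2 }, { 0, 0, 0 },
	};
	struct dev_stats s;

	refresh_stats(&s, rings, NUM_TX_RINGS);
	printf("tx: %llu pkts, %llu bytes, %llu dropped\n",
	       (unsigned long long)s.tx_packets,
	       (unsigned long long)s.tx_bytes,
	       (unsigned long long)s.tx_dropped);
	return 0;
}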
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index fd4392999eee..f5c8d5db25a8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |||
| @@ -3192,10 +3192,7 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv) | |||
| 3192 | flush_workqueue(priv->wq); | 3192 | flush_workqueue(priv->wq); |
| 3193 | if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) { | 3193 | if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) { |
| 3194 | netif_device_detach(netdev); | 3194 | netif_device_detach(netdev); |
| 3195 | mutex_lock(&priv->state_lock); | 3195 | mlx5e_close(netdev); |
| 3196 | if (test_bit(MLX5E_STATE_OPENED, &priv->state)) | ||
| 3197 | mlx5e_close_locked(netdev); | ||
| 3198 | mutex_unlock(&priv->state_lock); | ||
| 3199 | } else { | 3196 | } else { |
| 3200 | unregister_netdev(netdev); | 3197 | unregister_netdev(netdev); |
| 3201 | } | 3198 | } |
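
The mlx5e shutdown path now calls mlx5e_close() rather than open-coding the lock-and-test sequence; presumably the public entry point takes the state lock and checks the OPENED state itself, so the behaviour is the same with less duplication. The generic "locked helper plus locking wrapper" pattern looks like this in a user-space sketch (pthreads, not the mlx5 code):

#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>

struct dev_state {
	pthread_mutex_t lock;
	bool opened;
};

static void dev_close_locked(struct dev_state *s)
{
	/* caller must hold s->lock */
	s->opened = false;
}

static void dev_close(struct dev_state *s)
{
	pthread_mutex_lock(&s->lock);
	if (s->opened)
		dev_close_locked(s);
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct dev_state s = { PTHREAD_MUTEX_INITIALIZER, true };

	dev_close(&s);	/* callable from paths that don't already hold the lock */
	printf("opened=%d\n", s.opened);
	return 0;
}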
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 229ab16fb8d3..b000ddc29553 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | |||
| @@ -317,7 +317,8 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) | |||
| 317 | while ((sq->pc & wq->sz_m1) > sq->edge) | 317 | while ((sq->pc & wq->sz_m1) > sq->edge) |
| 318 | mlx5e_send_nop(sq, false); | 318 | mlx5e_send_nop(sq, false); |
| 319 | 319 | ||
| 320 | sq->bf_budget = bf ? sq->bf_budget - 1 : 0; | 320 | if (bf) |
| 321 | sq->bf_budget--; | ||
| 321 | 322 | ||
| 322 | sq->stats.packets++; | 323 | sq->stats.packets++; |
| 323 | sq->stats.bytes += num_bytes; | 324 | sq->stats.bytes += num_bytes; |
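
The en_tx.c change only decrements the blue-flame budget when a BF doorbell was actually used; the old conditional expression zeroed the budget on every ordinary send. The difference in miniature, purely illustrative:

#include <stdio.h>

static int bf_budget_old(int budget, int used_bf)
{
	return used_bf ? budget - 1 : 0;	/* wipes the budget when unused */
}

static int bf_budget_new(int budget, int used_bf)
{
	if (used_bf)
		budget--;			/* only spend a credit on real BF use */
	return budget;
}

int main(void)
{
	printf("old: %d, new: %d\n", bf_budget_old(8, 0), bf_budget_new(8, 0));
	return 0;
}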
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index b84a6918a700..aebbd6ccb9fe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | |||
| @@ -383,7 +383,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, | |||
| 383 | match_v, | 383 | match_v, |
| 384 | MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, | 384 | MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, |
| 385 | 0, &dest); | 385 | 0, &dest); |
| 386 | if (IS_ERR_OR_NULL(flow_rule)) { | 386 | if (IS_ERR(flow_rule)) { |
| 387 | pr_warn( | 387 | pr_warn( |
| 388 | "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n", | 388 | "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n", |
| 389 | dmac_v, dmac_c, vport, PTR_ERR(flow_rule)); | 389 | dmac_v, dmac_c, vport, PTR_ERR(flow_rule)); |
| @@ -457,7 +457,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) | |||
| 457 | 457 | ||
| 458 | table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); | 458 | table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); |
| 459 | fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0); | 459 | fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0); |
| 460 | if (IS_ERR_OR_NULL(fdb)) { | 460 | if (IS_ERR(fdb)) { |
| 461 | err = PTR_ERR(fdb); | 461 | err = PTR_ERR(fdb); |
| 462 | esw_warn(dev, "Failed to create FDB Table err %d\n", err); | 462 | esw_warn(dev, "Failed to create FDB Table err %d\n", err); |
| 463 | goto out; | 463 | goto out; |
| @@ -474,7 +474,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) | |||
| 474 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3); | 474 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3); |
| 475 | eth_broadcast_addr(dmac); | 475 | eth_broadcast_addr(dmac); |
| 476 | g = mlx5_create_flow_group(fdb, flow_group_in); | 476 | g = mlx5_create_flow_group(fdb, flow_group_in); |
| 477 | if (IS_ERR_OR_NULL(g)) { | 477 | if (IS_ERR(g)) { |
| 478 | err = PTR_ERR(g); | 478 | err = PTR_ERR(g); |
| 479 | esw_warn(dev, "Failed to create flow group err(%d)\n", err); | 479 | esw_warn(dev, "Failed to create flow group err(%d)\n", err); |
| 480 | goto out; | 480 | goto out; |
| @@ -489,7 +489,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) | |||
| 489 | eth_zero_addr(dmac); | 489 | eth_zero_addr(dmac); |
| 490 | dmac[0] = 0x01; | 490 | dmac[0] = 0x01; |
| 491 | g = mlx5_create_flow_group(fdb, flow_group_in); | 491 | g = mlx5_create_flow_group(fdb, flow_group_in); |
| 492 | if (IS_ERR_OR_NULL(g)) { | 492 | if (IS_ERR(g)) { |
| 493 | err = PTR_ERR(g); | 493 | err = PTR_ERR(g); |
| 494 | esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err); | 494 | esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err); |
| 495 | goto out; | 495 | goto out; |
| @@ -506,7 +506,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) | |||
| 506 | MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1); | 506 | MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1); |
| 507 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1); | 507 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1); |
| 508 | g = mlx5_create_flow_group(fdb, flow_group_in); | 508 | g = mlx5_create_flow_group(fdb, flow_group_in); |
| 509 | if (IS_ERR_OR_NULL(g)) { | 509 | if (IS_ERR(g)) { |
| 510 | err = PTR_ERR(g); | 510 | err = PTR_ERR(g); |
| 511 | esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err); | 511 | esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err); |
| 512 | goto out; | 512 | goto out; |
| @@ -529,7 +529,7 @@ out: | |||
| 529 | } | 529 | } |
| 530 | } | 530 | } |
| 531 | 531 | ||
| 532 | kfree(flow_group_in); | 532 | kvfree(flow_group_in); |
| 533 | return err; | 533 | return err; |
| 534 | } | 534 | } |
| 535 | 535 | ||
| @@ -651,6 +651,7 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw, | |||
| 651 | esw_fdb_set_vport_rule(esw, | 651 | esw_fdb_set_vport_rule(esw, |
| 652 | mac, | 652 | mac, |
| 653 | vport_idx); | 653 | vport_idx); |
| 654 | iter_vaddr->mc_promisc = true; | ||
| 654 | break; | 655 | break; |
| 655 | case MLX5_ACTION_DEL: | 656 | case MLX5_ACTION_DEL: |
| 656 | if (!iter_vaddr) | 657 | if (!iter_vaddr) |
| @@ -1060,7 +1061,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, | |||
| 1060 | return; | 1061 | return; |
| 1061 | 1062 | ||
| 1062 | acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); | 1063 | acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); |
| 1063 | if (IS_ERR_OR_NULL(acl)) { | 1064 | if (IS_ERR(acl)) { |
| 1064 | err = PTR_ERR(acl); | 1065 | err = PTR_ERR(acl); |
| 1065 | esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n", | 1066 | esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n", |
| 1066 | vport->vport, err); | 1067 | vport->vport, err); |
| @@ -1075,7 +1076,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, | |||
| 1075 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); | 1076 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); |
| 1076 | 1077 | ||
| 1077 | vlan_grp = mlx5_create_flow_group(acl, flow_group_in); | 1078 | vlan_grp = mlx5_create_flow_group(acl, flow_group_in); |
| 1078 | if (IS_ERR_OR_NULL(vlan_grp)) { | 1079 | if (IS_ERR(vlan_grp)) { |
| 1079 | err = PTR_ERR(vlan_grp); | 1080 | err = PTR_ERR(vlan_grp); |
| 1080 | esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n", | 1081 | esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n", |
| 1081 | vport->vport, err); | 1082 | vport->vport, err); |
| @@ -1086,7 +1087,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, | |||
| 1086 | MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1); | 1087 | MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1); |
| 1087 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); | 1088 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); |
| 1088 | drop_grp = mlx5_create_flow_group(acl, flow_group_in); | 1089 | drop_grp = mlx5_create_flow_group(acl, flow_group_in); |
| 1089 | if (IS_ERR_OR_NULL(drop_grp)) { | 1090 | if (IS_ERR(drop_grp)) { |
| 1090 | err = PTR_ERR(drop_grp); | 1091 | err = PTR_ERR(drop_grp); |
| 1091 | esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n", | 1092 | esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n", |
| 1092 | vport->vport, err); | 1093 | vport->vport, err); |
| @@ -1097,7 +1098,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, | |||
| 1097 | vport->egress.drop_grp = drop_grp; | 1098 | vport->egress.drop_grp = drop_grp; |
| 1098 | vport->egress.allowed_vlans_grp = vlan_grp; | 1099 | vport->egress.allowed_vlans_grp = vlan_grp; |
| 1099 | out: | 1100 | out: |
| 1100 | kfree(flow_group_in); | 1101 | kvfree(flow_group_in); |
| 1101 | if (err && !IS_ERR_OR_NULL(vlan_grp)) | 1102 | if (err && !IS_ERR_OR_NULL(vlan_grp)) |
| 1102 | mlx5_destroy_flow_group(vlan_grp); | 1103 | mlx5_destroy_flow_group(vlan_grp); |
| 1103 | if (err && !IS_ERR_OR_NULL(acl)) | 1104 | if (err && !IS_ERR_OR_NULL(acl)) |
| @@ -1174,7 +1175,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, | |||
| 1174 | return; | 1175 | return; |
| 1175 | 1176 | ||
| 1176 | acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); | 1177 | acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); |
| 1177 | if (IS_ERR_OR_NULL(acl)) { | 1178 | if (IS_ERR(acl)) { |
| 1178 | err = PTR_ERR(acl); | 1179 | err = PTR_ERR(acl); |
| 1179 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n", | 1180 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n", |
| 1180 | vport->vport, err); | 1181 | vport->vport, err); |
| @@ -1192,7 +1193,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, | |||
| 1192 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); | 1193 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); |
| 1193 | 1194 | ||
| 1194 | g = mlx5_create_flow_group(acl, flow_group_in); | 1195 | g = mlx5_create_flow_group(acl, flow_group_in); |
| 1195 | if (IS_ERR_OR_NULL(g)) { | 1196 | if (IS_ERR(g)) { |
| 1196 | err = PTR_ERR(g); | 1197 | err = PTR_ERR(g); |
| 1197 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n", | 1198 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n", |
| 1198 | vport->vport, err); | 1199 | vport->vport, err); |
| @@ -1207,7 +1208,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, | |||
| 1207 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); | 1208 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); |
| 1208 | 1209 | ||
| 1209 | g = mlx5_create_flow_group(acl, flow_group_in); | 1210 | g = mlx5_create_flow_group(acl, flow_group_in); |
| 1210 | if (IS_ERR_OR_NULL(g)) { | 1211 | if (IS_ERR(g)) { |
| 1211 | err = PTR_ERR(g); | 1212 | err = PTR_ERR(g); |
| 1212 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n", | 1213 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n", |
| 1213 | vport->vport, err); | 1214 | vport->vport, err); |
| @@ -1223,7 +1224,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, | |||
| 1223 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2); | 1224 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2); |
| 1224 | 1225 | ||
| 1225 | g = mlx5_create_flow_group(acl, flow_group_in); | 1226 | g = mlx5_create_flow_group(acl, flow_group_in); |
| 1226 | if (IS_ERR_OR_NULL(g)) { | 1227 | if (IS_ERR(g)) { |
| 1227 | err = PTR_ERR(g); | 1228 | err = PTR_ERR(g); |
| 1228 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n", | 1229 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n", |
| 1229 | vport->vport, err); | 1230 | vport->vport, err); |
| @@ -1236,7 +1237,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, | |||
| 1236 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3); | 1237 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3); |
| 1237 | 1238 | ||
| 1238 | g = mlx5_create_flow_group(acl, flow_group_in); | 1239 | g = mlx5_create_flow_group(acl, flow_group_in); |
| 1239 | if (IS_ERR_OR_NULL(g)) { | 1240 | if (IS_ERR(g)) { |
| 1240 | err = PTR_ERR(g); | 1241 | err = PTR_ERR(g); |
| 1241 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n", | 1242 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n", |
| 1242 | vport->vport, err); | 1243 | vport->vport, err); |
| @@ -1259,7 +1260,7 @@ out: | |||
| 1259 | mlx5_destroy_flow_table(vport->ingress.acl); | 1260 | mlx5_destroy_flow_table(vport->ingress.acl); |
| 1260 | } | 1261 | } |
| 1261 | 1262 | ||
| 1262 | kfree(flow_group_in); | 1263 | kvfree(flow_group_in); |
| 1263 | } | 1264 | } |
| 1264 | 1265 | ||
| 1265 | static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, | 1266 | static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, |
| @@ -1363,7 +1364,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, | |||
| 1363 | match_v, | 1364 | match_v, |
| 1364 | MLX5_FLOW_CONTEXT_ACTION_ALLOW, | 1365 | MLX5_FLOW_CONTEXT_ACTION_ALLOW, |
| 1365 | 0, NULL); | 1366 | 0, NULL); |
| 1366 | if (IS_ERR_OR_NULL(vport->ingress.allow_rule)) { | 1367 | if (IS_ERR(vport->ingress.allow_rule)) { |
| 1367 | err = PTR_ERR(vport->ingress.allow_rule); | 1368 | err = PTR_ERR(vport->ingress.allow_rule); |
| 1368 | pr_warn("vport[%d] configure ingress allow rule, err(%d)\n", | 1369 | pr_warn("vport[%d] configure ingress allow rule, err(%d)\n", |
| 1369 | vport->vport, err); | 1370 | vport->vport, err); |
| @@ -1380,7 +1381,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, | |||
| 1380 | match_v, | 1381 | match_v, |
| 1381 | MLX5_FLOW_CONTEXT_ACTION_DROP, | 1382 | MLX5_FLOW_CONTEXT_ACTION_DROP, |
| 1382 | 0, NULL); | 1383 | 0, NULL); |
| 1383 | if (IS_ERR_OR_NULL(vport->ingress.drop_rule)) { | 1384 | if (IS_ERR(vport->ingress.drop_rule)) { |
| 1384 | err = PTR_ERR(vport->ingress.drop_rule); | 1385 | err = PTR_ERR(vport->ingress.drop_rule); |
| 1385 | pr_warn("vport[%d] configure ingress drop rule, err(%d)\n", | 1386 | pr_warn("vport[%d] configure ingress drop rule, err(%d)\n", |
| 1386 | vport->vport, err); | 1387 | vport->vport, err); |
| @@ -1439,7 +1440,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, | |||
| 1439 | match_v, | 1440 | match_v, |
| 1440 | MLX5_FLOW_CONTEXT_ACTION_ALLOW, | 1441 | MLX5_FLOW_CONTEXT_ACTION_ALLOW, |
| 1441 | 0, NULL); | 1442 | 0, NULL); |
| 1442 | if (IS_ERR_OR_NULL(vport->egress.allowed_vlan)) { | 1443 | if (IS_ERR(vport->egress.allowed_vlan)) { |
| 1443 | err = PTR_ERR(vport->egress.allowed_vlan); | 1444 | err = PTR_ERR(vport->egress.allowed_vlan); |
| 1444 | pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n", | 1445 | pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n", |
| 1445 | vport->vport, err); | 1446 | vport->vport, err); |
| @@ -1457,7 +1458,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, | |||
| 1457 | match_v, | 1458 | match_v, |
| 1458 | MLX5_FLOW_CONTEXT_ACTION_DROP, | 1459 | MLX5_FLOW_CONTEXT_ACTION_DROP, |
| 1459 | 0, NULL); | 1460 | 0, NULL); |
| 1460 | if (IS_ERR_OR_NULL(vport->egress.drop_rule)) { | 1461 | if (IS_ERR(vport->egress.drop_rule)) { |
| 1461 | err = PTR_ERR(vport->egress.drop_rule); | 1462 | err = PTR_ERR(vport->egress.drop_rule); |
| 1462 | pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n", | 1463 | pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n", |
| 1463 | vport->vport, err); | 1464 | vport->vport, err); |
| @@ -1491,14 +1492,11 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, | |||
| 1491 | 1492 | ||
| 1492 | /* Sync with current vport context */ | 1493 | /* Sync with current vport context */ |
| 1493 | vport->enabled_events = enable_events; | 1494 | vport->enabled_events = enable_events; |
| 1494 | esw_vport_change_handle_locked(vport); | ||
| 1495 | |||
| 1496 | vport->enabled = true; | 1495 | vport->enabled = true; |
| 1497 | 1496 | ||
| 1498 | /* only PF is trusted by default */ | 1497 | /* only PF is trusted by default */ |
| 1499 | vport->trusted = (vport_num) ? false : true; | 1498 | vport->trusted = (vport_num) ? false : true; |
| 1500 | 1499 | esw_vport_change_handle_locked(vport); | |
| 1501 | arm_vport_context_events_cmd(esw->dev, vport_num, enable_events); | ||
| 1502 | 1500 | ||
| 1503 | esw->enabled_vports++; | 1501 | esw->enabled_vports++; |
| 1504 | esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num); | 1502 | esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num); |
| @@ -1728,11 +1726,24 @@ void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe) | |||
| 1728 | (esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev)) | 1726 | (esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev)) |
| 1729 | #define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports) | 1727 | #define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports) |
| 1730 | 1728 | ||
| 1729 | static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN]) | ||
| 1730 | { | ||
| 1731 | ((u8 *)node_guid)[7] = mac[0]; | ||
| 1732 | ((u8 *)node_guid)[6] = mac[1]; | ||
| 1733 | ((u8 *)node_guid)[5] = mac[2]; | ||
| 1734 | ((u8 *)node_guid)[4] = 0xff; | ||
| 1735 | ((u8 *)node_guid)[3] = 0xfe; | ||
| 1736 | ((u8 *)node_guid)[2] = mac[3]; | ||
| 1737 | ((u8 *)node_guid)[1] = mac[4]; | ||
| 1738 | ((u8 *)node_guid)[0] = mac[5]; | ||
| 1739 | } | ||
| 1740 | |||
| 1731 | int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, | 1741 | int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, |
| 1732 | int vport, u8 mac[ETH_ALEN]) | 1742 | int vport, u8 mac[ETH_ALEN]) |
| 1733 | { | 1743 | { |
| 1734 | int err = 0; | ||
| 1735 | struct mlx5_vport *evport; | 1744 | struct mlx5_vport *evport; |
| 1745 | u64 node_guid; | ||
| 1746 | int err = 0; | ||
| 1736 | 1747 | ||
| 1737 | if (!ESW_ALLOWED(esw)) | 1748 | if (!ESW_ALLOWED(esw)) |
| 1738 | return -EPERM; | 1749 | return -EPERM; |
| @@ -1756,11 +1767,17 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, | |||
| 1756 | return err; | 1767 | return err; |
| 1757 | } | 1768 | } |
| 1758 | 1769 | ||
| 1770 | node_guid_gen_from_mac(&node_guid, mac); | ||
| 1771 | err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid); | ||
| 1772 | if (err) | ||
| 1773 | mlx5_core_warn(esw->dev, | ||
| 1774 | "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n", | ||
| 1775 | vport, err); | ||
| 1776 | |||
| 1759 | mutex_lock(&esw->state_lock); | 1777 | mutex_lock(&esw->state_lock); |
| 1760 | if (evport->enabled) | 1778 | if (evport->enabled) |
| 1761 | err = esw_vport_ingress_config(esw, evport); | 1779 | err = esw_vport_ingress_config(esw, evport); |
| 1762 | mutex_unlock(&esw->state_lock); | 1780 | mutex_unlock(&esw->state_lock); |
| 1763 | |||
| 1764 | return err; | 1781 | return err; |
| 1765 | } | 1782 | } |
| 1766 | 1783 | ||
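
Besides tightening the IS_ERR checks and switching the flow-group input buffer to kvfree(), the eswitch change derives an RDMA node GUID from the VF's MAC whenever the administrator sets it, using the usual EUI-64 expansion: the three OUI bytes, then 0xff 0xfe, then the three device bytes. node_guid_gen_from_mac() fills the u64 byte-by-byte from the top index down; the sketch below builds the same sequence in printing order, so it shows the mapping rather than the exact in-memory layout.

#include <stdio.h>
#include <stdint.h>

static void mac_to_eui64(uint8_t guid[8], const uint8_t mac[6])
{
	guid[0] = mac[0];
	guid[1] = mac[1];
	guid[2] = mac[2];
	guid[3] = 0xff;	/* fixed filler bytes inserted mid-address */
	guid[4] = 0xfe;
	guid[5] = mac[3];
	guid[6] = mac[4];
	guid[7] = mac[5];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
	uint8_t guid[8];

	mac_to_eui64(guid, mac);
	for (int i = 0; i < 8; i++)
		printf("%02x%s", guid[i], i == 7 ? "\n" : ":");
	return 0;
}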
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 8b5f0b2c0d5c..e912a3d2505e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | |||
| @@ -1292,8 +1292,8 @@ static int update_root_ft_destroy(struct mlx5_flow_table *ft) | |||
| 1292 | ft->id); | 1292 | ft->id); |
| 1293 | return err; | 1293 | return err; |
| 1294 | } | 1294 | } |
| 1295 | root->root_ft = new_root_ft; | ||
| 1296 | } | 1295 | } |
| 1296 | root->root_ft = new_root_ft; | ||
| 1297 | return 0; | 1297 | return 0; |
| 1298 | } | 1298 | } |
| 1299 | 1299 | ||
| @@ -1767,6 +1767,9 @@ static void cleanup_root_ns(struct mlx5_core_dev *dev) | |||
| 1767 | 1767 | ||
| 1768 | void mlx5_cleanup_fs(struct mlx5_core_dev *dev) | 1768 | void mlx5_cleanup_fs(struct mlx5_core_dev *dev) |
| 1769 | { | 1769 | { |
| 1770 | if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) | ||
| 1771 | return; | ||
| 1772 | |||
| 1770 | cleanup_root_ns(dev); | 1773 | cleanup_root_ns(dev); |
| 1771 | cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns); | 1774 | cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns); |
| 1772 | cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns); | 1775 | cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns); |
| @@ -1828,29 +1831,36 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) | |||
| 1828 | { | 1831 | { |
| 1829 | int err = 0; | 1832 | int err = 0; |
| 1830 | 1833 | ||
| 1834 | if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) | ||
| 1835 | return 0; | ||
| 1836 | |||
| 1831 | err = mlx5_init_fc_stats(dev); | 1837 | err = mlx5_init_fc_stats(dev); |
| 1832 | if (err) | 1838 | if (err) |
| 1833 | return err; | 1839 | return err; |
| 1834 | 1840 | ||
| 1835 | if (MLX5_CAP_GEN(dev, nic_flow_table)) { | 1841 | if (MLX5_CAP_GEN(dev, nic_flow_table) && |
| 1842 | MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) { | ||
| 1836 | err = init_root_ns(dev); | 1843 | err = init_root_ns(dev); |
| 1837 | if (err) | 1844 | if (err) |
| 1838 | goto err; | 1845 | goto err; |
| 1839 | } | 1846 | } |
| 1847 | |||
| 1840 | if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { | 1848 | if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { |
| 1841 | err = init_fdb_root_ns(dev); | 1849 | if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) { |
| 1842 | if (err) | 1850 | err = init_fdb_root_ns(dev); |
| 1843 | goto err; | 1851 | if (err) |
| 1844 | } | 1852 | goto err; |
| 1845 | if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) { | 1853 | } |
| 1846 | err = init_egress_acl_root_ns(dev); | 1854 | if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) { |
| 1847 | if (err) | 1855 | err = init_egress_acl_root_ns(dev); |
| 1848 | goto err; | 1856 | if (err) |
| 1849 | } | 1857 | goto err; |
| 1850 | if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) { | 1858 | } |
| 1851 | err = init_ingress_acl_root_ns(dev); | 1859 | if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) { |
| 1852 | if (err) | 1860 | err = init_ingress_acl_root_ns(dev); |
| 1853 | goto err; | 1861 | if (err) |
| 1862 | goto err; | ||
| 1863 | } | ||
| 1854 | } | 1864 | } |
| 1855 | 1865 | ||
| 1856 | return 0; | 1866 | return 0; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index b720a274220d..b82d65802d96 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c | |||
| @@ -418,7 +418,7 @@ int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn) | |||
| 418 | if (out.hdr.status) | 418 | if (out.hdr.status) |
| 419 | err = mlx5_cmd_status_to_err(&out.hdr); | 419 | err = mlx5_cmd_status_to_err(&out.hdr); |
| 420 | else | 420 | else |
| 421 | *xrcdn = be32_to_cpu(out.xrcdn); | 421 | *xrcdn = be32_to_cpu(out.xrcdn) & 0xffffff; |
| 422 | 422 | ||
| 423 | return err; | 423 | return err; |
| 424 | } | 424 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index b69dadcfb897..daf44cd4c566 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c | |||
| @@ -508,6 +508,44 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid) | |||
| 508 | } | 508 | } |
| 509 | EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid); | 509 | EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid); |
| 510 | 510 | ||
| 511 | int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev, | ||
| 512 | u32 vport, u64 node_guid) | ||
| 513 | { | ||
| 514 | int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in); | ||
| 515 | void *nic_vport_context; | ||
| 516 | u8 *guid; | ||
| 517 | void *in; | ||
| 518 | int err; | ||
| 519 | |||
| 520 | if (!vport) | ||
| 521 | return -EINVAL; | ||
| 522 | if (!MLX5_CAP_GEN(mdev, vport_group_manager)) | ||
| 523 | return -EACCES; | ||
| 524 | if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify)) | ||
| 525 | return -ENOTSUPP; | ||
| 526 | |||
| 527 | in = mlx5_vzalloc(inlen); | ||
| 528 | if (!in) | ||
| 529 | return -ENOMEM; | ||
| 530 | |||
| 531 | MLX5_SET(modify_nic_vport_context_in, in, | ||
| 532 | field_select.node_guid, 1); | ||
| 533 | MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport); | ||
| 534 | MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport); | ||
| 535 | |||
| 536 | nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in, | ||
| 537 | in, nic_vport_context); | ||
| 538 | guid = MLX5_ADDR_OF(nic_vport_context, nic_vport_context, | ||
| 539 | node_guid); | ||
| 540 | MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid); | ||
| 541 | |||
| 542 | err = mlx5_modify_nic_vport_context(mdev, in, inlen); | ||
| 543 | |||
| 544 | kvfree(in); | ||
| 545 | |||
| 546 | return err; | ||
| 547 | } | ||
| 548 | |||
| 511 | int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev, | 549 | int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev, |
| 512 | u16 *qkey_viol_cntr) | 550 | u16 *qkey_viol_cntr) |
| 513 | { | 551 | { |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 4a7273771028..6f9e3ddff4a8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c | |||
| @@ -247,15 +247,23 @@ static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) | |||
| 247 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); | 247 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); |
| 248 | } | 248 | } |
| 249 | 249 | ||
| 250 | static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid) | 250 | static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port, |
| 251 | u8 swid) | ||
| 251 | { | 252 | { |
| 252 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; | ||
| 253 | char pspa_pl[MLXSW_REG_PSPA_LEN]; | 253 | char pspa_pl[MLXSW_REG_PSPA_LEN]; |
| 254 | 254 | ||
| 255 | mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port); | 255 | mlxsw_reg_pspa_pack(pspa_pl, swid, local_port); |
| 256 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); | 256 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); |
| 257 | } | 257 | } |
| 258 | 258 | ||
| 259 | static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid) | ||
| 260 | { | ||
| 261 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; | ||
| 262 | |||
| 263 | return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port, | ||
| 264 | swid); | ||
| 265 | } | ||
| 266 | |||
| 259 | static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, | 267 | static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, |
| 260 | bool enable) | 268 | bool enable) |
| 261 | { | 269 | { |
| @@ -305,9 +313,9 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) | |||
| 305 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); | 313 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); |
| 306 | } | 314 | } |
| 307 | 315 | ||
| 308 | static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, | 316 | static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, |
| 309 | u8 local_port, u8 *p_module, | 317 | u8 local_port, u8 *p_module, |
| 310 | u8 *p_width, u8 *p_lane) | 318 | u8 *p_width, u8 *p_lane) |
| 311 | { | 319 | { |
| 312 | char pmlp_pl[MLXSW_REG_PMLP_LEN]; | 320 | char pmlp_pl[MLXSW_REG_PMLP_LEN]; |
| 313 | int err; | 321 | int err; |
| @@ -322,16 +330,6 @@ static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, | |||
| 322 | return 0; | 330 | return 0; |
| 323 | } | 331 | } |
| 324 | 332 | ||
| 325 | static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, | ||
| 326 | u8 local_port, u8 *p_module, | ||
| 327 | u8 *p_width) | ||
| 328 | { | ||
| 329 | u8 lane; | ||
| 330 | |||
| 331 | return __mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, p_module, | ||
| 332 | p_width, &lane); | ||
| 333 | } | ||
| 334 | |||
| 335 | static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port, | 333 | static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port, |
| 336 | u8 module, u8 width, u8 lane) | 334 | u8 module, u8 width, u8 lane) |
| 337 | { | 335 | { |
| @@ -949,17 +947,11 @@ static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name, | |||
| 949 | size_t len) | 947 | size_t len) |
| 950 | { | 948 | { |
| 951 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); | 949 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); |
| 952 | u8 module, width, lane; | 950 | u8 module = mlxsw_sp_port->mapping.module; |
| 951 | u8 width = mlxsw_sp_port->mapping.width; | ||
| 952 | u8 lane = mlxsw_sp_port->mapping.lane; | ||
| 953 | int err; | 953 | int err; |
| 954 | 954 | ||
| 955 | err = __mlxsw_sp_port_module_info_get(mlxsw_sp_port->mlxsw_sp, | ||
| 956 | mlxsw_sp_port->local_port, | ||
| 957 | &module, &width, &lane); | ||
| 958 | if (err) { | ||
| 959 | netdev_err(dev, "Failed to retrieve module information\n"); | ||
| 960 | return err; | ||
| 961 | } | ||
| 962 | |||
| 963 | if (!mlxsw_sp_port->split) | 955 | if (!mlxsw_sp_port->split) |
| 964 | err = snprintf(name, len, "p%d", module + 1); | 956 | err = snprintf(name, len, "p%d", module + 1); |
| 965 | else | 957 | else |
| @@ -1681,8 +1673,8 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) | |||
| 1681 | return 0; | 1673 | return 0; |
| 1682 | } | 1674 | } |
| 1683 | 1675 | ||
| 1684 | static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, | 1676 | static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, |
| 1685 | bool split, u8 module, u8 width) | 1677 | bool split, u8 module, u8 width, u8 lane) |
| 1686 | { | 1678 | { |
| 1687 | struct mlxsw_sp_port *mlxsw_sp_port; | 1679 | struct mlxsw_sp_port *mlxsw_sp_port; |
| 1688 | struct net_device *dev; | 1680 | struct net_device *dev; |
| @@ -1697,6 +1689,9 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, | |||
| 1697 | mlxsw_sp_port->mlxsw_sp = mlxsw_sp; | 1689 | mlxsw_sp_port->mlxsw_sp = mlxsw_sp; |
| 1698 | mlxsw_sp_port->local_port = local_port; | 1690 | mlxsw_sp_port->local_port = local_port; |
| 1699 | mlxsw_sp_port->split = split; | 1691 | mlxsw_sp_port->split = split; |
| 1692 | mlxsw_sp_port->mapping.module = module; | ||
| 1693 | mlxsw_sp_port->mapping.width = width; | ||
| 1694 | mlxsw_sp_port->mapping.lane = lane; | ||
| 1700 | bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE); | 1695 | bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE); |
| 1701 | mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL); | 1696 | mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL); |
| 1702 | if (!mlxsw_sp_port->active_vlans) { | 1697 | if (!mlxsw_sp_port->active_vlans) { |
| @@ -1839,28 +1834,6 @@ err_port_active_vlans_alloc: | |||
| 1839 | return err; | 1834 | return err; |
| 1840 | } | 1835 | } |
| 1841 | 1836 | ||
| 1842 | static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, | ||
| 1843 | bool split, u8 module, u8 width, u8 lane) | ||
| 1844 | { | ||
| 1845 | int err; | ||
| 1846 | |||
| 1847 | err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width, | ||
| 1848 | lane); | ||
| 1849 | if (err) | ||
| 1850 | return err; | ||
| 1851 | |||
| 1852 | err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split, module, | ||
| 1853 | width); | ||
| 1854 | if (err) | ||
| 1855 | goto err_port_create; | ||
| 1856 | |||
| 1857 | return 0; | ||
| 1858 | |||
| 1859 | err_port_create: | ||
| 1860 | mlxsw_sp_port_module_unmap(mlxsw_sp, local_port); | ||
| 1861 | return err; | ||
| 1862 | } | ||
| 1863 | |||
| 1864 | static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port) | 1837 | static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port) |
| 1865 | { | 1838 | { |
| 1866 | struct net_device *dev = mlxsw_sp_port->dev; | 1839 | struct net_device *dev = mlxsw_sp_port->dev; |
| @@ -1909,8 +1882,8 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) | |||
| 1909 | 1882 | ||
| 1910 | static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) | 1883 | static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) |
| 1911 | { | 1884 | { |
| 1885 | u8 module, width, lane; | ||
| 1912 | size_t alloc_size; | 1886 | size_t alloc_size; |
| 1913 | u8 module, width; | ||
| 1914 | int i; | 1887 | int i; |
| 1915 | int err; | 1888 | int err; |
| 1916 | 1889 | ||
| @@ -1921,13 +1894,14 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) | |||
| 1921 | 1894 | ||
| 1922 | for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) { | 1895 | for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) { |
| 1923 | err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module, | 1896 | err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module, |
| 1924 | &width); | 1897 | &width, &lane); |
| 1925 | if (err) | 1898 | if (err) |
| 1926 | goto err_port_module_info_get; | 1899 | goto err_port_module_info_get; |
| 1927 | if (!width) | 1900 | if (!width) |
| 1928 | continue; | 1901 | continue; |
| 1929 | mlxsw_sp->port_to_module[i] = module; | 1902 | mlxsw_sp->port_to_module[i] = module; |
| 1930 | err = __mlxsw_sp_port_create(mlxsw_sp, i, false, module, width); | 1903 | err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width, |
| 1904 | lane); | ||
| 1931 | if (err) | 1905 | if (err) |
| 1932 | goto err_port_create; | 1906 | goto err_port_create; |
| 1933 | } | 1907 | } |
| @@ -1948,12 +1922,85 @@ static u8 mlxsw_sp_cluster_base_port_get(u8 local_port) | |||
| 1948 | return local_port - offset; | 1922 | return local_port - offset; |
| 1949 | } | 1923 | } |
| 1950 | 1924 | ||
| 1925 | static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, | ||
| 1926 | u8 module, unsigned int count) | ||
| 1927 | { | ||
| 1928 | u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count; | ||
| 1929 | int err, i; | ||
| 1930 | |||
| 1931 | for (i = 0; i < count; i++) { | ||
| 1932 | err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module, | ||
| 1933 | width, i * width); | ||
| 1934 | if (err) | ||
| 1935 | goto err_port_module_map; | ||
| 1936 | } | ||
| 1937 | |||
| 1938 | for (i = 0; i < count; i++) { | ||
| 1939 | err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0); | ||
| 1940 | if (err) | ||
| 1941 | goto err_port_swid_set; | ||
| 1942 | } | ||
| 1943 | |||
| 1944 | for (i = 0; i < count; i++) { | ||
| 1945 | err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true, | ||
| 1946 | module, width, i * width); | ||
| 1947 | if (err) | ||
| 1948 | goto err_port_create; | ||
| 1949 | } | ||
| 1950 | |||
| 1951 | return 0; | ||
| 1952 | |||
| 1953 | err_port_create: | ||
| 1954 | for (i--; i >= 0; i--) | ||
| 1955 | mlxsw_sp_port_remove(mlxsw_sp, base_port + i); | ||
| 1956 | i = count; | ||
| 1957 | err_port_swid_set: | ||
| 1958 | for (i--; i >= 0; i--) | ||
| 1959 | __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, | ||
| 1960 | MLXSW_PORT_SWID_DISABLED_PORT); | ||
| 1961 | i = count; | ||
| 1962 | err_port_module_map: | ||
| 1963 | for (i--; i >= 0; i--) | ||
| 1964 | mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i); | ||
| 1965 | return err; | ||
| 1966 | } | ||
| 1967 | |||
| 1968 | static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, | ||
| 1969 | u8 base_port, unsigned int count) | ||
| 1970 | { | ||
| 1971 | u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH; | ||
| 1972 | int i; | ||
| 1973 | |||
| 1974 | /* Split by four means we need to re-create two ports, otherwise | ||
| 1975 | * only one. | ||
| 1976 | */ | ||
| 1977 | count = count / 2; | ||
| 1978 | |||
| 1979 | for (i = 0; i < count; i++) { | ||
| 1980 | local_port = base_port + i * 2; | ||
| 1981 | module = mlxsw_sp->port_to_module[local_port]; | ||
| 1982 | |||
| 1983 | mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width, | ||
| 1984 | 0); | ||
| 1985 | } | ||
| 1986 | |||
| 1987 | for (i = 0; i < count; i++) | ||
| 1988 | __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0); | ||
| 1989 | |||
| 1990 | for (i = 0; i < count; i++) { | ||
| 1991 | local_port = base_port + i * 2; | ||
| 1992 | module = mlxsw_sp->port_to_module[local_port]; | ||
| 1993 | |||
| 1994 | mlxsw_sp_port_create(mlxsw_sp, local_port, false, module, | ||
| 1995 | width, 0); | ||
| 1996 | } | ||
| 1997 | } | ||
| 1998 | |||
| 1951 | static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, | 1999 | static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, |
| 1952 | unsigned int count) | 2000 | unsigned int count) |
| 1953 | { | 2001 | { |
| 1954 | struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); | 2002 | struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); |
| 1955 | struct mlxsw_sp_port *mlxsw_sp_port; | 2003 | struct mlxsw_sp_port *mlxsw_sp_port; |
| 1956 | u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count; | ||
| 1957 | u8 module, cur_width, base_port; | 2004 | u8 module, cur_width, base_port; |
| 1958 | int i; | 2005 | int i; |
| 1959 | int err; | 2006 | int err; |
| @@ -1965,18 +2012,14 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, | |||
| 1965 | return -EINVAL; | 2012 | return -EINVAL; |
| 1966 | } | 2013 | } |
| 1967 | 2014 | ||
| 2015 | module = mlxsw_sp_port->mapping.module; | ||
| 2016 | cur_width = mlxsw_sp_port->mapping.width; | ||
| 2017 | |||
| 1968 | if (count != 2 && count != 4) { | 2018 | if (count != 2 && count != 4) { |
| 1969 | netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n"); | 2019 | netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n"); |
| 1970 | return -EINVAL; | 2020 | return -EINVAL; |
| 1971 | } | 2021 | } |
| 1972 | 2022 | ||
| 1973 | err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module, | ||
| 1974 | &cur_width); | ||
| 1975 | if (err) { | ||
| 1976 | netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n"); | ||
| 1977 | return err; | ||
| 1978 | } | ||
| 1979 | |||
| 1980 | if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) { | 2023 | if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) { |
| 1981 | netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); | 2024 | netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); |
| 1982 | return -EINVAL; | 2025 | return -EINVAL; |
| @@ -2001,25 +2044,16 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, | |||
| 2001 | for (i = 0; i < count; i++) | 2044 | for (i = 0; i < count; i++) |
| 2002 | mlxsw_sp_port_remove(mlxsw_sp, base_port + i); | 2045 | mlxsw_sp_port_remove(mlxsw_sp, base_port + i); |
| 2003 | 2046 | ||
| 2004 | for (i = 0; i < count; i++) { | 2047 | err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count); |
| 2005 | err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true, | 2048 | if (err) { |
| 2006 | module, width, i * width); | 2049 | dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); |
| 2007 | if (err) { | 2050 | goto err_port_split_create; |
| 2008 | dev_err(mlxsw_sp->bus_info->dev, "Failed to create split port\n"); | ||
| 2009 | goto err_port_create; | ||
| 2010 | } | ||
| 2011 | } | 2051 | } |
| 2012 | 2052 | ||
| 2013 | return 0; | 2053 | return 0; |
| 2014 | 2054 | ||
| 2015 | err_port_create: | 2055 | err_port_split_create: |
| 2016 | for (i--; i >= 0; i--) | 2056 | mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); |
| 2017 | mlxsw_sp_port_remove(mlxsw_sp, base_port + i); | ||
| 2018 | for (i = 0; i < count / 2; i++) { | ||
| 2019 | module = mlxsw_sp->port_to_module[base_port + i * 2]; | ||
| 2020 | mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false, | ||
| 2021 | module, MLXSW_PORT_MODULE_MAX_WIDTH, 0); | ||
| 2022 | } | ||
| 2023 | return err; | 2057 | return err; |
| 2024 | } | 2058 | } |
| 2025 | 2059 | ||
| @@ -2027,10 +2061,9 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port) | |||
| 2027 | { | 2061 | { |
| 2028 | struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); | 2062 | struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); |
| 2029 | struct mlxsw_sp_port *mlxsw_sp_port; | 2063 | struct mlxsw_sp_port *mlxsw_sp_port; |
| 2030 | u8 module, cur_width, base_port; | 2064 | u8 cur_width, base_port; |
| 2031 | unsigned int count; | 2065 | unsigned int count; |
| 2032 | int i; | 2066 | int i; |
| 2033 | int err; | ||
| 2034 | 2067 | ||
| 2035 | mlxsw_sp_port = mlxsw_sp->ports[local_port]; | 2068 | mlxsw_sp_port = mlxsw_sp->ports[local_port]; |
| 2036 | if (!mlxsw_sp_port) { | 2069 | if (!mlxsw_sp_port) { |
| @@ -2044,12 +2077,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port) | |||
| 2044 | return -EINVAL; | 2077 | return -EINVAL; |
| 2045 | } | 2078 | } |
| 2046 | 2079 | ||
| 2047 | err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module, | 2080 | cur_width = mlxsw_sp_port->mapping.width; |
| 2048 | &cur_width); | ||
| 2049 | if (err) { | ||
| 2050 | netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n"); | ||
| 2051 | return err; | ||
| 2052 | } | ||
| 2053 | count = cur_width == 1 ? 4 : 2; | 2081 | count = cur_width == 1 ? 4 : 2; |
| 2054 | 2082 | ||
| 2055 | base_port = mlxsw_sp_cluster_base_port_get(local_port); | 2083 | base_port = mlxsw_sp_cluster_base_port_get(local_port); |
| @@ -2061,14 +2089,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port) | |||
| 2061 | for (i = 0; i < count; i++) | 2089 | for (i = 0; i < count; i++) |
| 2062 | mlxsw_sp_port_remove(mlxsw_sp, base_port + i); | 2090 | mlxsw_sp_port_remove(mlxsw_sp, base_port + i); |
| 2063 | 2091 | ||
| 2064 | for (i = 0; i < count / 2; i++) { | 2092 | mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); |
| 2065 | module = mlxsw_sp->port_to_module[base_port + i * 2]; | ||
| 2066 | err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false, | ||
| 2067 | module, MLXSW_PORT_MODULE_MAX_WIDTH, | ||
| 2068 | 0); | ||
| 2069 | if (err) | ||
| 2070 | dev_err(mlxsw_sp->bus_info->dev, "Failed to reinstantiate port\n"); | ||
| 2071 | } | ||
| 2072 | 2093 | ||
| 2073 | return 0; | 2094 | return 0; |
| 2074 | } | 2095 | } |
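
The split rework above stages the work in three passes (module map, swid set, port create) and, on failure, unwinds in reverse: only the ports already handled by the failing pass are undone, while every earlier pass is rolled back for all ports. A minimal standalone sketch of that staged-rollback shape follows; the step/undo helpers and NUM_SPLIT_PORTS are invented stand-ins, not the real driver calls.

/*
 * Minimal sketch, not the mlxsw driver code: mirrors the error-label
 * unwinding used by mlxsw_sp_port_split_create() above.
 */
#include <stdio.h>

#define NUM_SPLIT_PORTS 4

static int step_module_map(int i) { printf("map port %d\n", i); return 0; }
static int step_swid_set(int i)   { printf("swid port %d\n", i); return 0; }
static int step_create(int i)     { printf("create port %d\n", i); return 0; }

static void undo_module_map(int i) { printf("unmap port %d\n", i); }
static void undo_swid_set(int i)   { printf("disable swid %d\n", i); }
static void undo_create(int i)     { printf("remove port %d\n", i); }

static int split_create(void)
{
	int err, i;

	for (i = 0; i < NUM_SPLIT_PORTS; i++) {
		err = step_module_map(i);
		if (err)
			goto err_module_map;
	}
	for (i = 0; i < NUM_SPLIT_PORTS; i++) {
		err = step_swid_set(i);
		if (err)
			goto err_swid_set;
	}
	for (i = 0; i < NUM_SPLIT_PORTS; i++) {
		err = step_create(i);
		if (err)
			goto err_create;
	}
	return 0;

err_create:
	/* Remove only the ports created so far, then fall through. */
	for (i--; i >= 0; i--)
		undo_create(i);
	i = NUM_SPLIT_PORTS;	/* the swid pass completed for every port */
err_swid_set:
	for (i--; i >= 0; i--)
		undo_swid_set(i);
	i = NUM_SPLIT_PORTS;	/* the mapping pass completed for every port */
err_module_map:
	for (i--; i >= 0; i--)
		undo_module_map(i);
	return err;
}

int main(void)
{
	return split_create();
}

The same reverse-order unwinding is what lets mlxsw_sp_port_split() above replace its open-coded cleanup with a single call to mlxsw_sp_port_unsplit_create().
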
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index e2c022d3e2f3..13b30eaa13d4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h | |||
| @@ -229,6 +229,11 @@ struct mlxsw_sp_port { | |||
| 229 | struct ieee_maxrate *maxrate; | 229 | struct ieee_maxrate *maxrate; |
| 230 | struct ieee_pfc *pfc; | 230 | struct ieee_pfc *pfc; |
| 231 | } dcb; | 231 | } dcb; |
| 232 | struct { | ||
| 233 | u8 module; | ||
| 234 | u8 width; | ||
| 235 | u8 lane; | ||
| 236 | } mapping; | ||
| 232 | /* 802.1Q bridge VLANs */ | 237 | /* 802.1Q bridge VLANs */ |
| 233 | unsigned long *active_vlans; | 238 | unsigned long *active_vlans; |
| 234 | unsigned long *untagged_vlans; | 239 | unsigned long *untagged_vlans; |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index cbf58e1f9333..21ec1c2df2c7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c | |||
| @@ -192,9 +192,10 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, | |||
| 192 | struct dcbx_app_priority_entry *p_tbl, | 192 | struct dcbx_app_priority_entry *p_tbl, |
| 193 | u32 pri_tc_tbl, int count, bool dcbx_enabled) | 193 | u32 pri_tc_tbl, int count, bool dcbx_enabled) |
| 194 | { | 194 | { |
| 195 | u8 tc, priority, priority_map; | 195 | u8 tc, priority_map; |
| 196 | enum dcbx_protocol_type type; | 196 | enum dcbx_protocol_type type; |
| 197 | u16 protocol_id; | 197 | u16 protocol_id; |
| 198 | int priority; | ||
| 198 | bool enable; | 199 | bool enable; |
| 199 | int i; | 200 | int i; |
| 200 | 201 | ||
| @@ -221,7 +222,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, | |||
| 221 | * indication, but we only got here if there was an | 222 | * indication, but we only got here if there was an |
| 222 | * app tlv for the protocol, so dcbx must be enabled. | 223 | * app tlv for the protocol, so dcbx must be enabled. |
| 223 | */ | 224 | */ |
| 224 | enable = !!(type == DCBX_PROTOCOL_ETH); | 225 | enable = !(type == DCBX_PROTOCOL_ETH); |
| 225 | 226 | ||
| 226 | qed_dcbx_update_app_info(p_data, p_hwfn, enable, true, | 227 | qed_dcbx_update_app_info(p_data, p_hwfn, enable, true, |
| 227 | priority, tc, type); | 228 | priority, tc, type); |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 089016f46f26..2d89e8c16b32 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c | |||
| @@ -155,12 +155,14 @@ void qed_resc_free(struct qed_dev *cdev) | |||
| 155 | } | 155 | } |
| 156 | } | 156 | } |
| 157 | 157 | ||
| 158 | static int qed_init_qm_info(struct qed_hwfn *p_hwfn) | 158 | static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable) |
| 159 | { | 159 | { |
| 160 | u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0; | 160 | u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0; |
| 161 | struct qed_qm_info *qm_info = &p_hwfn->qm_info; | 161 | struct qed_qm_info *qm_info = &p_hwfn->qm_info; |
| 162 | struct init_qm_port_params *p_qm_port; | 162 | struct init_qm_port_params *p_qm_port; |
| 163 | u16 num_pqs, multi_cos_tcs = 1; | 163 | u16 num_pqs, multi_cos_tcs = 1; |
| 164 | u8 pf_wfq = qm_info->pf_wfq; | ||
| 165 | u32 pf_rl = qm_info->pf_rl; | ||
| 164 | u16 num_vfs = 0; | 166 | u16 num_vfs = 0; |
| 165 | 167 | ||
| 166 | #ifdef CONFIG_QED_SRIOV | 168 | #ifdef CONFIG_QED_SRIOV |
| @@ -182,23 +184,28 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn) | |||
| 182 | 184 | ||
| 183 | /* PQs will be arranged as follows: First per-TC PQ then pure-LB queue. | 185 | /* PQs will be arranged as follows: First per-TC PQ then pure-LB queue. |
| 184 | */ | 186 | */ |
| 185 | qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) * | 187 | qm_info->qm_pq_params = kcalloc(num_pqs, |
| 186 | num_pqs, GFP_KERNEL); | 188 | sizeof(struct init_qm_pq_params), |
| 189 | b_sleepable ? GFP_KERNEL : GFP_ATOMIC); | ||
| 187 | if (!qm_info->qm_pq_params) | 190 | if (!qm_info->qm_pq_params) |
| 188 | goto alloc_err; | 191 | goto alloc_err; |
| 189 | 192 | ||
| 190 | qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) * | 193 | qm_info->qm_vport_params = kcalloc(num_vports, |
| 191 | num_vports, GFP_KERNEL); | 194 | sizeof(struct init_qm_vport_params), |
| 195 | b_sleepable ? GFP_KERNEL | ||
| 196 | : GFP_ATOMIC); | ||
| 192 | if (!qm_info->qm_vport_params) | 197 | if (!qm_info->qm_vport_params) |
| 193 | goto alloc_err; | 198 | goto alloc_err; |
| 194 | 199 | ||
| 195 | qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) * | 200 | qm_info->qm_port_params = kcalloc(MAX_NUM_PORTS, |
| 196 | MAX_NUM_PORTS, GFP_KERNEL); | 201 | sizeof(struct init_qm_port_params), |
| 202 | b_sleepable ? GFP_KERNEL | ||
| 203 | : GFP_ATOMIC); | ||
| 197 | if (!qm_info->qm_port_params) | 204 | if (!qm_info->qm_port_params) |
| 198 | goto alloc_err; | 205 | goto alloc_err; |
| 199 | 206 | ||
| 200 | qm_info->wfq_data = kcalloc(num_vports, sizeof(*qm_info->wfq_data), | 207 | qm_info->wfq_data = kcalloc(num_vports, sizeof(struct qed_wfq_data), |
| 201 | GFP_KERNEL); | 208 | b_sleepable ? GFP_KERNEL : GFP_ATOMIC); |
| 202 | if (!qm_info->wfq_data) | 209 | if (!qm_info->wfq_data) |
| 203 | goto alloc_err; | 210 | goto alloc_err; |
| 204 | 211 | ||
| @@ -264,10 +271,10 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn) | |||
| 264 | for (i = 0; i < qm_info->num_vports; i++) | 271 | for (i = 0; i < qm_info->num_vports; i++) |
| 265 | qm_info->qm_vport_params[i].vport_wfq = 1; | 272 | qm_info->qm_vport_params[i].vport_wfq = 1; |
| 266 | 273 | ||
| 267 | qm_info->pf_wfq = 0; | ||
| 268 | qm_info->pf_rl = 0; | ||
| 269 | qm_info->vport_rl_en = 1; | 274 | qm_info->vport_rl_en = 1; |
| 270 | qm_info->vport_wfq_en = 1; | 275 | qm_info->vport_wfq_en = 1; |
| 276 | qm_info->pf_rl = pf_rl; | ||
| 277 | qm_info->pf_wfq = pf_wfq; | ||
| 271 | 278 | ||
| 272 | return 0; | 279 | return 0; |
| 273 | 280 | ||
| @@ -299,7 +306,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | |||
| 299 | qed_qm_info_free(p_hwfn); | 306 | qed_qm_info_free(p_hwfn); |
| 300 | 307 | ||
| 301 | /* initialize qed's qm data structure */ | 308 | /* initialize qed's qm data structure */ |
| 302 | rc = qed_init_qm_info(p_hwfn); | 309 | rc = qed_init_qm_info(p_hwfn, false); |
| 303 | if (rc) | 310 | if (rc) |
| 304 | return rc; | 311 | return rc; |
| 305 | 312 | ||
| @@ -388,7 +395,7 @@ int qed_resc_alloc(struct qed_dev *cdev) | |||
| 388 | goto alloc_err; | 395 | goto alloc_err; |
| 389 | 396 | ||
| 390 | /* Prepare and process QM requirements */ | 397 | /* Prepare and process QM requirements */ |
| 391 | rc = qed_init_qm_info(p_hwfn); | 398 | rc = qed_init_qm_info(p_hwfn, true); |
| 392 | if (rc) | 399 | if (rc) |
| 393 | goto alloc_err; | 400 | goto alloc_err; |
| 394 | 401 | ||
| @@ -581,7 +588,14 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn) | |||
| 581 | 588 | ||
| 582 | hw_mode |= 1 << MODE_ASIC; | 589 | hw_mode |= 1 << MODE_ASIC; |
| 583 | 590 | ||
| 591 | if (p_hwfn->cdev->num_hwfns > 1) | ||
| 592 | hw_mode |= 1 << MODE_100G; | ||
| 593 | |||
| 584 | p_hwfn->hw_info.hw_mode = hw_mode; | 594 | p_hwfn->hw_info.hw_mode = hw_mode; |
| 595 | |||
| 596 | DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP), | ||
| 597 | "Configuring function for hw_mode: 0x%08x\n", | ||
| 598 | p_hwfn->hw_info.hw_mode); | ||
| 585 | } | 599 | } |
| 586 | 600 | ||
| 587 | /* Init run time data for all PFs on an engine. */ | 601 | /* Init run time data for all PFs on an engine. */ |
| @@ -821,6 +835,11 @@ int qed_hw_init(struct qed_dev *cdev, | |||
| 821 | u32 load_code, param; | 835 | u32 load_code, param; |
| 822 | int rc, mfw_rc, i; | 836 | int rc, mfw_rc, i; |
| 823 | 837 | ||
| 838 | if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { | ||
| 839 | DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n"); | ||
| 840 | return -EINVAL; | ||
| 841 | } | ||
| 842 | |||
| 824 | if (IS_PF(cdev)) { | 843 | if (IS_PF(cdev)) { |
| 825 | rc = qed_init_fw_data(cdev, bin_fw_data); | 844 | rc = qed_init_fw_data(cdev, bin_fw_data); |
| 826 | if (rc != 0) | 845 | if (rc != 0) |
| @@ -2086,6 +2105,13 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate) | |||
| 2086 | { | 2105 | { |
| 2087 | int i; | 2106 | int i; |
| 2088 | 2107 | ||
| 2108 | if (cdev->num_hwfns > 1) { | ||
| 2109 | DP_VERBOSE(cdev, | ||
| 2110 | NETIF_MSG_LINK, | ||
| 2111 | "WFQ configuration is not supported for this device\n"); | ||
| 2112 | return; | ||
| 2113 | } | ||
| 2114 | |||
| 2089 | for_each_hwfn(cdev, i) { | 2115 | for_each_hwfn(cdev, i) { |
| 2090 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; | 2116 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; |
| 2091 | 2117 | ||
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 8b22f87033ce..61cc6869fa65 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c | |||
| @@ -413,15 +413,17 @@ static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode) | |||
| 413 | /* Fallthrough */ | 413 | /* Fallthrough */ |
| 414 | 414 | ||
| 415 | case QED_INT_MODE_MSI: | 415 | case QED_INT_MODE_MSI: |
| 416 | rc = pci_enable_msi(cdev->pdev); | 416 | if (cdev->num_hwfns == 1) { |
| 417 | if (!rc) { | 417 | rc = pci_enable_msi(cdev->pdev); |
| 418 | int_params->out.int_mode = QED_INT_MODE_MSI; | 418 | if (!rc) { |
| 419 | goto out; | 419 | int_params->out.int_mode = QED_INT_MODE_MSI; |
| 420 | } | 420 | goto out; |
| 421 | } | ||
| 421 | 422 | ||
| 422 | DP_NOTICE(cdev, "Failed to enable MSI\n"); | 423 | DP_NOTICE(cdev, "Failed to enable MSI\n"); |
| 423 | if (force_mode) | 424 | if (force_mode) |
| 424 | goto out; | 425 | goto out; |
| 426 | } | ||
| 425 | /* Fallthrough */ | 427 | /* Fallthrough */ |
| 426 | 428 | ||
| 427 | case QED_INT_MODE_INTA: | 429 | case QED_INT_MODE_INTA: |
| @@ -1103,6 +1105,39 @@ static int qed_get_port_type(u32 media_type) | |||
| 1103 | return port_type; | 1105 | return port_type; |
| 1104 | } | 1106 | } |
| 1105 | 1107 | ||
| 1108 | static int qed_get_link_data(struct qed_hwfn *hwfn, | ||
| 1109 | struct qed_mcp_link_params *params, | ||
| 1110 | struct qed_mcp_link_state *link, | ||
| 1111 | struct qed_mcp_link_capabilities *link_caps) | ||
| 1112 | { | ||
| 1113 | void *p; | ||
| 1114 | |||
| 1115 | if (!IS_PF(hwfn->cdev)) { | ||
| 1116 | qed_vf_get_link_params(hwfn, params); | ||
| 1117 | qed_vf_get_link_state(hwfn, link); | ||
| 1118 | qed_vf_get_link_caps(hwfn, link_caps); | ||
| 1119 | |||
| 1120 | return 0; | ||
| 1121 | } | ||
| 1122 | |||
| 1123 | p = qed_mcp_get_link_params(hwfn); | ||
| 1124 | if (!p) | ||
| 1125 | return -ENXIO; | ||
| 1126 | memcpy(params, p, sizeof(*params)); | ||
| 1127 | |||
| 1128 | p = qed_mcp_get_link_state(hwfn); | ||
| 1129 | if (!p) | ||
| 1130 | return -ENXIO; | ||
| 1131 | memcpy(link, p, sizeof(*link)); | ||
| 1132 | |||
| 1133 | p = qed_mcp_get_link_capabilities(hwfn); | ||
| 1134 | if (!p) | ||
| 1135 | return -ENXIO; | ||
| 1136 | memcpy(link_caps, p, sizeof(*link_caps)); | ||
| 1137 | |||
| 1138 | return 0; | ||
| 1139 | } | ||
| 1140 | |||
| 1106 | static void qed_fill_link(struct qed_hwfn *hwfn, | 1141 | static void qed_fill_link(struct qed_hwfn *hwfn, |
| 1107 | struct qed_link_output *if_link) | 1142 | struct qed_link_output *if_link) |
| 1108 | { | 1143 | { |
| @@ -1114,15 +1149,9 @@ static void qed_fill_link(struct qed_hwfn *hwfn, | |||
| 1114 | memset(if_link, 0, sizeof(*if_link)); | 1149 | memset(if_link, 0, sizeof(*if_link)); |
| 1115 | 1150 | ||
| 1116 | /* Prepare source inputs */ | 1151 | /* Prepare source inputs */ |
| 1117 | if (IS_PF(hwfn->cdev)) { | 1152 | if (qed_get_link_data(hwfn, ¶ms, &link, &link_caps)) { |
| 1118 | memcpy(¶ms, qed_mcp_get_link_params(hwfn), sizeof(params)); | 1153 | dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n"); |
| 1119 | memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link)); | 1154 | return; |
| 1120 | memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn), | ||
| 1121 | sizeof(link_caps)); | ||
| 1122 | } else { | ||
| 1123 | qed_vf_get_link_params(hwfn, ¶ms); | ||
| 1124 | qed_vf_get_link_state(hwfn, &link); | ||
| 1125 | qed_vf_get_link_caps(hwfn, &link_caps); | ||
| 1126 | } | 1155 | } |
| 1127 | 1156 | ||
| 1128 | /* Set the link parameters to pass to protocol driver */ | 1157 | /* Set the link parameters to pass to protocol driver */ |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index c8667c65e685..c90b2b6ad969 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h | |||
| @@ -12,11 +12,13 @@ | |||
| 12 | #include "qed_vf.h" | 12 | #include "qed_vf.h" |
| 13 | #define QED_VF_ARRAY_LENGTH (3) | 13 | #define QED_VF_ARRAY_LENGTH (3) |
| 14 | 14 | ||
| 15 | #ifdef CONFIG_QED_SRIOV | ||
| 15 | #define IS_VF(cdev) ((cdev)->b_is_vf) | 16 | #define IS_VF(cdev) ((cdev)->b_is_vf) |
| 16 | #define IS_PF(cdev) (!((cdev)->b_is_vf)) | 17 | #define IS_PF(cdev) (!((cdev)->b_is_vf)) |
| 17 | #ifdef CONFIG_QED_SRIOV | ||
| 18 | #define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info)) | 18 | #define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info)) |
| 19 | #else | 19 | #else |
| 20 | #define IS_VF(cdev) (0) | ||
| 21 | #define IS_PF(cdev) (1) | ||
| 20 | #define IS_PF_SRIOV(p_hwfn) (0) | 22 | #define IS_PF_SRIOV(p_hwfn) (0) |
| 21 | #endif | 23 | #endif |
| 22 | #define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info)) | 24 | #define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info)) |
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 1bc75358cbc4..ad3cae3b7243 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c | |||
| @@ -230,7 +230,10 @@ static int qede_get_sset_count(struct net_device *dev, int stringset) | |||
| 230 | case ETH_SS_PRIV_FLAGS: | 230 | case ETH_SS_PRIV_FLAGS: |
| 231 | return QEDE_PRI_FLAG_LEN; | 231 | return QEDE_PRI_FLAG_LEN; |
| 232 | case ETH_SS_TEST: | 232 | case ETH_SS_TEST: |
| 233 | return QEDE_ETHTOOL_TEST_MAX; | 233 | if (!IS_VF(edev)) |
| 234 | return QEDE_ETHTOOL_TEST_MAX; | ||
| 235 | else | ||
| 236 | return 0; | ||
| 234 | default: | 237 | default: |
| 235 | DP_VERBOSE(edev, QED_MSG_DEBUG, | 238 | DP_VERBOSE(edev, QED_MSG_DEBUG, |
| 236 | "Unsupported stringset 0x%08x\n", stringset); | 239 | "Unsupported stringset 0x%08x\n", stringset); |
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 337e839ca586..5733d1888223 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c | |||
| @@ -87,7 +87,9 @@ static const struct pci_device_id qede_pci_tbl[] = { | |||
| 87 | {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF}, | 87 | {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF}, |
| 88 | {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF}, | 88 | {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF}, |
| 89 | {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF}, | 89 | {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF}, |
| 90 | #ifdef CONFIG_QED_SRIOV | ||
| 90 | {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF}, | 91 | {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF}, |
| 92 | #endif | ||
| 91 | { 0 } | 93 | { 0 } |
| 92 | }; | 94 | }; |
| 93 | 95 | ||
| @@ -1824,7 +1826,7 @@ static int qede_set_vf_rate(struct net_device *dev, int vfidx, | |||
| 1824 | { | 1826 | { |
| 1825 | struct qede_dev *edev = netdev_priv(dev); | 1827 | struct qede_dev *edev = netdev_priv(dev); |
| 1826 | 1828 | ||
| 1827 | return edev->ops->iov->set_rate(edev->cdev, vfidx, max_tx_rate, | 1829 | return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate, |
| 1828 | max_tx_rate); | 1830 | max_tx_rate); |
| 1829 | } | 1831 | } |
| 1830 | 1832 | ||
| @@ -2091,6 +2093,29 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev) | |||
| 2091 | edev->accept_any_vlan = false; | 2093 | edev->accept_any_vlan = false; |
| 2092 | } | 2094 | } |
| 2093 | 2095 | ||
| 2096 | int qede_set_features(struct net_device *dev, netdev_features_t features) | ||
| 2097 | { | ||
| 2098 | struct qede_dev *edev = netdev_priv(dev); | ||
| 2099 | netdev_features_t changes = features ^ dev->features; | ||
| 2100 | bool need_reload = false; | ||
| 2101 | |||
| 2102 | /* No action needed if hardware GRO is disabled during driver load */ | ||
| 2103 | if (changes & NETIF_F_GRO) { | ||
| 2104 | if (dev->features & NETIF_F_GRO) | ||
| 2105 | need_reload = !edev->gro_disable; | ||
| 2106 | else | ||
| 2107 | need_reload = edev->gro_disable; | ||
| 2108 | } | ||
| 2109 | |||
| 2110 | if (need_reload && netif_running(edev->ndev)) { | ||
| 2111 | dev->features = features; | ||
| 2112 | qede_reload(edev, NULL, NULL); | ||
| 2113 | return 1; | ||
| 2114 | } | ||
| 2115 | |||
| 2116 | return 0; | ||
| 2117 | } | ||
| 2118 | |||
| 2094 | #ifdef CONFIG_QEDE_VXLAN | 2119 | #ifdef CONFIG_QEDE_VXLAN |
| 2095 | static void qede_add_vxlan_port(struct net_device *dev, | 2120 | static void qede_add_vxlan_port(struct net_device *dev, |
| 2096 | sa_family_t sa_family, __be16 port) | 2121 | sa_family_t sa_family, __be16 port) |
| @@ -2175,6 +2200,7 @@ static const struct net_device_ops qede_netdev_ops = { | |||
| 2175 | #endif | 2200 | #endif |
| 2176 | .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, | 2201 | .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, |
| 2177 | .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, | 2202 | .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, |
| 2203 | .ndo_set_features = qede_set_features, | ||
| 2178 | .ndo_get_stats64 = qede_get_stats64, | 2204 | .ndo_get_stats64 = qede_get_stats64, |
| 2179 | #ifdef CONFIG_QED_SRIOV | 2205 | #ifdef CONFIG_QED_SRIOV |
| 2180 | .ndo_set_vf_link_state = qede_set_vf_link_state, | 2206 | .ndo_set_vf_link_state = qede_set_vf_link_state, |
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 83d72106471c..fd5d1c93b55b 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c | |||
| @@ -4846,7 +4846,6 @@ static void ql_eeh_close(struct net_device *ndev) | |||
| 4846 | } | 4846 | } |
| 4847 | 4847 | ||
| 4848 | /* Disabling the timer */ | 4848 | /* Disabling the timer */ |
| 4849 | del_timer_sync(&qdev->timer); | ||
| 4850 | ql_cancel_all_work_sync(qdev); | 4849 | ql_cancel_all_work_sync(qdev); |
| 4851 | 4850 | ||
| 4852 | for (i = 0; i < qdev->rss_ring_count; i++) | 4851 | for (i = 0; i < qdev->rss_ring_count; i++) |
| @@ -4873,6 +4872,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev, | |||
| 4873 | return PCI_ERS_RESULT_CAN_RECOVER; | 4872 | return PCI_ERS_RESULT_CAN_RECOVER; |
| 4874 | case pci_channel_io_frozen: | 4873 | case pci_channel_io_frozen: |
| 4875 | netif_device_detach(ndev); | 4874 | netif_device_detach(ndev); |
| 4875 | del_timer_sync(&qdev->timer); | ||
| 4876 | if (netif_running(ndev)) | 4876 | if (netif_running(ndev)) |
| 4877 | ql_eeh_close(ndev); | 4877 | ql_eeh_close(ndev); |
| 4878 | pci_disable_device(pdev); | 4878 | pci_disable_device(pdev); |
| @@ -4880,6 +4880,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev, | |||
| 4880 | case pci_channel_io_perm_failure: | 4880 | case pci_channel_io_perm_failure: |
| 4881 | dev_err(&pdev->dev, | 4881 | dev_err(&pdev->dev, |
| 4882 | "%s: pci_channel_io_perm_failure.\n", __func__); | 4882 | "%s: pci_channel_io_perm_failure.\n", __func__); |
| 4883 | del_timer_sync(&qdev->timer); | ||
| 4883 | ql_eeh_close(ndev); | 4884 | ql_eeh_close(ndev); |
| 4884 | set_bit(QL_EEH_FATAL, &qdev->flags); | 4885 | set_bit(QL_EEH_FATAL, &qdev->flags); |
| 4885 | return PCI_ERS_RESULT_DISCONNECT; | 4886 | return PCI_ERS_RESULT_DISCONNECT; |
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 1681084cc96f..1f309127457d 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c | |||
| @@ -619,6 +619,17 @@ fail: | |||
| 619 | return rc; | 619 | return rc; |
| 620 | } | 620 | } |
| 621 | 621 | ||
| 622 | static void efx_ef10_forget_old_piobufs(struct efx_nic *efx) | ||
| 623 | { | ||
| 624 | struct efx_channel *channel; | ||
| 625 | struct efx_tx_queue *tx_queue; | ||
| 626 | |||
| 627 | /* All our existing PIO buffers went away */ | ||
| 628 | efx_for_each_channel(channel, efx) | ||
| 629 | efx_for_each_channel_tx_queue(tx_queue, channel) | ||
| 630 | tx_queue->piobuf = NULL; | ||
| 631 | } | ||
| 632 | |||
| 622 | #else /* !EFX_USE_PIO */ | 633 | #else /* !EFX_USE_PIO */ |
| 623 | 634 | ||
| 624 | static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) | 635 | static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) |
| @@ -635,6 +646,10 @@ static void efx_ef10_free_piobufs(struct efx_nic *efx) | |||
| 635 | { | 646 | { |
| 636 | } | 647 | } |
| 637 | 648 | ||
| 649 | static void efx_ef10_forget_old_piobufs(struct efx_nic *efx) | ||
| 650 | { | ||
| 651 | } | ||
| 652 | |||
| 638 | #endif /* EFX_USE_PIO */ | 653 | #endif /* EFX_USE_PIO */ |
| 639 | 654 | ||
| 640 | static void efx_ef10_remove(struct efx_nic *efx) | 655 | static void efx_ef10_remove(struct efx_nic *efx) |
| @@ -1018,6 +1033,7 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx) | |||
| 1018 | nic_data->must_realloc_vis = true; | 1033 | nic_data->must_realloc_vis = true; |
| 1019 | nic_data->must_restore_filters = true; | 1034 | nic_data->must_restore_filters = true; |
| 1020 | nic_data->must_restore_piobufs = true; | 1035 | nic_data->must_restore_piobufs = true; |
| 1036 | efx_ef10_forget_old_piobufs(efx); | ||
| 1021 | nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; | 1037 | nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; |
| 1022 | 1038 | ||
| 1023 | /* Driver-created vswitches and vports must be re-created */ | 1039 | /* Driver-created vswitches and vports must be re-created */ |
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 0705ec869487..097f363f1630 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c | |||
| @@ -1726,14 +1726,33 @@ static int efx_probe_filters(struct efx_nic *efx) | |||
| 1726 | 1726 | ||
| 1727 | #ifdef CONFIG_RFS_ACCEL | 1727 | #ifdef CONFIG_RFS_ACCEL |
| 1728 | if (efx->type->offload_features & NETIF_F_NTUPLE) { | 1728 | if (efx->type->offload_features & NETIF_F_NTUPLE) { |
| 1729 | efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters, | 1729 | struct efx_channel *channel; |
| 1730 | sizeof(*efx->rps_flow_id), | 1730 | int i, success = 1; |
| 1731 | GFP_KERNEL); | 1731 | |
| 1732 | if (!efx->rps_flow_id) { | 1732 | efx_for_each_channel(channel, efx) { |
| 1733 | channel->rps_flow_id = | ||
| 1734 | kcalloc(efx->type->max_rx_ip_filters, | ||
| 1735 | sizeof(*channel->rps_flow_id), | ||
| 1736 | GFP_KERNEL); | ||
| 1737 | if (!channel->rps_flow_id) | ||
| 1738 | success = 0; | ||
| 1739 | else | ||
| 1740 | for (i = 0; | ||
| 1741 | i < efx->type->max_rx_ip_filters; | ||
| 1742 | ++i) | ||
| 1743 | channel->rps_flow_id[i] = | ||
| 1744 | RPS_FLOW_ID_INVALID; | ||
| 1745 | } | ||
| 1746 | |||
| 1747 | if (!success) { | ||
| 1748 | efx_for_each_channel(channel, efx) | ||
| 1749 | kfree(channel->rps_flow_id); | ||
| 1733 | efx->type->filter_table_remove(efx); | 1750 | efx->type->filter_table_remove(efx); |
| 1734 | rc = -ENOMEM; | 1751 | rc = -ENOMEM; |
| 1735 | goto out_unlock; | 1752 | goto out_unlock; |
| 1736 | } | 1753 | } |
| 1754 | |||
| 1755 | efx->rps_expire_index = efx->rps_expire_channel = 0; | ||
| 1737 | } | 1756 | } |
| 1738 | #endif | 1757 | #endif |
| 1739 | out_unlock: | 1758 | out_unlock: |
| @@ -1744,7 +1763,10 @@ out_unlock: | |||
| 1744 | static void efx_remove_filters(struct efx_nic *efx) | 1763 | static void efx_remove_filters(struct efx_nic *efx) |
| 1745 | { | 1764 | { |
| 1746 | #ifdef CONFIG_RFS_ACCEL | 1765 | #ifdef CONFIG_RFS_ACCEL |
| 1747 | kfree(efx->rps_flow_id); | 1766 | struct efx_channel *channel; |
| 1767 | |||
| 1768 | efx_for_each_channel(channel, efx) | ||
| 1769 | kfree(channel->rps_flow_id); | ||
| 1748 | #endif | 1770 | #endif |
| 1749 | down_write(&efx->filter_sem); | 1771 | down_write(&efx->filter_sem); |
| 1750 | efx->type->filter_table_remove(efx); | 1772 | efx->type->filter_table_remove(efx); |
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c index 7f295c4d7b80..2a9228a6e4a0 100644 --- a/drivers/net/ethernet/sfc/mcdi_port.c +++ b/drivers/net/ethernet/sfc/mcdi_port.c | |||
| @@ -189,11 +189,12 @@ static u32 mcdi_to_ethtool_cap(u32 media, u32 cap) | |||
| 189 | 189 | ||
| 190 | case MC_CMD_MEDIA_XFP: | 190 | case MC_CMD_MEDIA_XFP: |
| 191 | case MC_CMD_MEDIA_SFP_PLUS: | 191 | case MC_CMD_MEDIA_SFP_PLUS: |
| 192 | result |= SUPPORTED_FIBRE; | ||
| 193 | break; | ||
| 194 | |||
| 195 | case MC_CMD_MEDIA_QSFP_PLUS: | 192 | case MC_CMD_MEDIA_QSFP_PLUS: |
| 196 | result |= SUPPORTED_FIBRE; | 193 | result |= SUPPORTED_FIBRE; |
| 194 | if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) | ||
| 195 | result |= SUPPORTED_1000baseT_Full; | ||
| 196 | if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) | ||
| 197 | result |= SUPPORTED_10000baseT_Full; | ||
| 197 | if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) | 198 | if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) |
| 198 | result |= SUPPORTED_40000baseCR4_Full; | 199 | result |= SUPPORTED_40000baseCR4_Full; |
| 199 | break; | 200 | break; |
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 38c422321cda..d13ddf9703ff 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h | |||
| @@ -403,6 +403,8 @@ enum efx_sync_events_state { | |||
| 403 | * @event_test_cpu: Last CPU to handle interrupt or test event for this channel | 403 | * @event_test_cpu: Last CPU to handle interrupt or test event for this channel |
| 404 | * @irq_count: Number of IRQs since last adaptive moderation decision | 404 | * @irq_count: Number of IRQs since last adaptive moderation decision |
| 405 | * @irq_mod_score: IRQ moderation score | 405 | * @irq_mod_score: IRQ moderation score |
| 406 | * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS, | ||
| 407 | * indexed by filter ID | ||
| 406 | * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors | 408 | * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors |
| 407 | * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors | 409 | * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors |
| 408 | * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors | 410 | * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors |
| @@ -446,6 +448,8 @@ struct efx_channel { | |||
| 446 | unsigned int irq_mod_score; | 448 | unsigned int irq_mod_score; |
| 447 | #ifdef CONFIG_RFS_ACCEL | 449 | #ifdef CONFIG_RFS_ACCEL |
| 448 | unsigned int rfs_filters_added; | 450 | unsigned int rfs_filters_added; |
| 451 | #define RPS_FLOW_ID_INVALID 0xFFFFFFFF | ||
| 452 | u32 *rps_flow_id; | ||
| 449 | #endif | 453 | #endif |
| 450 | 454 | ||
| 451 | unsigned n_rx_tobe_disc; | 455 | unsigned n_rx_tobe_disc; |
| @@ -889,9 +893,9 @@ struct vfdi_status; | |||
| 889 | * @filter_sem: Filter table rw_semaphore, for freeing the table | 893 | * @filter_sem: Filter table rw_semaphore, for freeing the table |
| 890 | * @filter_lock: Filter table lock, for mere content changes | 894 | * @filter_lock: Filter table lock, for mere content changes |
| 891 | * @filter_state: Architecture-dependent filter table state | 895 | * @filter_state: Architecture-dependent filter table state |
| 892 | * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS, | 896 | * @rps_expire_channel: Next channel to check for expiry |
| 893 | * indexed by filter ID | 897 | * @rps_expire_index: Next index to check for expiry in |
| 894 | * @rps_expire_index: Next index to check for expiry in @rps_flow_id | 898 | * @rps_expire_channel's @rps_flow_id |
| 895 | * @active_queues: Count of RX and TX queues that haven't been flushed and drained. | 899 | * @active_queues: Count of RX and TX queues that haven't been flushed and drained. |
| 896 | * @rxq_flush_pending: Count of number of receive queues that need to be flushed. | 900 | * @rxq_flush_pending: Count of number of receive queues that need to be flushed. |
| 897 | * Decremented when the efx_flush_rx_queue() is called. | 901 | * Decremented when the efx_flush_rx_queue() is called. |
| @@ -1035,7 +1039,7 @@ struct efx_nic { | |||
| 1035 | spinlock_t filter_lock; | 1039 | spinlock_t filter_lock; |
| 1036 | void *filter_state; | 1040 | void *filter_state; |
| 1037 | #ifdef CONFIG_RFS_ACCEL | 1041 | #ifdef CONFIG_RFS_ACCEL |
| 1038 | u32 *rps_flow_id; | 1042 | unsigned int rps_expire_channel; |
| 1039 | unsigned int rps_expire_index; | 1043 | unsigned int rps_expire_index; |
| 1040 | #endif | 1044 | #endif |
| 1041 | 1045 | ||
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 8956995b2fe7..02b0b5272c14 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c | |||
| @@ -842,33 +842,18 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, | |||
| 842 | struct efx_nic *efx = netdev_priv(net_dev); | 842 | struct efx_nic *efx = netdev_priv(net_dev); |
| 843 | struct efx_channel *channel; | 843 | struct efx_channel *channel; |
| 844 | struct efx_filter_spec spec; | 844 | struct efx_filter_spec spec; |
| 845 | const __be16 *ports; | 845 | struct flow_keys fk; |
| 846 | __be16 ether_type; | ||
| 847 | int nhoff; | ||
| 848 | int rc; | 846 | int rc; |
| 849 | 847 | ||
| 850 | /* The core RPS/RFS code has already parsed and validated | 848 | if (flow_id == RPS_FLOW_ID_INVALID) |
| 851 | * VLAN, IP and transport headers. We assume they are in the | 849 | return -EINVAL; |
| 852 | * header area. | ||
| 853 | */ | ||
| 854 | |||
| 855 | if (skb->protocol == htons(ETH_P_8021Q)) { | ||
| 856 | const struct vlan_hdr *vh = | ||
| 857 | (const struct vlan_hdr *)skb->data; | ||
| 858 | 850 | ||
| 859 | /* We can't filter on the IP 5-tuple and the vlan | 851 | if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) |
| 860 | * together, so just strip the vlan header and filter | 852 | return -EPROTONOSUPPORT; |
| 861 | * on the IP part. | ||
| 862 | */ | ||
| 863 | EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh)); | ||
| 864 | ether_type = vh->h_vlan_encapsulated_proto; | ||
| 865 | nhoff = sizeof(struct vlan_hdr); | ||
| 866 | } else { | ||
| 867 | ether_type = skb->protocol; | ||
| 868 | nhoff = 0; | ||
| 869 | } | ||
| 870 | 853 | ||
| 871 | if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6)) | 854 | if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) |
| 855 | return -EPROTONOSUPPORT; | ||
| 856 | if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) | ||
| 872 | return -EPROTONOSUPPORT; | 857 | return -EPROTONOSUPPORT; |
| 873 | 858 | ||
| 874 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, | 859 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, |
| @@ -878,56 +863,41 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, | |||
| 878 | EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | | 863 | EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | |
| 879 | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | | 864 | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | |
| 880 | EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT; | 865 | EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT; |
| 881 | spec.ether_type = ether_type; | 866 | spec.ether_type = fk.basic.n_proto; |
| 882 | 867 | spec.ip_proto = fk.basic.ip_proto; | |
| 883 | if (ether_type == htons(ETH_P_IP)) { | 868 | |
| 884 | const struct iphdr *ip = | 869 | if (fk.basic.n_proto == htons(ETH_P_IP)) { |
| 885 | (const struct iphdr *)(skb->data + nhoff); | 870 | spec.rem_host[0] = fk.addrs.v4addrs.src; |
| 886 | 871 | spec.loc_host[0] = fk.addrs.v4addrs.dst; | |
| 887 | EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip)); | ||
| 888 | if (ip_is_fragment(ip)) | ||
| 889 | return -EPROTONOSUPPORT; | ||
| 890 | spec.ip_proto = ip->protocol; | ||
| 891 | spec.rem_host[0] = ip->saddr; | ||
| 892 | spec.loc_host[0] = ip->daddr; | ||
| 893 | EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4); | ||
| 894 | ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl); | ||
| 895 | } else { | 872 | } else { |
| 896 | const struct ipv6hdr *ip6 = | 873 | memcpy(spec.rem_host, &fk.addrs.v6addrs.src, sizeof(struct in6_addr)); |
| 897 | (const struct ipv6hdr *)(skb->data + nhoff); | 874 | memcpy(spec.loc_host, &fk.addrs.v6addrs.dst, sizeof(struct in6_addr)); |
| 898 | |||
| 899 | EFX_BUG_ON_PARANOID(skb_headlen(skb) < | ||
| 900 | nhoff + sizeof(*ip6) + 4); | ||
| 901 | spec.ip_proto = ip6->nexthdr; | ||
| 902 | memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr)); | ||
| 903 | memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr)); | ||
| 904 | ports = (const __be16 *)(ip6 + 1); | ||
| 905 | } | 875 | } |
| 906 | 876 | ||
| 907 | spec.rem_port = ports[0]; | 877 | spec.rem_port = fk.ports.src; |
| 908 | spec.loc_port = ports[1]; | 878 | spec.loc_port = fk.ports.dst; |
| 909 | 879 | ||
| 910 | rc = efx->type->filter_rfs_insert(efx, &spec); | 880 | rc = efx->type->filter_rfs_insert(efx, &spec); |
| 911 | if (rc < 0) | 881 | if (rc < 0) |
| 912 | return rc; | 882 | return rc; |
| 913 | 883 | ||
| 914 | /* Remember this so we can check whether to expire the filter later */ | 884 | /* Remember this so we can check whether to expire the filter later */ |
| 915 | efx->rps_flow_id[rc] = flow_id; | 885 | channel = efx_get_channel(efx, rxq_index); |
| 916 | channel = efx_get_channel(efx, skb_get_rx_queue(skb)); | 886 | channel->rps_flow_id[rc] = flow_id; |
| 917 | ++channel->rfs_filters_added; | 887 | ++channel->rfs_filters_added; |
| 918 | 888 | ||
| 919 | if (ether_type == htons(ETH_P_IP)) | 889 | if (spec.ether_type == htons(ETH_P_IP)) |
| 920 | netif_info(efx, rx_status, efx->net_dev, | 890 | netif_info(efx, rx_status, efx->net_dev, |
| 921 | "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n", | 891 | "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n", |
| 922 | (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", | 892 | (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", |
| 923 | spec.rem_host, ntohs(ports[0]), spec.loc_host, | 893 | spec.rem_host, ntohs(spec.rem_port), spec.loc_host, |
| 924 | ntohs(ports[1]), rxq_index, flow_id, rc); | 894 | ntohs(spec.loc_port), rxq_index, flow_id, rc); |
| 925 | else | 895 | else |
| 926 | netif_info(efx, rx_status, efx->net_dev, | 896 | netif_info(efx, rx_status, efx->net_dev, |
| 927 | "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n", | 897 | "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n", |
| 928 | (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", | 898 | (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", |
| 929 | spec.rem_host, ntohs(ports[0]), spec.loc_host, | 899 | spec.rem_host, ntohs(spec.rem_port), spec.loc_host, |
| 930 | ntohs(ports[1]), rxq_index, flow_id, rc); | 900 | ntohs(spec.loc_port), rxq_index, flow_id, rc); |
| 931 | 901 | ||
| 932 | return rc; | 902 | return rc; |
| 933 | } | 903 | } |
| @@ -935,24 +905,34 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, | |||
| 935 | bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota) | 905 | bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota) |
| 936 | { | 906 | { |
| 937 | bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index); | 907 | bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index); |
| 938 | unsigned int index, size; | 908 | unsigned int channel_idx, index, size; |
| 939 | u32 flow_id; | 909 | u32 flow_id; |
| 940 | 910 | ||
| 941 | if (!spin_trylock_bh(&efx->filter_lock)) | 911 | if (!spin_trylock_bh(&efx->filter_lock)) |
| 942 | return false; | 912 | return false; |
| 943 | 913 | ||
| 944 | expire_one = efx->type->filter_rfs_expire_one; | 914 | expire_one = efx->type->filter_rfs_expire_one; |
| 915 | channel_idx = efx->rps_expire_channel; | ||
| 945 | index = efx->rps_expire_index; | 916 | index = efx->rps_expire_index; |
| 946 | size = efx->type->max_rx_ip_filters; | 917 | size = efx->type->max_rx_ip_filters; |
| 947 | while (quota--) { | 918 | while (quota--) { |
| 948 | flow_id = efx->rps_flow_id[index]; | 919 | struct efx_channel *channel = efx_get_channel(efx, channel_idx); |
| 949 | if (expire_one(efx, flow_id, index)) | 920 | flow_id = channel->rps_flow_id[index]; |
| 921 | |||
| 922 | if (flow_id != RPS_FLOW_ID_INVALID && | ||
| 923 | expire_one(efx, flow_id, index)) { | ||
| 950 | netif_info(efx, rx_status, efx->net_dev, | 924 | netif_info(efx, rx_status, efx->net_dev, |
| 951 | "expired filter %d [flow %u]\n", | 925 | "expired filter %d [queue %u flow %u]\n", |
| 952 | index, flow_id); | 926 | index, channel_idx, flow_id); |
| 953 | if (++index == size) | 927 | channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID; |
| 928 | } | ||
| 929 | if (++index == size) { | ||
| 930 | if (++channel_idx == efx->n_channels) | ||
| 931 | channel_idx = 0; | ||
| 954 | index = 0; | 932 | index = 0; |
| 933 | } | ||
| 955 | } | 934 | } |
| 935 | efx->rps_expire_channel = channel_idx; | ||
| 956 | efx->rps_expire_index = index; | 936 | efx->rps_expire_index = index; |
| 957 | 937 | ||
| 958 | spin_unlock_bh(&efx->filter_lock); | 938 | spin_unlock_bh(&efx->filter_lock); |
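
The RFS rework above moves the flow-ID table into each channel, marks unused slots with RPS_FLOW_ID_INVALID, and lets the expiry scan walk (channel, index) pairs round-robin under a per-call quota. A minimal standalone sketch of that scan follows; N_CHANNELS, N_FILTERS and the expire_one() stand-in are invented for the example and do not reflect the sfc hardware interface.

/*
 * Minimal sketch, not the sfc driver code: a quota of slots is checked per
 * call, the (channel, index) cursor wraps across per-channel tables, and
 * empty slots are skipped via an "invalid" flow ID.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define N_CHANNELS	4
#define N_FILTERS	8
#define FLOW_ID_INVALID	0xFFFFFFFFu

static uint32_t flow_id[N_CHANNELS][N_FILTERS];
static unsigned int expire_channel, expire_index;

/* Stand-in for the hardware check: say every valid entry may expire. */
static bool expire_one(uint32_t id, unsigned int index)
{
	(void)index;
	return id != FLOW_ID_INVALID;
}

static void rfs_expire(unsigned int quota)
{
	while (quota--) {
		uint32_t id = flow_id[expire_channel][expire_index];

		if (id != FLOW_ID_INVALID && expire_one(id, expire_index)) {
			printf("expired filter %u [queue %u flow %" PRIu32 "]\n",
			       expire_index, expire_channel, id);
			flow_id[expire_channel][expire_index] = FLOW_ID_INVALID;
		}
		if (++expire_index == N_FILTERS) {
			expire_index = 0;
			if (++expire_channel == N_CHANNELS)
				expire_channel = 0;
		}
	}
}

int main(void)
{
	unsigned int c, i;

	for (c = 0; c < N_CHANNELS; c++)
		for (i = 0; i < N_FILTERS; i++)
			flow_id[c][i] = FLOW_ID_INVALID;

	flow_id[1][3] = 42;	/* one steering filter installed */
	rfs_expire(64);		/* more than one table's worth of slots */
	return 0;
}

Keeping a sentinel in every unused slot is what allows the filter-insert path above to drop its per-packet header parsing error handling for stale IDs: the expiry scan simply ignores invalid entries.
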
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index 4f7283d05588..44da877d2483 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | |||
| @@ -156,7 +156,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw, | |||
| 156 | struct netdev_hw_addr *ha; | 156 | struct netdev_hw_addr *ha; |
| 157 | 157 | ||
| 158 | netdev_for_each_uc_addr(ha, dev) { | 158 | netdev_for_each_uc_addr(ha, dev) { |
| 159 | dwmac4_set_umac_addr(ioaddr, ha->addr, reg); | 159 | dwmac4_set_umac_addr(hw, ha->addr, reg); |
| 160 | reg++; | 160 | reg++; |
| 161 | } | 161 | } |
| 162 | } | 162 | } |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index eac45d0c75e2..a473c182c91d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
| @@ -3450,8 +3450,6 @@ int stmmac_resume(struct device *dev) | |||
| 3450 | if (!netif_running(ndev)) | 3450 | if (!netif_running(ndev)) |
| 3451 | return 0; | 3451 | return 0; |
| 3452 | 3452 | ||
| 3453 | spin_lock_irqsave(&priv->lock, flags); | ||
| 3454 | |||
| 3455 | /* Power Down bit, into the PM register, is cleared | 3453 | /* Power Down bit, into the PM register, is cleared |
| 3456 | * automatically as soon as a magic packet or a Wake-up frame | 3454 | * automatically as soon as a magic packet or a Wake-up frame |
| 3457 | * is received. Anyway, it's better to manually clear | 3455 | * is received. Anyway, it's better to manually clear |
| @@ -3459,7 +3457,9 @@ int stmmac_resume(struct device *dev) | |||
| 3459 | * from another devices (e.g. serial console). | 3457 | * from another devices (e.g. serial console). |
| 3460 | */ | 3458 | */ |
| 3461 | if (device_may_wakeup(priv->device)) { | 3459 | if (device_may_wakeup(priv->device)) { |
| 3460 | spin_lock_irqsave(&priv->lock, flags); | ||
| 3462 | priv->hw->mac->pmt(priv->hw, 0); | 3461 | priv->hw->mac->pmt(priv->hw, 0); |
| 3462 | spin_unlock_irqrestore(&priv->lock, flags); | ||
| 3463 | priv->irq_wake = 0; | 3463 | priv->irq_wake = 0; |
| 3464 | } else { | 3464 | } else { |
| 3465 | pinctrl_pm_select_default_state(priv->device); | 3465 | pinctrl_pm_select_default_state(priv->device); |
| @@ -3473,6 +3473,8 @@ int stmmac_resume(struct device *dev) | |||
| 3473 | 3473 | ||
| 3474 | netif_device_attach(ndev); | 3474 | netif_device_attach(ndev); |
| 3475 | 3475 | ||
| 3476 | spin_lock_irqsave(&priv->lock, flags); | ||
| 3477 | |||
| 3476 | priv->cur_rx = 0; | 3478 | priv->cur_rx = 0; |
| 3477 | priv->dirty_rx = 0; | 3479 | priv->dirty_rx = 0; |
| 3478 | priv->dirty_tx = 0; | 3480 | priv->dirty_tx = 0; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index 3f83c369f56c..ec295851812b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | |||
| @@ -297,7 +297,7 @@ int stmmac_mdio_register(struct net_device *ndev) | |||
| 297 | return -ENOMEM; | 297 | return -ENOMEM; |
| 298 | 298 | ||
| 299 | if (mdio_bus_data->irqs) | 299 | if (mdio_bus_data->irqs) |
| 300 | memcpy(new_bus->irq, mdio_bus_data, sizeof(new_bus->irq)); | 300 | memcpy(new_bus->irq, mdio_bus_data->irqs, sizeof(new_bus->irq)); |
| 301 | 301 | ||
| 302 | #ifdef CONFIG_OF | 302 | #ifdef CONFIG_OF |
| 303 | if (priv->device->of_node) | 303 | if (priv->device->of_node) |
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 4b08a2f52b3e..e6bb0ecb12c7 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
| @@ -1339,7 +1339,7 @@ static int cpsw_ndo_open(struct net_device *ndev) | |||
| 1339 | if (priv->coal_intvl != 0) { | 1339 | if (priv->coal_intvl != 0) { |
| 1340 | struct ethtool_coalesce coal; | 1340 | struct ethtool_coalesce coal; |
| 1341 | 1341 | ||
| 1342 | coal.rx_coalesce_usecs = (priv->coal_intvl << 4); | 1342 | coal.rx_coalesce_usecs = priv->coal_intvl; |
| 1343 | cpsw_set_coalesce(ndev, &coal); | 1343 | cpsw_set_coalesce(ndev, &coal); |
| 1344 | } | 1344 | } |
| 1345 | 1345 | ||
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index a0f64cba86ba..2ace126533cd 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c | |||
| @@ -990,7 +990,7 @@ static void team_port_disable(struct team *team, | |||
| 990 | #define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ | 990 | #define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ |
| 991 | NETIF_F_RXCSUM | NETIF_F_ALL_TSO) | 991 | NETIF_F_RXCSUM | NETIF_F_ALL_TSO) |
| 992 | 992 | ||
| 993 | static void __team_compute_features(struct team *team) | 993 | static void ___team_compute_features(struct team *team) |
| 994 | { | 994 | { |
| 995 | struct team_port *port; | 995 | struct team_port *port; |
| 996 | u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL; | 996 | u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL; |
| @@ -1021,15 +1021,20 @@ static void __team_compute_features(struct team *team) | |||
| 1021 | team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; | 1021 | team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; |
| 1022 | if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM)) | 1022 | if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM)) |
| 1023 | team->dev->priv_flags |= IFF_XMIT_DST_RELEASE; | 1023 | team->dev->priv_flags |= IFF_XMIT_DST_RELEASE; |
| 1024 | } | ||
| 1024 | 1025 | ||
| 1026 | static void __team_compute_features(struct team *team) | ||
| 1027 | { | ||
| 1028 | ___team_compute_features(team); | ||
| 1025 | netdev_change_features(team->dev); | 1029 | netdev_change_features(team->dev); |
| 1026 | } | 1030 | } |
| 1027 | 1031 | ||
| 1028 | static void team_compute_features(struct team *team) | 1032 | static void team_compute_features(struct team *team) |
| 1029 | { | 1033 | { |
| 1030 | mutex_lock(&team->lock); | 1034 | mutex_lock(&team->lock); |
| 1031 | __team_compute_features(team); | 1035 | ___team_compute_features(team); |
| 1032 | mutex_unlock(&team->lock); | 1036 | mutex_unlock(&team->lock); |
| 1037 | netdev_change_features(team->dev); | ||
| 1033 | } | 1038 | } |
| 1034 | 1039 | ||
| 1035 | static int team_port_enter(struct team *team, struct team_port *port) | 1040 | static int team_port_enter(struct team *team, struct team_port *port) |
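The team hunk above splits feature recomputation from the netdev notification so that netdev_change_features() is no longer called while team->lock is held. A minimal sketch of the resulting "compute under the lock, notify after dropping it" pattern follows; my_obj, recompute() and notify() are placeholders, not the driver's real symbols.

#include <linux/mutex.h>

struct my_obj {
	struct mutex lock;
	/* ... state protected by lock ... */
};

void recompute(struct my_obj *obj);	/* placeholder: updates state, needs lock */
void notify(struct my_obj *obj);	/* placeholder: may sleep / take other locks */

static void update_and_notify(struct my_obj *obj)
{
	mutex_lock(&obj->lock);
	recompute(obj);			/* touch guarded state only here */
	mutex_unlock(&obj->lock);

	notify(obj);			/* called unlocked, as in the hunk above */
}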
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index 36cd7f016a8d..9bbe0161a2f4 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c | |||
| @@ -473,7 +473,7 @@ static void read_bulk_callback(struct urb *urb) | |||
| 473 | goto goon; | 473 | goto goon; |
| 474 | } | 474 | } |
| 475 | 475 | ||
| 476 | if (!count || count < 4) | 476 | if (count < 4) |
| 477 | goto goon; | 477 | goto goon; |
| 478 | 478 | ||
| 479 | rx_status = buf[count - 2]; | 479 | rx_status = buf[count - 2]; |
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index d9d2806a47b1..dc989a8b5afb 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c | |||
| @@ -61,6 +61,8 @@ | |||
| 61 | #define SUSPEND_ALLMODES (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \ | 61 | #define SUSPEND_ALLMODES (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \ |
| 62 | SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3) | 62 | SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3) |
| 63 | 63 | ||
| 64 | #define CARRIER_CHECK_DELAY (2 * HZ) | ||
| 65 | |||
| 64 | struct smsc95xx_priv { | 66 | struct smsc95xx_priv { |
| 65 | u32 mac_cr; | 67 | u32 mac_cr; |
| 66 | u32 hash_hi; | 68 | u32 hash_hi; |
| @@ -69,6 +71,9 @@ struct smsc95xx_priv { | |||
| 69 | spinlock_t mac_cr_lock; | 71 | spinlock_t mac_cr_lock; |
| 70 | u8 features; | 72 | u8 features; |
| 71 | u8 suspend_flags; | 73 | u8 suspend_flags; |
| 74 | bool link_ok; | ||
| 75 | struct delayed_work carrier_check; | ||
| 76 | struct usbnet *dev; | ||
| 72 | }; | 77 | }; |
| 73 | 78 | ||
| 74 | static bool turbo_mode = true; | 79 | static bool turbo_mode = true; |
| @@ -624,6 +629,44 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb) | |||
| 624 | intdata); | 629 | intdata); |
| 625 | } | 630 | } |
| 626 | 631 | ||
| 632 | static void set_carrier(struct usbnet *dev, bool link) | ||
| 633 | { | ||
| 634 | struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); | ||
| 635 | |||
| 636 | if (pdata->link_ok == link) | ||
| 637 | return; | ||
| 638 | |||
| 639 | pdata->link_ok = link; | ||
| 640 | |||
| 641 | if (link) | ||
| 642 | usbnet_link_change(dev, 1, 0); | ||
| 643 | else | ||
| 644 | usbnet_link_change(dev, 0, 0); | ||
| 645 | } | ||
| 646 | |||
| 647 | static void check_carrier(struct work_struct *work) | ||
| 648 | { | ||
| 649 | struct smsc95xx_priv *pdata = container_of(work, struct smsc95xx_priv, | ||
| 650 | carrier_check.work); | ||
| 651 | struct usbnet *dev = pdata->dev; | ||
| 652 | int ret; | ||
| 653 | |||
| 654 | if (pdata->suspend_flags != 0) | ||
| 655 | return; | ||
| 656 | |||
| 657 | ret = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMSR); | ||
| 658 | if (ret < 0) { | ||
| 659 | netdev_warn(dev->net, "Failed to read MII_BMSR\n"); | ||
| 660 | return; | ||
| 661 | } | ||
| 662 | if (ret & BMSR_LSTATUS) | ||
| 663 | set_carrier(dev, 1); | ||
| 664 | else | ||
| 665 | set_carrier(dev, 0); | ||
| 666 | |||
| 667 | schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY); | ||
| 668 | } | ||
| 669 | |||
| 627 | /* Enable or disable Tx & Rx checksum offload engines */ | 670 | /* Enable or disable Tx & Rx checksum offload engines */ |
| 628 | static int smsc95xx_set_features(struct net_device *netdev, | 671 | static int smsc95xx_set_features(struct net_device *netdev, |
| 629 | netdev_features_t features) | 672 | netdev_features_t features) |
| @@ -1165,13 +1208,20 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) | |||
| 1165 | dev->net->flags |= IFF_MULTICAST; | 1208 | dev->net->flags |= IFF_MULTICAST; |
| 1166 | dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM; | 1209 | dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM; |
| 1167 | dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; | 1210 | dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; |
| 1211 | |||
| 1212 | pdata->dev = dev; | ||
| 1213 | INIT_DELAYED_WORK(&pdata->carrier_check, check_carrier); | ||
| 1214 | schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY); | ||
| 1215 | |||
| 1168 | return 0; | 1216 | return 0; |
| 1169 | } | 1217 | } |
| 1170 | 1218 | ||
| 1171 | static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf) | 1219 | static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf) |
| 1172 | { | 1220 | { |
| 1173 | struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); | 1221 | struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); |
| 1222 | |||
| 1174 | if (pdata) { | 1223 | if (pdata) { |
| 1224 | cancel_delayed_work(&pdata->carrier_check); | ||
| 1175 | netif_dbg(dev, ifdown, dev->net, "free pdata\n"); | 1225 | netif_dbg(dev, ifdown, dev->net, "free pdata\n"); |
| 1176 | kfree(pdata); | 1226 | kfree(pdata); |
| 1177 | pdata = NULL; | 1227 | pdata = NULL; |
| @@ -1695,6 +1745,7 @@ static int smsc95xx_resume(struct usb_interface *intf) | |||
| 1695 | 1745 | ||
| 1696 | /* do this first to ensure it's cleared even in error case */ | 1746 | /* do this first to ensure it's cleared even in error case */ |
| 1697 | pdata->suspend_flags = 0; | 1747 | pdata->suspend_flags = 0; |
| 1748 | schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY); | ||
| 1698 | 1749 | ||
| 1699 | if (suspend_flags & SUSPEND_ALLMODES) { | 1750 | if (suspend_flags & SUSPEND_ALLMODES) { |
| 1700 | /* clear wake-up sources */ | 1751 | /* clear wake-up sources */ |
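The smsc95xx change adds a 2-second delayed work that polls MII_BMSR and feeds usbnet_link_change(). The delayed-work lifecycle it relies on — init and arm at bind, self-rescheduling handler, cancel at unbind, re-arm on resume — looks roughly like the sketch below. Names are placeholders and the 2-second interval is only the assumed poll period, not a requirement.

#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define POLL_DELAY	(2 * HZ)	/* assumed 2 s poll interval */

struct poller {
	struct delayed_work work;
	bool stopped;
};

static void poll_fn(struct work_struct *work)
{
	struct poller *p = container_of(work, struct poller, work.work);

	if (p->stopped)
		return;

	/* ... read hardware state, report any change ... */

	schedule_delayed_work(&p->work, POLL_DELAY);	/* re-arm */
}

static void poller_start(struct poller *p)
{
	INIT_DELAYED_WORK(&p->work, poll_fn);
	schedule_delayed_work(&p->work, POLL_DELAY);
}

static void poller_stop(struct poller *p)
{
	p->stopped = true;
	cancel_delayed_work_sync(&p->work);
}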
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 49d84e540343..e0638e556fe7 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
| @@ -1925,24 +1925,11 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
| 1925 | 1925 | ||
| 1926 | virtio_device_ready(vdev); | 1926 | virtio_device_ready(vdev); |
| 1927 | 1927 | ||
| 1928 | /* Last of all, set up some receive buffers. */ | ||
| 1929 | for (i = 0; i < vi->curr_queue_pairs; i++) { | ||
| 1930 | try_fill_recv(vi, &vi->rq[i], GFP_KERNEL); | ||
| 1931 | |||
| 1932 | /* If we didn't even get one input buffer, we're useless. */ | ||
| 1933 | if (vi->rq[i].vq->num_free == | ||
| 1934 | virtqueue_get_vring_size(vi->rq[i].vq)) { | ||
| 1935 | free_unused_bufs(vi); | ||
| 1936 | err = -ENOMEM; | ||
| 1937 | goto free_recv_bufs; | ||
| 1938 | } | ||
| 1939 | } | ||
| 1940 | |||
| 1941 | vi->nb.notifier_call = &virtnet_cpu_callback; | 1928 | vi->nb.notifier_call = &virtnet_cpu_callback; |
| 1942 | err = register_hotcpu_notifier(&vi->nb); | 1929 | err = register_hotcpu_notifier(&vi->nb); |
| 1943 | if (err) { | 1930 | if (err) { |
| 1944 | pr_debug("virtio_net: registering cpu notifier failed\n"); | 1931 | pr_debug("virtio_net: registering cpu notifier failed\n"); |
| 1945 | goto free_recv_bufs; | 1932 | goto free_unregister_netdev; |
| 1946 | } | 1933 | } |
| 1947 | 1934 | ||
| 1948 | /* Assume link up if device can't report link status, | 1935 | /* Assume link up if device can't report link status, |
| @@ -1960,10 +1947,9 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
| 1960 | 1947 | ||
| 1961 | return 0; | 1948 | return 0; |
| 1962 | 1949 | ||
| 1963 | free_recv_bufs: | 1950 | free_unregister_netdev: |
| 1964 | vi->vdev->config->reset(vdev); | 1951 | vi->vdev->config->reset(vdev); |
| 1965 | 1952 | ||
| 1966 | free_receive_bufs(vi); | ||
| 1967 | unregister_netdev(dev); | 1953 | unregister_netdev(dev); |
| 1968 | free_vqs: | 1954 | free_vqs: |
| 1969 | cancel_delayed_work_sync(&vi->refill); | 1955 | cancel_delayed_work_sync(&vi->refill); |
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index db8022ae415b..08885bc8d6db 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c | |||
| @@ -1369,7 +1369,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, | |||
| 1369 | rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd; | 1369 | rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd; |
| 1370 | 1370 | ||
| 1371 | segCnt = rcdlro->segCnt; | 1371 | segCnt = rcdlro->segCnt; |
| 1372 | BUG_ON(segCnt <= 1); | 1372 | WARN_ON_ONCE(segCnt == 0); |
| 1373 | mss = rcdlro->mss; | 1373 | mss = rcdlro->mss; |
| 1374 | if (unlikely(segCnt <= 1)) | 1374 | if (unlikely(segCnt <= 1)) |
| 1375 | segCnt = 0; | 1375 | segCnt = 0; |
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index c4825392d64b..3d2b64e63408 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h | |||
| @@ -69,10 +69,10 @@ | |||
| 69 | /* | 69 | /* |
| 70 | * Version numbers | 70 | * Version numbers |
| 71 | */ | 71 | */ |
| 72 | #define VMXNET3_DRIVER_VERSION_STRING "1.4.7.0-k" | 72 | #define VMXNET3_DRIVER_VERSION_STRING "1.4.8.0-k" |
| 73 | 73 | ||
| 74 | /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ | 74 | /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ |
| 75 | #define VMXNET3_DRIVER_VERSION_NUM 0x01040700 | 75 | #define VMXNET3_DRIVER_VERSION_NUM 0x01040800 |
| 76 | 76 | ||
| 77 | #if defined(CONFIG_PCI_MSI) | 77 | #if defined(CONFIG_PCI_MSI) |
| 78 | /* RSS only makes sense if MSI-X is supported. */ | 78 | /* RSS only makes sense if MSI-X is supported. */ |
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 8ff30c3bdfce..f999db2f97b4 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
| @@ -3086,6 +3086,9 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev, | |||
| 3086 | if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) | 3086 | if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) |
| 3087 | conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL; | 3087 | conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL; |
| 3088 | 3088 | ||
| 3089 | if (tb[IFLA_MTU]) | ||
| 3090 | conf.mtu = nla_get_u32(tb[IFLA_MTU]); | ||
| 3091 | |||
| 3089 | err = vxlan_dev_configure(src_net, dev, &conf); | 3092 | err = vxlan_dev_configure(src_net, dev, &conf); |
| 3090 | switch (err) { | 3093 | switch (err) { |
| 3091 | case -ENODEV: | 3094 | case -ENODEV: |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index d0631b6cfd53..62f475e31077 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | |||
| @@ -2540,12 +2540,14 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, | |||
| 2540 | const u8 *mac, struct station_info *sinfo) | 2540 | const u8 *mac, struct station_info *sinfo) |
| 2541 | { | 2541 | { |
| 2542 | struct brcmf_if *ifp = netdev_priv(ndev); | 2542 | struct brcmf_if *ifp = netdev_priv(ndev); |
| 2543 | struct brcmf_scb_val_le scb_val; | ||
| 2543 | s32 err = 0; | 2544 | s32 err = 0; |
| 2544 | struct brcmf_sta_info_le sta_info_le; | 2545 | struct brcmf_sta_info_le sta_info_le; |
| 2545 | u32 sta_flags; | 2546 | u32 sta_flags; |
| 2546 | u32 is_tdls_peer; | 2547 | u32 is_tdls_peer; |
| 2547 | s32 total_rssi; | 2548 | s32 total_rssi; |
| 2548 | s32 count_rssi; | 2549 | s32 count_rssi; |
| 2550 | int rssi; | ||
| 2549 | u32 i; | 2551 | u32 i; |
| 2550 | 2552 | ||
| 2551 | brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac); | 2553 | brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac); |
| @@ -2629,6 +2631,20 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, | |||
| 2629 | sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); | 2631 | sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); |
| 2630 | total_rssi /= count_rssi; | 2632 | total_rssi /= count_rssi; |
| 2631 | sinfo->signal = total_rssi; | 2633 | sinfo->signal = total_rssi; |
| 2634 | } else if (test_bit(BRCMF_VIF_STATUS_CONNECTED, | ||
| 2635 | &ifp->vif->sme_state)) { | ||
| 2636 | memset(&scb_val, 0, sizeof(scb_val)); | ||
| 2637 | err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_RSSI, | ||
| 2638 | &scb_val, sizeof(scb_val)); | ||
| 2639 | if (err) { | ||
| 2640 | brcmf_err("Could not get rssi (%d)\n", err); | ||
| 2641 | goto done; | ||
| 2642 | } else { | ||
| 2643 | rssi = le32_to_cpu(scb_val.val); | ||
| 2644 | sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); | ||
| 2645 | sinfo->signal = rssi; | ||
| 2646 | brcmf_dbg(CONN, "RSSI %d dBm\n", rssi); | ||
| 2647 | } | ||
| 2632 | } | 2648 | } |
| 2633 | } | 2649 | } |
| 2634 | done: | 2650 | done: |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c index 68f1ce02f4bf..2b9a2bc429d6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c | |||
| @@ -1157,6 +1157,8 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf) | |||
| 1157 | brcmu_pkt_buf_free_skb(skb); | 1157 | brcmu_pkt_buf_free_skb(skb); |
| 1158 | return; | 1158 | return; |
| 1159 | } | 1159 | } |
| 1160 | |||
| 1161 | skb->protocol = eth_type_trans(skb, ifp->ndev); | ||
| 1160 | brcmf_netif_rx(ifp, skb); | 1162 | brcmf_netif_rx(ifp, skb); |
| 1161 | } | 1163 | } |
| 1162 | 1164 | ||
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 9ed0ed1bf514..4dd5adcdd29b 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
| @@ -2776,6 +2776,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2, | |||
| 2776 | if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] || | 2776 | if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] || |
| 2777 | !info->attrs[HWSIM_ATTR_FLAGS] || | 2777 | !info->attrs[HWSIM_ATTR_FLAGS] || |
| 2778 | !info->attrs[HWSIM_ATTR_COOKIE] || | 2778 | !info->attrs[HWSIM_ATTR_COOKIE] || |
| 2779 | !info->attrs[HWSIM_ATTR_SIGNAL] || | ||
| 2779 | !info->attrs[HWSIM_ATTR_TX_INFO]) | 2780 | !info->attrs[HWSIM_ATTR_TX_INFO]) |
| 2780 | goto out; | 2781 | goto out; |
| 2781 | 2782 | ||
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index 0f48048b8654..3a0faa8fe9d4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c | |||
| @@ -54,7 +54,7 @@ EXPORT_SYMBOL(channel5g_80m); | |||
| 54 | void rtl_addr_delay(u32 addr) | 54 | void rtl_addr_delay(u32 addr) |
| 55 | { | 55 | { |
| 56 | if (addr == 0xfe) | 56 | if (addr == 0xfe) |
| 57 | msleep(50); | 57 | mdelay(50); |
| 58 | else if (addr == 0xfd) | 58 | else if (addr == 0xfd) |
| 59 | msleep(5); | 59 | msleep(5); |
| 60 | else if (addr == 0xfc) | 60 | else if (addr == 0xfc) |
| @@ -75,7 +75,7 @@ void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr, | |||
| 75 | rtl_addr_delay(addr); | 75 | rtl_addr_delay(addr); |
| 76 | } else { | 76 | } else { |
| 77 | rtl_set_rfreg(hw, rfpath, addr, mask, data); | 77 | rtl_set_rfreg(hw, rfpath, addr, mask, data); |
| 78 | usleep_range(1, 2); | 78 | udelay(1); |
| 79 | } | 79 | } |
| 80 | } | 80 | } |
| 81 | EXPORT_SYMBOL(rtl_rfreg_delay); | 81 | EXPORT_SYMBOL(rtl_rfreg_delay); |
| @@ -86,7 +86,7 @@ void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data) | |||
| 86 | rtl_addr_delay(addr); | 86 | rtl_addr_delay(addr); |
| 87 | } else { | 87 | } else { |
| 88 | rtl_set_bbreg(hw, addr, MASKDWORD, data); | 88 | rtl_set_bbreg(hw, addr, MASKDWORD, data); |
| 89 | usleep_range(1, 2); | 89 | udelay(1); |
| 90 | } | 90 | } |
| 91 | } | 91 | } |
| 92 | EXPORT_SYMBOL(rtl_bb_delay); | 92 | EXPORT_SYMBOL(rtl_bb_delay); |
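The rtlwifi hunk swaps msleep()/usleep_range() for mdelay()/udelay() on the paths that can be reached from atomic context, where sleeping is not allowed; busy-wait delays are the safe (if more CPU-hungry) choice there. A hedged illustration of the distinction, with a placeholder lock and register write:

#include <linux/delay.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(reg_lock);

static void program_reg_atomic(void)
{
	unsigned long flags;

	spin_lock_irqsave(&reg_lock, flags);
	/* ... write a register ... */
	udelay(1);		/* busy-wait: legal in atomic context */
	spin_unlock_irqrestore(&reg_lock, flags);
}

static void program_reg_sleepable(void)
{
	/* ... write a register ... */
	usleep_range(1, 2);	/* may sleep: process context only */
}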
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 78dca3193ca4..befac5b19490 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
| @@ -1679,9 +1679,14 @@ static int nvme_pci_enable(struct nvme_dev *dev) | |||
| 1679 | 1679 | ||
| 1680 | static void nvme_dev_unmap(struct nvme_dev *dev) | 1680 | static void nvme_dev_unmap(struct nvme_dev *dev) |
| 1681 | { | 1681 | { |
| 1682 | struct pci_dev *pdev = to_pci_dev(dev->dev); | ||
| 1683 | int bars; | ||
| 1684 | |||
| 1682 | if (dev->bar) | 1685 | if (dev->bar) |
| 1683 | iounmap(dev->bar); | 1686 | iounmap(dev->bar); |
| 1684 | pci_release_regions(to_pci_dev(dev->dev)); | 1687 | |
| 1688 | bars = pci_select_bars(pdev, IORESOURCE_MEM); | ||
| 1689 | pci_release_selected_regions(pdev, bars); | ||
| 1685 | } | 1690 | } |
| 1686 | 1691 | ||
| 1687 | static void nvme_pci_disable(struct nvme_dev *dev) | 1692 | static void nvme_pci_disable(struct nvme_dev *dev) |
| @@ -1924,7 +1929,7 @@ static int nvme_dev_map(struct nvme_dev *dev) | |||
| 1924 | 1929 | ||
| 1925 | return 0; | 1930 | return 0; |
| 1926 | release: | 1931 | release: |
| 1927 | pci_release_regions(pdev); | 1932 | pci_release_selected_regions(pdev, bars); |
| 1928 | return -ENODEV; | 1933 | return -ENODEV; |
| 1929 | } | 1934 | } |
| 1930 | 1935 | ||
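The nvme hunk makes teardown symmetric with the map side, which claims only the memory BARs via pci_select_bars(); releasing everything with pci_release_regions() could touch regions the driver never requested. A minimal sketch of the paired request/release, with a placeholder driver name:

#include <linux/pci.h>

static int map_mem_bars(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	if (pci_request_selected_regions(pdev, bars, "example-drv"))
		return -ENODEV;
	/* ... ioremap BAR 0, etc. ... */
	return 0;
}

static void unmap_mem_bars(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	/* ... iounmap ... */
	pci_release_selected_regions(pdev, bars);
}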
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 14f2f8c7c260..33daffc4392c 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c | |||
| @@ -395,7 +395,7 @@ static int unflatten_dt_nodes(const void *blob, | |||
| 395 | struct device_node **nodepp) | 395 | struct device_node **nodepp) |
| 396 | { | 396 | { |
| 397 | struct device_node *root; | 397 | struct device_node *root; |
| 398 | int offset = 0, depth = 0; | 398 | int offset = 0, depth = 0, initial_depth = 0; |
| 399 | #define FDT_MAX_DEPTH 64 | 399 | #define FDT_MAX_DEPTH 64 |
| 400 | unsigned int fpsizes[FDT_MAX_DEPTH]; | 400 | unsigned int fpsizes[FDT_MAX_DEPTH]; |
| 401 | struct device_node *nps[FDT_MAX_DEPTH]; | 401 | struct device_node *nps[FDT_MAX_DEPTH]; |
| @@ -405,11 +405,22 @@ static int unflatten_dt_nodes(const void *blob, | |||
| 405 | if (nodepp) | 405 | if (nodepp) |
| 406 | *nodepp = NULL; | 406 | *nodepp = NULL; |
| 407 | 407 | ||
| 408 | /* | ||
| 409 | * We're unflattening device sub-tree if @dad is valid. There are | ||
| 410 | * possibly multiple nodes in the first level of depth. We need | ||
| 411 | * set @depth to 1 to make fdt_next_node() happy as it bails | ||
| 412 | * immediately when negative @depth is found. Otherwise, the device | ||
| 413 | * nodes except the first one won't be unflattened successfully. | ||
| 414 | */ | ||
| 415 | if (dad) | ||
| 416 | depth = initial_depth = 1; | ||
| 417 | |||
| 408 | root = dad; | 418 | root = dad; |
| 409 | fpsizes[depth] = dad ? strlen(of_node_full_name(dad)) : 0; | 419 | fpsizes[depth] = dad ? strlen(of_node_full_name(dad)) : 0; |
| 410 | nps[depth] = dad; | 420 | nps[depth] = dad; |
| 421 | |||
| 411 | for (offset = 0; | 422 | for (offset = 0; |
| 412 | offset >= 0 && depth >= 0; | 423 | offset >= 0 && depth >= initial_depth; |
| 413 | offset = fdt_next_node(blob, offset, &depth)) { | 424 | offset = fdt_next_node(blob, offset, &depth)) { |
| 414 | if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH)) | 425 | if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH)) |
| 415 | continue; | 426 | continue; |
diff --git a/drivers/of/irq.c b/drivers/of/irq.c index e7bfc175b8e1..6ec743faabe8 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c | |||
| @@ -386,13 +386,13 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r) | |||
| 386 | EXPORT_SYMBOL_GPL(of_irq_to_resource); | 386 | EXPORT_SYMBOL_GPL(of_irq_to_resource); |
| 387 | 387 | ||
| 388 | /** | 388 | /** |
| 389 | * of_irq_get - Decode a node's IRQ and return it as a Linux irq number | 389 | * of_irq_get - Decode a node's IRQ and return it as a Linux IRQ number |
| 390 | * @dev: pointer to device tree node | 390 | * @dev: pointer to device tree node |
| 391 | * @index: zero-based index of the irq | 391 | * @index: zero-based index of the IRQ |
| 392 | * | ||
| 393 | * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain | ||
| 394 | * is not yet created. | ||
| 395 | * | 392 | * |
| 393 | * Returns Linux IRQ number on success, or 0 on the IRQ mapping failure, or | ||
| 394 | * -EPROBE_DEFER if the IRQ domain is not yet created, or error code in case | ||
| 395 | * of any other failure. | ||
| 396 | */ | 396 | */ |
| 397 | int of_irq_get(struct device_node *dev, int index) | 397 | int of_irq_get(struct device_node *dev, int index) |
| 398 | { | 398 | { |
| @@ -413,12 +413,13 @@ int of_irq_get(struct device_node *dev, int index) | |||
| 413 | EXPORT_SYMBOL_GPL(of_irq_get); | 413 | EXPORT_SYMBOL_GPL(of_irq_get); |
| 414 | 414 | ||
| 415 | /** | 415 | /** |
| 416 | * of_irq_get_byname - Decode a node's IRQ and return it as a Linux irq number | 416 | * of_irq_get_byname - Decode a node's IRQ and return it as a Linux IRQ number |
| 417 | * @dev: pointer to device tree node | 417 | * @dev: pointer to device tree node |
| 418 | * @name: irq name | 418 | * @name: IRQ name |
| 419 | * | 419 | * |
| 420 | * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain | 420 | * Returns Linux IRQ number on success, or 0 on the IRQ mapping failure, or |
| 421 | * is not yet created, or error code in case of any other failure. | 421 | * -EPROBE_DEFER if the IRQ domain is not yet created, or error code in case |
| 422 | * of any other failure. | ||
| 422 | */ | 423 | */ |
| 423 | int of_irq_get_byname(struct device_node *dev, const char *name) | 424 | int of_irq_get_byname(struct device_node *dev, const char *name) |
| 424 | { | 425 | { |
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index ed01c0172e4a..216648233874 100644 --- a/drivers/of/of_reserved_mem.c +++ b/drivers/of/of_reserved_mem.c | |||
| @@ -127,8 +127,15 @@ static int __init __reserved_mem_alloc_size(unsigned long node, | |||
| 127 | } | 127 | } |
| 128 | 128 | ||
| 129 | /* Need adjust the alignment to satisfy the CMA requirement */ | 129 | /* Need adjust the alignment to satisfy the CMA requirement */ |
| 130 | if (IS_ENABLED(CONFIG_CMA) && of_flat_dt_is_compatible(node, "shared-dma-pool")) | 130 | if (IS_ENABLED(CONFIG_CMA) |
| 131 | align = max(align, (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order)); | 131 | && of_flat_dt_is_compatible(node, "shared-dma-pool") |
| 132 | && of_get_flat_dt_prop(node, "reusable", NULL) | ||
| 133 | && !of_get_flat_dt_prop(node, "no-map", NULL)) { | ||
| 134 | unsigned long order = | ||
| 135 | max_t(unsigned long, MAX_ORDER - 1, pageblock_order); | ||
| 136 | |||
| 137 | align = max(align, (phys_addr_t)PAGE_SIZE << order); | ||
| 138 | } | ||
| 132 | 139 | ||
| 133 | prop = of_get_flat_dt_prop(node, "alloc-ranges", &len); | 140 | prop = of_get_flat_dt_prop(node, "alloc-ranges", &len); |
| 134 | if (prop) { | 141 | if (prop) { |
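With the stricter condition above, the CMA-sized alignment is applied only to "shared-dma-pool" regions that are reusable and mappable; other reserved regions keep their natural alignment. As a rough worked example with assumed, typical values (4 KiB pages, MAX_ORDER = 11, pageblock_order = 10): the order used is max(11 - 1, 10) = 10, so align is raised to at least 4 KiB << 10 = 4 MiB.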
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index f2d01d4d9364..1b8304e1efaa 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c | |||
| @@ -950,17 +950,14 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu) | |||
| 950 | 950 | ||
| 951 | /* For SPIs, we need to track the affinity per IRQ */ | 951 | /* For SPIs, we need to track the affinity per IRQ */ |
| 952 | if (using_spi) { | 952 | if (using_spi) { |
| 953 | if (i >= pdev->num_resources) { | 953 | if (i >= pdev->num_resources) |
| 954 | of_node_put(dn); | ||
| 955 | break; | 954 | break; |
| 956 | } | ||
| 957 | 955 | ||
| 958 | irqs[i] = cpu; | 956 | irqs[i] = cpu; |
| 959 | } | 957 | } |
| 960 | 958 | ||
| 961 | /* Keep track of the CPUs containing this PMU type */ | 959 | /* Keep track of the CPUs containing this PMU type */ |
| 962 | cpumask_set_cpu(cpu, &pmu->supported_cpus); | 960 | cpumask_set_cpu(cpu, &pmu->supported_cpus); |
| 963 | of_node_put(dn); | ||
| 964 | i++; | 961 | i++; |
| 965 | } while (1); | 962 | } while (1); |
| 966 | 963 | ||
| @@ -995,9 +992,6 @@ int arm_pmu_device_probe(struct platform_device *pdev, | |||
| 995 | 992 | ||
| 996 | armpmu_init(pmu); | 993 | armpmu_init(pmu); |
| 997 | 994 | ||
| 998 | if (!__oprofile_cpu_pmu) | ||
| 999 | __oprofile_cpu_pmu = pmu; | ||
| 1000 | |||
| 1001 | pmu->plat_device = pdev; | 995 | pmu->plat_device = pdev; |
| 1002 | 996 | ||
| 1003 | if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) { | 997 | if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) { |
| @@ -1033,6 +1027,9 @@ int arm_pmu_device_probe(struct platform_device *pdev, | |||
| 1033 | if (ret) | 1027 | if (ret) |
| 1034 | goto out_destroy; | 1028 | goto out_destroy; |
| 1035 | 1029 | ||
| 1030 | if (!__oprofile_cpu_pmu) | ||
| 1031 | __oprofile_cpu_pmu = pmu; | ||
| 1032 | |||
| 1036 | pr_info("enabled with %s PMU driver, %d counters available\n", | 1033 | pr_info("enabled with %s PMU driver, %d counters available\n", |
| 1037 | pmu->name, pmu->num_events); | 1034 | pmu->name, pmu->num_events); |
| 1038 | 1035 | ||
| @@ -1043,6 +1040,7 @@ out_destroy: | |||
| 1043 | out_free: | 1040 | out_free: |
| 1044 | pr_info("%s: failed to register PMU devices!\n", | 1041 | pr_info("%s: failed to register PMU devices!\n", |
| 1045 | of_node_full_name(node)); | 1042 | of_node_full_name(node)); |
| 1043 | kfree(pmu->irq_affinity); | ||
| 1046 | kfree(pmu); | 1044 | kfree(pmu); |
| 1047 | return ret; | 1045 | return ret; |
| 1048 | } | 1046 | } |
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c index 207b13b618cf..a607655d7830 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c | |||
| @@ -1256,9 +1256,10 @@ static void mtk_eint_irq_handler(struct irq_desc *desc) | |||
| 1256 | const struct mtk_desc_pin *pin; | 1256 | const struct mtk_desc_pin *pin; |
| 1257 | 1257 | ||
| 1258 | chained_irq_enter(chip, desc); | 1258 | chained_irq_enter(chip, desc); |
| 1259 | for (eint_num = 0; eint_num < pctl->devdata->ap_num; eint_num += 32) { | 1259 | for (eint_num = 0; |
| 1260 | eint_num < pctl->devdata->ap_num; | ||
| 1261 | eint_num += 32, reg += 4) { | ||
| 1260 | status = readl(reg); | 1262 | status = readl(reg); |
| 1261 | reg += 4; | ||
| 1262 | while (status) { | 1263 | while (status) { |
| 1263 | offset = __ffs(status); | 1264 | offset = __ffs(status); |
| 1264 | index = eint_num + offset; | 1265 | index = eint_num + offset; |
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c index ccbfc325c778..38faceff2f08 100644 --- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c +++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c | |||
| @@ -854,7 +854,7 @@ static int nmk_gpio_get_dir(struct gpio_chip *chip, unsigned offset) | |||
| 854 | 854 | ||
| 855 | clk_enable(nmk_chip->clk); | 855 | clk_enable(nmk_chip->clk); |
| 856 | 856 | ||
| 857 | dir = !!(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset)); | 857 | dir = !(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset)); |
| 858 | 858 | ||
| 859 | clk_disable(nmk_chip->clk); | 859 | clk_disable(nmk_chip->clk); |
| 860 | 860 | ||
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c index 579fd65299a0..d637c933c8a9 100644 --- a/drivers/ptp/ptp_chardev.c +++ b/drivers/ptp/ptp_chardev.c | |||
| @@ -208,14 +208,10 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) | |||
| 208 | break; | 208 | break; |
| 209 | 209 | ||
| 210 | case PTP_SYS_OFFSET: | 210 | case PTP_SYS_OFFSET: |
| 211 | sysoff = kmalloc(sizeof(*sysoff), GFP_KERNEL); | 211 | sysoff = memdup_user((void __user *)arg, sizeof(*sysoff)); |
| 212 | if (!sysoff) { | 212 | if (IS_ERR(sysoff)) { |
| 213 | err = -ENOMEM; | 213 | err = PTR_ERR(sysoff); |
| 214 | break; | 214 | sysoff = NULL; |
| 215 | } | ||
| 216 | if (copy_from_user(sysoff, (void __user *)arg, | ||
| 217 | sizeof(*sysoff))) { | ||
| 218 | err = -EFAULT; | ||
| 219 | break; | 215 | break; |
| 220 | } | 216 | } |
| 221 | if (sysoff->n_samples > PTP_MAX_SAMPLES) { | 217 | if (sysoff->n_samples > PTP_MAX_SAMPLES) { |
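The PTP ioctl hunk collapses the kmalloc()+copy_from_user() pair into memdup_user(), which allocates and copies in one step and reports failure through an ERR_PTR. A minimal sketch of the calling convention, with placeholder names:

#include <linux/string.h>	/* memdup_user() */
#include <linux/slab.h>		/* kfree() */
#include <linux/err.h>		/* IS_ERR()/PTR_ERR() */

static long copy_request(void __user *uarg, size_t size)
{
	void *req = memdup_user(uarg, size);

	if (IS_ERR(req))
		return PTR_ERR(req);	/* -EFAULT or -ENOMEM */

	/* ... validate and use req ... */

	kfree(req);
	return 0;
}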
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c index 56a17ec5b5ef..526bf23dcb49 100644 --- a/drivers/regulator/qcom_smd-regulator.c +++ b/drivers/regulator/qcom_smd-regulator.c | |||
| @@ -140,6 +140,19 @@ static const struct regulator_ops rpm_smps_ldo_ops = { | |||
| 140 | .enable = rpm_reg_enable, | 140 | .enable = rpm_reg_enable, |
| 141 | .disable = rpm_reg_disable, | 141 | .disable = rpm_reg_disable, |
| 142 | .is_enabled = rpm_reg_is_enabled, | 142 | .is_enabled = rpm_reg_is_enabled, |
| 143 | .list_voltage = regulator_list_voltage_linear_range, | ||
| 144 | |||
| 145 | .get_voltage = rpm_reg_get_voltage, | ||
| 146 | .set_voltage = rpm_reg_set_voltage, | ||
| 147 | |||
| 148 | .set_load = rpm_reg_set_load, | ||
| 149 | }; | ||
| 150 | |||
| 151 | static const struct regulator_ops rpm_smps_ldo_ops_fixed = { | ||
| 152 | .enable = rpm_reg_enable, | ||
| 153 | .disable = rpm_reg_disable, | ||
| 154 | .is_enabled = rpm_reg_is_enabled, | ||
| 155 | .list_voltage = regulator_list_voltage_linear_range, | ||
| 143 | 156 | ||
| 144 | .get_voltage = rpm_reg_get_voltage, | 157 | .get_voltage = rpm_reg_get_voltage, |
| 145 | .set_voltage = rpm_reg_set_voltage, | 158 | .set_voltage = rpm_reg_set_voltage, |
| @@ -247,7 +260,7 @@ static const struct regulator_desc pm8941_nldo = { | |||
| 247 | static const struct regulator_desc pm8941_lnldo = { | 260 | static const struct regulator_desc pm8941_lnldo = { |
| 248 | .fixed_uV = 1740000, | 261 | .fixed_uV = 1740000, |
| 249 | .n_voltages = 1, | 262 | .n_voltages = 1, |
| 250 | .ops = &rpm_smps_ldo_ops, | 263 | .ops = &rpm_smps_ldo_ops_fixed, |
| 251 | }; | 264 | }; |
| 252 | 265 | ||
| 253 | static const struct regulator_desc pm8941_switch = { | 266 | static const struct regulator_desc pm8941_switch = { |
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 8f90d9e77104..969c312de1be 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h | |||
| @@ -621,6 +621,11 @@ struct aac_driver_ident | |||
| 621 | #define AAC_QUIRK_SCSI_32 0x0020 | 621 | #define AAC_QUIRK_SCSI_32 0x0020 |
| 622 | 622 | ||
| 623 | /* | 623 | /* |
| 624 | * SRC based adapters support the AifReqEvent functions | ||
| 625 | */ | ||
| 626 | #define AAC_QUIRK_SRC 0x0040 | ||
| 627 | |||
| 628 | /* | ||
| 624 | * The adapter interface specs all queues to be located in the same | 629 | * The adapter interface specs all queues to be located in the same |
| 625 | * physically contiguous block. The host structure that defines the | 630 | * physically contiguous block. The host structure that defines the |
| 626 | * commuication queues will assume they are each a separate physically | 631 | * commuication queues will assume they are each a separate physically |
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index a943bd230bc2..79871f3519ff 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c | |||
| @@ -236,10 +236,10 @@ static struct aac_driver_ident aac_drivers[] = { | |||
| 236 | { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */ | 236 | { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */ |
| 237 | { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */ | 237 | { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */ |
| 238 | { aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */ | 238 | { aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */ |
| 239 | { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 6 (Tupelo) */ | 239 | { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 6 (Tupelo) */ |
| 240 | { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 7 (Denali) */ | 240 | { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 7 (Denali) */ |
| 241 | { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 8 */ | 241 | { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 8 */ |
| 242 | { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec PMC Series 9 */ | 242 | { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC } /* Adaptec PMC Series 9 */ |
| 243 | }; | 243 | }; |
| 244 | 244 | ||
| 245 | /** | 245 | /** |
| @@ -1299,7 +1299,8 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 1299 | else | 1299 | else |
| 1300 | shost->this_id = shost->max_id; | 1300 | shost->this_id = shost->max_id; |
| 1301 | 1301 | ||
| 1302 | aac_intr_normal(aac, 0, 2, 0, NULL); | 1302 | if (aac_drivers[index].quirks & AAC_QUIRK_SRC) |
| 1303 | aac_intr_normal(aac, 0, 2, 0, NULL); | ||
| 1303 | 1304 | ||
| 1304 | /* | 1305 | /* |
| 1305 | * dmb - we may need to move the setting of these parms somewhere else once | 1306 | * dmb - we may need to move the setting of these parms somewhere else once |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 6a4df5a315e9..6bff13e7afc7 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c | |||
| @@ -7975,13 +7975,14 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, | |||
| 7975 | ActiveCableEventData = | 7975 | ActiveCableEventData = |
| 7976 | (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData; | 7976 | (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData; |
| 7977 | if (ActiveCableEventData->ReasonCode == | 7977 | if (ActiveCableEventData->ReasonCode == |
| 7978 | MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER) | 7978 | MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER) { |
| 7979 | pr_info(MPT3SAS_FMT "Currently an active cable with ReceptacleID %d", | 7979 | pr_info(MPT3SAS_FMT "Currently an active cable with ReceptacleID %d", |
| 7980 | ioc->name, ActiveCableEventData->ReceptacleID); | 7980 | ioc->name, ActiveCableEventData->ReceptacleID); |
| 7981 | pr_info("cannot be powered and devices connected to this active cable"); | 7981 | pr_info("cannot be powered and devices connected to this active cable"); |
| 7982 | pr_info("will not be seen. This active cable"); | 7982 | pr_info("will not be seen. This active cable"); |
| 7983 | pr_info("requires %d mW of power", | 7983 | pr_info("requires %d mW of power", |
| 7984 | ActiveCableEventData->ActiveCablePowerRequirement); | 7984 | ActiveCableEventData->ActiveCablePowerRequirement); |
| 7985 | } | ||
| 7985 | break; | 7986 | break; |
| 7986 | 7987 | ||
| 7987 | default: /* ignore the rest */ | 7988 | default: /* ignore the rest */ |
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 3408578b08d6..ff41c310c900 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c | |||
| @@ -230,6 +230,7 @@ static struct { | |||
| 230 | {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, | 230 | {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, |
| 231 | {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC}, | 231 | {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC}, |
| 232 | {"Promise", "", NULL, BLIST_SPARSELUN}, | 232 | {"Promise", "", NULL, BLIST_SPARSELUN}, |
| 233 | {"QEMU", "QEMU CD-ROM", NULL, BLIST_SKIP_VPD_PAGES}, | ||
| 233 | {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024}, | 234 | {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024}, |
| 234 | {"SYNOLOGY", "iSCSI Storage", NULL, BLIST_MAX_1024}, | 235 | {"SYNOLOGY", "iSCSI Storage", NULL, BLIST_MAX_1024}, |
| 235 | {"QUANTUM", "XP34301", "1071", BLIST_NOTQ}, | 236 | {"QUANTUM", "XP34301", "1071", BLIST_NOTQ}, |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index b2e332af0f51..c71344aebdbb 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
| @@ -821,9 +821,12 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
| 821 | } | 821 | } |
| 822 | 822 | ||
| 823 | /* | 823 | /* |
| 824 | * If we finished all bytes in the request we are done now. | 824 | * special case: failed zero length commands always need to |
| 825 | * drop down into the retry code. Otherwise, if we finished | ||
| 826 | * all bytes in the request we are done now. | ||
| 825 | */ | 827 | */ |
| 826 | if (!scsi_end_request(req, error, good_bytes, 0)) | 828 | if (!(blk_rq_bytes(req) == 0 && error) && |
| 829 | !scsi_end_request(req, error, good_bytes, 0)) | ||
| 827 | return; | 830 | return; |
| 828 | 831 | ||
| 829 | /* | 832 | /* |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 428c03ef02b2..60bff78e9ead 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
| @@ -1398,11 +1398,15 @@ static int media_not_present(struct scsi_disk *sdkp, | |||
| 1398 | **/ | 1398 | **/ |
| 1399 | static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing) | 1399 | static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing) |
| 1400 | { | 1400 | { |
| 1401 | struct scsi_disk *sdkp = scsi_disk(disk); | 1401 | struct scsi_disk *sdkp = scsi_disk_get(disk); |
| 1402 | struct scsi_device *sdp = sdkp->device; | 1402 | struct scsi_device *sdp; |
| 1403 | struct scsi_sense_hdr *sshdr = NULL; | 1403 | struct scsi_sense_hdr *sshdr = NULL; |
| 1404 | int retval; | 1404 | int retval; |
| 1405 | 1405 | ||
| 1406 | if (!sdkp) | ||
| 1407 | return 0; | ||
| 1408 | |||
| 1409 | sdp = sdkp->device; | ||
| 1406 | SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n")); | 1410 | SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n")); |
| 1407 | 1411 | ||
| 1408 | /* | 1412 | /* |
| @@ -1459,6 +1463,7 @@ out: | |||
| 1459 | kfree(sshdr); | 1463 | kfree(sshdr); |
| 1460 | retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0; | 1464 | retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0; |
| 1461 | sdp->changed = 0; | 1465 | sdp->changed = 0; |
| 1466 | scsi_disk_put(sdkp); | ||
| 1462 | return retval; | 1467 | return retval; |
| 1463 | } | 1468 | } |
| 1464 | 1469 | ||
| @@ -2862,10 +2867,10 @@ static int sd_revalidate_disk(struct gendisk *disk) | |||
| 2862 | if (sdkp->opt_xfer_blocks && | 2867 | if (sdkp->opt_xfer_blocks && |
| 2863 | sdkp->opt_xfer_blocks <= dev_max && | 2868 | sdkp->opt_xfer_blocks <= dev_max && |
| 2864 | sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS && | 2869 | sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS && |
| 2865 | sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_SIZE) | 2870 | logical_to_bytes(sdp, sdkp->opt_xfer_blocks) >= PAGE_SIZE) { |
| 2866 | rw_max = q->limits.io_opt = | 2871 | q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks); |
| 2867 | sdkp->opt_xfer_blocks * sdp->sector_size; | 2872 | rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks); |
| 2868 | else | 2873 | } else |
| 2869 | rw_max = BLK_DEF_MAX_SECTORS; | 2874 | rw_max = BLK_DEF_MAX_SECTORS; |
| 2870 | 2875 | ||
| 2871 | /* Combine with controller limits */ | 2876 | /* Combine with controller limits */ |
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 654630bb7d0e..765a6f1ac1b7 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h | |||
| @@ -151,6 +151,11 @@ static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blo | |||
| 151 | return blocks << (ilog2(sdev->sector_size) - 9); | 151 | return blocks << (ilog2(sdev->sector_size) - 9); |
| 152 | } | 152 | } |
| 153 | 153 | ||
| 154 | static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t blocks) | ||
| 155 | { | ||
| 156 | return blocks * sdev->sector_size; | ||
| 157 | } | ||
| 158 | |||
| 154 | /* | 159 | /* |
| 155 | * A DIF-capable target device can be formatted with different | 160 | * A DIF-capable target device can be formatted with different |
| 156 | * protection schemes. Currently 0 through 3 are defined: | 161 | * protection schemes. Currently 0 through 3 are defined: |
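As a worked example with assumed numbers: for a 4096-byte logical block size and opt_xfer_blocks = 512, logical_to_bytes() gives 512 * 4096 = 2 MiB for q->limits.io_opt, while logical_to_sectors() gives 512 << (12 - 9) = 4096 512-byte sectors for rw_max — the same 2 MiB expressed in the block layer's two different units, which is why sd_revalidate_disk() above now uses one helper for each.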
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index 6ceac4f2d4b2..5b4b47ed948b 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c | |||
| @@ -857,14 +857,6 @@ __cpufreq_cooling_register(struct device_node *np, | |||
| 857 | goto free_power_table; | 857 | goto free_power_table; |
| 858 | } | 858 | } |
| 859 | 859 | ||
| 860 | snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d", | ||
| 861 | cpufreq_dev->id); | ||
| 862 | |||
| 863 | cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev, | ||
| 864 | &cpufreq_cooling_ops); | ||
| 865 | if (IS_ERR(cool_dev)) | ||
| 866 | goto remove_idr; | ||
| 867 | |||
| 868 | /* Fill freq-table in descending order of frequencies */ | 860 | /* Fill freq-table in descending order of frequencies */ |
| 869 | for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) { | 861 | for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) { |
| 870 | freq = find_next_max(table, freq); | 862 | freq = find_next_max(table, freq); |
| @@ -877,6 +869,14 @@ __cpufreq_cooling_register(struct device_node *np, | |||
| 877 | pr_debug("%s: freq:%u KHz\n", __func__, freq); | 869 | pr_debug("%s: freq:%u KHz\n", __func__, freq); |
| 878 | } | 870 | } |
| 879 | 871 | ||
| 872 | snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d", | ||
| 873 | cpufreq_dev->id); | ||
| 874 | |||
| 875 | cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev, | ||
| 876 | &cpufreq_cooling_ops); | ||
| 877 | if (IS_ERR(cool_dev)) | ||
| 878 | goto remove_idr; | ||
| 879 | |||
| 880 | cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0]; | 880 | cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0]; |
| 881 | cpufreq_dev->cool_dev = cool_dev; | 881 | cpufreq_dev->cool_dev = cool_dev; |
| 882 | 882 | ||
diff --git a/drivers/thermal/int340x_thermal/int3406_thermal.c b/drivers/thermal/int340x_thermal/int3406_thermal.c index 13d431cbd29e..a578cd257db4 100644 --- a/drivers/thermal/int340x_thermal/int3406_thermal.c +++ b/drivers/thermal/int340x_thermal/int3406_thermal.c | |||
| @@ -177,7 +177,7 @@ static int int3406_thermal_probe(struct platform_device *pdev) | |||
| 177 | return -ENODEV; | 177 | return -ENODEV; |
| 178 | d->raw_bd = bd; | 178 | d->raw_bd = bd; |
| 179 | 179 | ||
| 180 | ret = acpi_video_get_levels(ACPI_COMPANION(&pdev->dev), &d->br); | 180 | ret = acpi_video_get_levels(ACPI_COMPANION(&pdev->dev), &d->br, NULL); |
| 181 | if (ret) | 181 | if (ret) |
| 182 | return ret; | 182 | return ret; |
| 183 | 183 | ||
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig index 82c4d2e45319..95103054c0e4 100644 --- a/drivers/tty/Kconfig +++ b/drivers/tty/Kconfig | |||
| @@ -120,17 +120,6 @@ config UNIX98_PTYS | |||
| 120 | All modern Linux systems use the Unix98 ptys. Say Y unless | 120 | All modern Linux systems use the Unix98 ptys. Say Y unless |
| 121 | you're on an embedded system and want to conserve memory. | 121 | you're on an embedded system and want to conserve memory. |
| 122 | 122 | ||
| 123 | config DEVPTS_MULTIPLE_INSTANCES | ||
| 124 | bool "Support multiple instances of devpts" | ||
| 125 | depends on UNIX98_PTYS | ||
| 126 | default n | ||
| 127 | ---help--- | ||
| 128 | Enable support for multiple instances of devpts filesystem. | ||
| 129 | If you want to have isolated PTY namespaces (eg: in containers), | ||
| 130 | say Y here. Otherwise, say N. If enabled, each mount of devpts | ||
| 131 | filesystem with the '-o newinstance' option will create an | ||
| 132 | independent PTY namespace. | ||
| 133 | |||
| 134 | config LEGACY_PTYS | 123 | config LEGACY_PTYS |
| 135 | bool "Legacy (BSD) PTY support" | 124 | bool "Legacy (BSD) PTY support" |
| 136 | default y | 125 | default y |
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index dd4b8417e7f4..f856c4544eea 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c | |||
| @@ -668,7 +668,7 @@ static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty) | |||
| 668 | else | 668 | else |
| 669 | fsi = tty->link->driver_data; | 669 | fsi = tty->link->driver_data; |
| 670 | devpts_kill_index(fsi, tty->index); | 670 | devpts_kill_index(fsi, tty->index); |
| 671 | devpts_put_ref(fsi); | 671 | devpts_release(fsi); |
| 672 | } | 672 | } |
| 673 | 673 | ||
| 674 | static const struct tty_operations ptm_unix98_ops = { | 674 | static const struct tty_operations ptm_unix98_ops = { |
| @@ -733,10 +733,11 @@ static int ptmx_open(struct inode *inode, struct file *filp) | |||
| 733 | if (retval) | 733 | if (retval) |
| 734 | return retval; | 734 | return retval; |
| 735 | 735 | ||
| 736 | fsi = devpts_get_ref(inode, filp); | 736 | fsi = devpts_acquire(filp); |
| 737 | retval = -ENODEV; | 737 | if (IS_ERR(fsi)) { |
| 738 | if (!fsi) | 738 | retval = PTR_ERR(fsi); |
| 739 | goto out_free_file; | 739 | goto out_free_file; |
| 740 | } | ||
| 740 | 741 | ||
| 741 | /* find a device that is not in use. */ | 742 | /* find a device that is not in use. */ |
| 742 | mutex_lock(&devpts_mutex); | 743 | mutex_lock(&devpts_mutex); |
| @@ -745,7 +746,7 @@ static int ptmx_open(struct inode *inode, struct file *filp) | |||
| 745 | 746 | ||
| 746 | retval = index; | 747 | retval = index; |
| 747 | if (index < 0) | 748 | if (index < 0) |
| 748 | goto out_put_ref; | 749 | goto out_put_fsi; |
| 749 | 750 | ||
| 750 | 751 | ||
| 751 | mutex_lock(&tty_mutex); | 752 | mutex_lock(&tty_mutex); |
| @@ -789,8 +790,8 @@ err_release: | |||
| 789 | return retval; | 790 | return retval; |
| 790 | out: | 791 | out: |
| 791 | devpts_kill_index(fsi, index); | 792 | devpts_kill_index(fsi, index); |
| 792 | out_put_ref: | 793 | out_put_fsi: |
| 793 | devpts_put_ref(fsi); | 794 | devpts_release(fsi); |
| 794 | out_free_file: | 795 | out_free_file: |
| 795 | tty_free_file(filp); | 796 | tty_free_file(filp); |
| 796 | return retval; | 797 | return retval; |
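devpts_acquire() now reports failure through an error pointer rather than NULL, so ptmx_open() propagates the precise errno instead of a blanket -ENODEV. A generic sketch of that return style, with placeholder type and helper names (not the devpts API):

#include <linux/err.h>
#include <linux/errno.h>

struct pts_fs_ctx;				/* placeholder type */
struct pts_fs_ctx *lookup_ctx(int key);		/* placeholder lookup */

/* Return the context, or an ERR_PTR() encoding the reason for failure. */
static struct pts_fs_ctx *ctx_acquire(int key)
{
	struct pts_fs_ctx *ctx = lookup_ctx(key);

	if (!ctx)
		return ERR_PTR(-ENODEV);
	return ctx;
}

static int use_ctx(int key)
{
	struct pts_fs_ctx *ctx = ctx_acquire(key);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);	/* propagate the exact errno */
	/* ... */
	return 0;
}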
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index 93601407dab8..688691d9058d 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c | |||
| @@ -749,7 +749,8 @@ static int vfio_vpd_config_write(struct vfio_pci_device *vdev, int pos, | |||
| 749 | if (pci_write_vpd(pdev, addr & ~PCI_VPD_ADDR_F, 4, &data) != 4) | 749 | if (pci_write_vpd(pdev, addr & ~PCI_VPD_ADDR_F, 4, &data) != 4) |
| 750 | return count; | 750 | return count; |
| 751 | } else { | 751 | } else { |
| 752 | if (pci_read_vpd(pdev, addr, 4, &data) != 4) | 752 | data = 0; |
| 753 | if (pci_read_vpd(pdev, addr, 4, &data) < 0) | ||
| 753 | return count; | 754 | return count; |
| 754 | *pdata = cpu_to_le32(data); | 755 | *pdata = cpu_to_le32(data); |
| 755 | } | 756 | } |
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index e9ea3fef144a..15ecfc9c5f6c 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c | |||
| @@ -228,9 +228,9 @@ static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd) | |||
| 228 | 228 | ||
| 229 | static void vfio_intx_disable(struct vfio_pci_device *vdev) | 229 | static void vfio_intx_disable(struct vfio_pci_device *vdev) |
| 230 | { | 230 | { |
| 231 | vfio_intx_set_signal(vdev, -1); | ||
| 232 | vfio_virqfd_disable(&vdev->ctx[0].unmask); | 231 | vfio_virqfd_disable(&vdev->ctx[0].unmask); |
| 233 | vfio_virqfd_disable(&vdev->ctx[0].mask); | 232 | vfio_virqfd_disable(&vdev->ctx[0].mask); |
| 233 | vfio_intx_set_signal(vdev, -1); | ||
| 234 | vdev->irq_type = VFIO_PCI_NUM_IRQS; | 234 | vdev->irq_type = VFIO_PCI_NUM_IRQS; |
| 235 | vdev->num_ctx = 0; | 235 | vdev->num_ctx = 0; |
| 236 | kfree(vdev->ctx); | 236 | kfree(vdev->ctx); |
| @@ -401,13 +401,13 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix) | |||
| 401 | struct pci_dev *pdev = vdev->pdev; | 401 | struct pci_dev *pdev = vdev->pdev; |
| 402 | int i; | 402 | int i; |
| 403 | 403 | ||
| 404 | vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix); | ||
| 405 | |||
| 406 | for (i = 0; i < vdev->num_ctx; i++) { | 404 | for (i = 0; i < vdev->num_ctx; i++) { |
| 407 | vfio_virqfd_disable(&vdev->ctx[i].unmask); | 405 | vfio_virqfd_disable(&vdev->ctx[i].unmask); |
| 408 | vfio_virqfd_disable(&vdev->ctx[i].mask); | 406 | vfio_virqfd_disable(&vdev->ctx[i].mask); |
| 409 | } | 407 | } |
| 410 | 408 | ||
| 409 | vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix); | ||
| 410 | |||
| 411 | if (msix) { | 411 | if (msix) { |
| 412 | pci_disable_msix(vdev->pdev); | 412 | pci_disable_msix(vdev->pdev); |
| 413 | kfree(vdev->msix); | 413 | kfree(vdev->msix); |
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 15a65823aad9..2ba19424e4a1 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c | |||
| @@ -515,7 +515,7 @@ static int map_try_harder(struct vfio_domain *domain, dma_addr_t iova, | |||
| 515 | unsigned long pfn, long npage, int prot) | 515 | unsigned long pfn, long npage, int prot) |
| 516 | { | 516 | { |
| 517 | long i; | 517 | long i; |
| 518 | int ret; | 518 | int ret = 0; |
| 519 | 519 | ||
| 520 | for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) { | 520 | for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) { |
| 521 | ret = iommu_map(domain->domain, iova, | 521 | ret = iommu_map(domain->domain, iova, |
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c index 8ea531d2652c..bbfe7e2d4332 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c | |||
| @@ -51,8 +51,8 @@ static void hdmi_core_ddc_init(struct hdmi_core_data *core) | |||
| 51 | { | 51 | { |
| 52 | void __iomem *base = core->base; | 52 | void __iomem *base = core->base; |
| 53 | const unsigned long long iclk = 266000000; /* DSS L3 ICLK */ | 53 | const unsigned long long iclk = 266000000; /* DSS L3 ICLK */ |
| 54 | const unsigned ss_scl_high = 4000; /* ns */ | 54 | const unsigned ss_scl_high = 4600; /* ns */ |
| 55 | const unsigned ss_scl_low = 4700; /* ns */ | 55 | const unsigned ss_scl_low = 5400; /* ns */ |
| 56 | const unsigned fs_scl_high = 600; /* ns */ | 56 | const unsigned fs_scl_high = 600; /* ns */ |
| 57 | const unsigned fs_scl_low = 1300; /* ns */ | 57 | const unsigned fs_scl_low = 1300; /* ns */ |
| 58 | const unsigned sda_hold = 1000; /* ns */ | 58 | const unsigned sda_hold = 1000; /* ns */ |
| @@ -442,7 +442,7 @@ static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core, | |||
| 442 | 442 | ||
| 443 | c = (ptr[1] >> 6) & 0x3; | 443 | c = (ptr[1] >> 6) & 0x3; |
| 444 | m = (ptr[1] >> 4) & 0x3; | 444 | m = (ptr[1] >> 4) & 0x3; |
| 445 | r = (ptr[1] >> 0) & 0x3; | 445 | r = (ptr[1] >> 0) & 0xf; |
| 446 | 446 | ||
| 447 | itc = (ptr[2] >> 7) & 0x1; | 447 | itc = (ptr[2] >> 7) & 0x1; |
| 448 | ec = (ptr[2] >> 4) & 0x7; | 448 | ec = (ptr[2] >> 4) & 0x7; |
