diff options
Diffstat (limited to 'drivers')
566 files changed, 6761 insertions, 3839 deletions
diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c index 4c745bf389fe..161f91539ae6 100644 --- a/drivers/acpi/nfit/mce.c +++ b/drivers/acpi/nfit/mce.c | |||
@@ -42,7 +42,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val, | |||
42 | list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { | 42 | list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { |
43 | struct acpi_nfit_system_address *spa = nfit_spa->spa; | 43 | struct acpi_nfit_system_address *spa = nfit_spa->spa; |
44 | 44 | ||
45 | if (nfit_spa_type(spa) == NFIT_SPA_PM) | 45 | if (nfit_spa_type(spa) != NFIT_SPA_PM) |
46 | continue; | 46 | continue; |
47 | /* find the spa that covers the mce addr */ | 47 | /* find the spa that covers the mce addr */ |
48 | if (spa->address > mce->addr) | 48 | if (spa->address > mce->addr) |
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index ad9fc84a8601..e878fc799af7 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c | |||
@@ -2054,7 +2054,7 @@ int __init acpi_scan_init(void) | |||
2054 | 2054 | ||
2055 | static struct acpi_probe_entry *ape; | 2055 | static struct acpi_probe_entry *ape; |
2056 | static int acpi_probe_count; | 2056 | static int acpi_probe_count; |
2057 | static DEFINE_SPINLOCK(acpi_probe_lock); | 2057 | static DEFINE_MUTEX(acpi_probe_mutex); |
2058 | 2058 | ||
2059 | static int __init acpi_match_madt(struct acpi_subtable_header *header, | 2059 | static int __init acpi_match_madt(struct acpi_subtable_header *header, |
2060 | const unsigned long end) | 2060 | const unsigned long end) |
@@ -2073,7 +2073,7 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr) | |||
2073 | if (acpi_disabled) | 2073 | if (acpi_disabled) |
2074 | return 0; | 2074 | return 0; |
2075 | 2075 | ||
2076 | spin_lock(&acpi_probe_lock); | 2076 | mutex_lock(&acpi_probe_mutex); |
2077 | for (ape = ap_head; nr; ape++, nr--) { | 2077 | for (ape = ap_head; nr; ape++, nr--) { |
2078 | if (ACPI_COMPARE_NAME(ACPI_SIG_MADT, ape->id)) { | 2078 | if (ACPI_COMPARE_NAME(ACPI_SIG_MADT, ape->id)) { |
2079 | acpi_probe_count = 0; | 2079 | acpi_probe_count = 0; |
@@ -2086,7 +2086,7 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr) | |||
2086 | count++; | 2086 | count++; |
2087 | } | 2087 | } |
2088 | } | 2088 | } |
2089 | spin_unlock(&acpi_probe_lock); | 2089 | mutex_unlock(&acpi_probe_mutex); |
2090 | 2090 | ||
2091 | return count; | 2091 | return count; |
2092 | } | 2092 | } |
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 7461a587b39b..dcf2c724fd06 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c | |||
@@ -2524,7 +2524,7 @@ static int ahci_host_activate_multi_irqs(struct ata_host *host, | |||
2524 | 2524 | ||
2525 | /* Do not receive interrupts sent by dummy ports */ | 2525 | /* Do not receive interrupts sent by dummy ports */ |
2526 | if (!pp) { | 2526 | if (!pp) { |
2527 | disable_irq(irq + i); | 2527 | disable_irq(irq); |
2528 | continue; | 2528 | continue; |
2529 | } | 2529 | } |
2530 | 2530 | ||
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c index 633aa2934a18..44f97ad3c88d 100644 --- a/drivers/ata/pata_ninja32.c +++ b/drivers/ata/pata_ninja32.c | |||
@@ -144,7 +144,7 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id) | |||
144 | ap->ioaddr.altstatus_addr = base + 0x1E; | 144 | ap->ioaddr.altstatus_addr = base + 0x1E; |
145 | ap->ioaddr.bmdma_addr = base; | 145 | ap->ioaddr.bmdma_addr = base; |
146 | ata_sff_std_ports(&ap->ioaddr); | 146 | ata_sff_std_ports(&ap->ioaddr); |
147 | ap->pflags = ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE; | 147 | ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE; |
148 | 148 | ||
149 | ninja32_program(base); | 149 | ninja32_program(base); |
150 | /* FIXME: Should we disable them at remove ? */ | 150 | /* FIXME: Should we disable them at remove ? */ |
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index e097d355cc04..82a081ea4317 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c | |||
@@ -301,7 +301,7 @@ static int rpm_idle(struct device *dev, int rpmflags) | |||
301 | int (*callback)(struct device *); | 301 | int (*callback)(struct device *); |
302 | int retval; | 302 | int retval; |
303 | 303 | ||
304 | trace_rpm_idle(dev, rpmflags); | 304 | trace_rpm_idle_rcuidle(dev, rpmflags); |
305 | retval = rpm_check_suspend_allowed(dev); | 305 | retval = rpm_check_suspend_allowed(dev); |
306 | if (retval < 0) | 306 | if (retval < 0) |
307 | ; /* Conditions are wrong. */ | 307 | ; /* Conditions are wrong. */ |
@@ -337,7 +337,7 @@ static int rpm_idle(struct device *dev, int rpmflags) | |||
337 | dev->power.request_pending = true; | 337 | dev->power.request_pending = true; |
338 | queue_work(pm_wq, &dev->power.work); | 338 | queue_work(pm_wq, &dev->power.work); |
339 | } | 339 | } |
340 | trace_rpm_return_int(dev, _THIS_IP_, 0); | 340 | trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0); |
341 | return 0; | 341 | return 0; |
342 | } | 342 | } |
343 | 343 | ||
@@ -352,7 +352,7 @@ static int rpm_idle(struct device *dev, int rpmflags) | |||
352 | wake_up_all(&dev->power.wait_queue); | 352 | wake_up_all(&dev->power.wait_queue); |
353 | 353 | ||
354 | out: | 354 | out: |
355 | trace_rpm_return_int(dev, _THIS_IP_, retval); | 355 | trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval); |
356 | return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO); | 356 | return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO); |
357 | } | 357 | } |
358 | 358 | ||
@@ -419,7 +419,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) | |||
419 | struct device *parent = NULL; | 419 | struct device *parent = NULL; |
420 | int retval; | 420 | int retval; |
421 | 421 | ||
422 | trace_rpm_suspend(dev, rpmflags); | 422 | trace_rpm_suspend_rcuidle(dev, rpmflags); |
423 | 423 | ||
424 | repeat: | 424 | repeat: |
425 | retval = rpm_check_suspend_allowed(dev); | 425 | retval = rpm_check_suspend_allowed(dev); |
@@ -549,7 +549,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) | |||
549 | } | 549 | } |
550 | 550 | ||
551 | out: | 551 | out: |
552 | trace_rpm_return_int(dev, _THIS_IP_, retval); | 552 | trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval); |
553 | 553 | ||
554 | return retval; | 554 | return retval; |
555 | 555 | ||
@@ -601,7 +601,7 @@ static int rpm_resume(struct device *dev, int rpmflags) | |||
601 | struct device *parent = NULL; | 601 | struct device *parent = NULL; |
602 | int retval = 0; | 602 | int retval = 0; |
603 | 603 | ||
604 | trace_rpm_resume(dev, rpmflags); | 604 | trace_rpm_resume_rcuidle(dev, rpmflags); |
605 | 605 | ||
606 | repeat: | 606 | repeat: |
607 | if (dev->power.runtime_error) | 607 | if (dev->power.runtime_error) |
@@ -764,7 +764,7 @@ static int rpm_resume(struct device *dev, int rpmflags) | |||
764 | spin_lock_irq(&dev->power.lock); | 764 | spin_lock_irq(&dev->power.lock); |
765 | } | 765 | } |
766 | 766 | ||
767 | trace_rpm_return_int(dev, _THIS_IP_, retval); | 767 | trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval); |
768 | 768 | ||
769 | return retval; | 769 | return retval; |
770 | } | 770 | } |
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index aa56af87d941..b11af3f2c1db 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c | |||
@@ -404,6 +404,7 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg, | |||
404 | unsigned int new_base_reg, new_top_reg; | 404 | unsigned int new_base_reg, new_top_reg; |
405 | unsigned int min, max; | 405 | unsigned int min, max; |
406 | unsigned int max_dist; | 406 | unsigned int max_dist; |
407 | unsigned int dist, best_dist = UINT_MAX; | ||
407 | 408 | ||
408 | max_dist = map->reg_stride * sizeof(*rbnode_tmp) / | 409 | max_dist = map->reg_stride * sizeof(*rbnode_tmp) / |
409 | map->cache_word_size; | 410 | map->cache_word_size; |
@@ -423,24 +424,41 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg, | |||
423 | &base_reg, &top_reg); | 424 | &base_reg, &top_reg); |
424 | 425 | ||
425 | if (base_reg <= max && top_reg >= min) { | 426 | if (base_reg <= max && top_reg >= min) { |
426 | new_base_reg = min(reg, base_reg); | 427 | if (reg < base_reg) |
427 | new_top_reg = max(reg, top_reg); | 428 | dist = base_reg - reg; |
428 | } else { | 429 | else if (reg > top_reg) |
429 | if (max < base_reg) | 430 | dist = reg - top_reg; |
430 | node = node->rb_left; | ||
431 | else | 431 | else |
432 | node = node->rb_right; | 432 | dist = 0; |
433 | 433 | if (dist < best_dist) { | |
434 | continue; | 434 | rbnode = rbnode_tmp; |
435 | best_dist = dist; | ||
436 | new_base_reg = min(reg, base_reg); | ||
437 | new_top_reg = max(reg, top_reg); | ||
438 | } | ||
435 | } | 439 | } |
436 | 440 | ||
437 | ret = regcache_rbtree_insert_to_block(map, rbnode_tmp, | 441 | /* |
442 | * Keep looking, we want to choose the closest block, | ||
443 | * otherwise we might end up creating overlapping | ||
444 | * blocks, which breaks the rbtree. | ||
445 | */ | ||
446 | if (reg < base_reg) | ||
447 | node = node->rb_left; | ||
448 | else if (reg > top_reg) | ||
449 | node = node->rb_right; | ||
450 | else | ||
451 | break; | ||
452 | } | ||
453 | |||
454 | if (rbnode) { | ||
455 | ret = regcache_rbtree_insert_to_block(map, rbnode, | ||
438 | new_base_reg, | 456 | new_base_reg, |
439 | new_top_reg, reg, | 457 | new_top_reg, reg, |
440 | value); | 458 | value); |
441 | if (ret) | 459 | if (ret) |
442 | return ret; | 460 | return ret; |
443 | rbtree_ctx->cached_rbnode = rbnode_tmp; | 461 | rbtree_ctx->cached_rbnode = rbnode; |
444 | return 0; | 462 | return 0; |
445 | } | 463 | } |
446 | 464 | ||
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c index df7ff7290821..4e582561e1e7 100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c | |||
@@ -38,10 +38,11 @@ static int regcache_hw_init(struct regmap *map) | |||
38 | 38 | ||
39 | /* calculate the size of reg_defaults */ | 39 | /* calculate the size of reg_defaults */ |
40 | for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) | 40 | for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) |
41 | if (!regmap_volatile(map, i * map->reg_stride)) | 41 | if (regmap_readable(map, i * map->reg_stride) && |
42 | !regmap_volatile(map, i * map->reg_stride)) | ||
42 | count++; | 43 | count++; |
43 | 44 | ||
44 | /* all registers are volatile, so just bypass */ | 45 | /* all registers are unreadable or volatile, so just bypass */ |
45 | if (!count) { | 46 | if (!count) { |
46 | map->cache_bypass = true; | 47 | map->cache_bypass = true; |
47 | return 0; | 48 | return 0; |
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 51fa7d66a393..e964d068874d 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c | |||
@@ -1474,6 +1474,12 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg, | |||
1474 | ret = map->bus->write(map->bus_context, buf, len); | 1474 | ret = map->bus->write(map->bus_context, buf, len); |
1475 | 1475 | ||
1476 | kfree(buf); | 1476 | kfree(buf); |
1477 | } else if (ret != 0 && !map->cache_bypass && map->format.parse_val) { | ||
1478 | /* regcache_drop_region() takes lock that we already have, | ||
1479 | * thus call map->cache_ops->drop() directly | ||
1480 | */ | ||
1481 | if (map->cache_ops && map->cache_ops->drop) | ||
1482 | map->cache_ops->drop(map, reg, reg + 1); | ||
1477 | } | 1483 | } |
1478 | 1484 | ||
1479 | trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes); | 1485 | trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes); |
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index b71a9c767009..e3d8e4ced4a2 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c | |||
@@ -3706,22 +3706,21 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) | |||
3706 | if (UFDCS->rawcmd == 1) | 3706 | if (UFDCS->rawcmd == 1) |
3707 | UFDCS->rawcmd = 2; | 3707 | UFDCS->rawcmd = 2; |
3708 | 3708 | ||
3709 | if (mode & (FMODE_READ|FMODE_WRITE)) { | 3709 | if (!(mode & FMODE_NDELAY)) { |
3710 | UDRS->last_checked = 0; | 3710 | if (mode & (FMODE_READ|FMODE_WRITE)) { |
3711 | clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); | 3711 | UDRS->last_checked = 0; |
3712 | check_disk_change(bdev); | 3712 | clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); |
3713 | if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags)) | 3713 | check_disk_change(bdev); |
3714 | goto out; | 3714 | if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags)) |
3715 | if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags)) | 3715 | goto out; |
3716 | if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags)) | ||
3717 | goto out; | ||
3718 | } | ||
3719 | res = -EROFS; | ||
3720 | if ((mode & FMODE_WRITE) && | ||
3721 | !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags)) | ||
3716 | goto out; | 3722 | goto out; |
3717 | } | 3723 | } |
3718 | |||
3719 | res = -EROFS; | ||
3720 | |||
3721 | if ((mode & FMODE_WRITE) && | ||
3722 | !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags)) | ||
3723 | goto out; | ||
3724 | |||
3725 | mutex_unlock(&open_lock); | 3724 | mutex_unlock(&open_lock); |
3726 | mutex_unlock(&floppy_mutex); | 3725 | mutex_unlock(&floppy_mutex); |
3727 | return 0; | 3726 | return 0; |
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index be4fea6a5dd3..88ef6d4729b4 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
@@ -189,6 +189,8 @@ struct blkfront_info | |||
189 | struct mutex mutex; | 189 | struct mutex mutex; |
190 | struct xenbus_device *xbdev; | 190 | struct xenbus_device *xbdev; |
191 | struct gendisk *gd; | 191 | struct gendisk *gd; |
192 | u16 sector_size; | ||
193 | unsigned int physical_sector_size; | ||
192 | int vdevice; | 194 | int vdevice; |
193 | blkif_vdev_t handle; | 195 | blkif_vdev_t handle; |
194 | enum blkif_state connected; | 196 | enum blkif_state connected; |
@@ -910,9 +912,45 @@ static struct blk_mq_ops blkfront_mq_ops = { | |||
910 | .map_queue = blk_mq_map_queue, | 912 | .map_queue = blk_mq_map_queue, |
911 | }; | 913 | }; |
912 | 914 | ||
915 | static void blkif_set_queue_limits(struct blkfront_info *info) | ||
916 | { | ||
917 | struct request_queue *rq = info->rq; | ||
918 | struct gendisk *gd = info->gd; | ||
919 | unsigned int segments = info->max_indirect_segments ? : | ||
920 | BLKIF_MAX_SEGMENTS_PER_REQUEST; | ||
921 | |||
922 | queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq); | ||
923 | |||
924 | if (info->feature_discard) { | ||
925 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq); | ||
926 | blk_queue_max_discard_sectors(rq, get_capacity(gd)); | ||
927 | rq->limits.discard_granularity = info->discard_granularity; | ||
928 | rq->limits.discard_alignment = info->discard_alignment; | ||
929 | if (info->feature_secdiscard) | ||
930 | queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq); | ||
931 | } | ||
932 | |||
933 | /* Hard sector size and max sectors impersonate the equiv. hardware. */ | ||
934 | blk_queue_logical_block_size(rq, info->sector_size); | ||
935 | blk_queue_physical_block_size(rq, info->physical_sector_size); | ||
936 | blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512); | ||
937 | |||
938 | /* Each segment in a request is up to an aligned page in size. */ | ||
939 | blk_queue_segment_boundary(rq, PAGE_SIZE - 1); | ||
940 | blk_queue_max_segment_size(rq, PAGE_SIZE); | ||
941 | |||
942 | /* Ensure a merged request will fit in a single I/O ring slot. */ | ||
943 | blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG); | ||
944 | |||
945 | /* Make sure buffer addresses are sector-aligned. */ | ||
946 | blk_queue_dma_alignment(rq, 511); | ||
947 | |||
948 | /* Make sure we don't use bounce buffers. */ | ||
949 | blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY); | ||
950 | } | ||
951 | |||
913 | static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size, | 952 | static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size, |
914 | unsigned int physical_sector_size, | 953 | unsigned int physical_sector_size) |
915 | unsigned int segments) | ||
916 | { | 954 | { |
917 | struct request_queue *rq; | 955 | struct request_queue *rq; |
918 | struct blkfront_info *info = gd->private_data; | 956 | struct blkfront_info *info = gd->private_data; |
@@ -944,36 +982,11 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size, | |||
944 | } | 982 | } |
945 | 983 | ||
946 | rq->queuedata = info; | 984 | rq->queuedata = info; |
947 | queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq); | 985 | info->rq = gd->queue = rq; |
948 | 986 | info->gd = gd; | |
949 | if (info->feature_discard) { | 987 | info->sector_size = sector_size; |
950 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq); | 988 | info->physical_sector_size = physical_sector_size; |
951 | blk_queue_max_discard_sectors(rq, get_capacity(gd)); | 989 | blkif_set_queue_limits(info); |
952 | rq->limits.discard_granularity = info->discard_granularity; | ||
953 | rq->limits.discard_alignment = info->discard_alignment; | ||
954 | if (info->feature_secdiscard) | ||
955 | queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq); | ||
956 | } | ||
957 | |||
958 | /* Hard sector size and max sectors impersonate the equiv. hardware. */ | ||
959 | blk_queue_logical_block_size(rq, sector_size); | ||
960 | blk_queue_physical_block_size(rq, physical_sector_size); | ||
961 | blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512); | ||
962 | |||
963 | /* Each segment in a request is up to an aligned page in size. */ | ||
964 | blk_queue_segment_boundary(rq, PAGE_SIZE - 1); | ||
965 | blk_queue_max_segment_size(rq, PAGE_SIZE); | ||
966 | |||
967 | /* Ensure a merged request will fit in a single I/O ring slot. */ | ||
968 | blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG); | ||
969 | |||
970 | /* Make sure buffer addresses are sector-aligned. */ | ||
971 | blk_queue_dma_alignment(rq, 511); | ||
972 | |||
973 | /* Make sure we don't use bounce buffers. */ | ||
974 | blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY); | ||
975 | |||
976 | gd->queue = rq; | ||
977 | 990 | ||
978 | return 0; | 991 | return 0; |
979 | } | 992 | } |
@@ -1136,16 +1149,11 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity, | |||
1136 | gd->private_data = info; | 1149 | gd->private_data = info; |
1137 | set_capacity(gd, capacity); | 1150 | set_capacity(gd, capacity); |
1138 | 1151 | ||
1139 | if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size, | 1152 | if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size)) { |
1140 | info->max_indirect_segments ? : | ||
1141 | BLKIF_MAX_SEGMENTS_PER_REQUEST)) { | ||
1142 | del_gendisk(gd); | 1153 | del_gendisk(gd); |
1143 | goto release; | 1154 | goto release; |
1144 | } | 1155 | } |
1145 | 1156 | ||
1146 | info->rq = gd->queue; | ||
1147 | info->gd = gd; | ||
1148 | |||
1149 | xlvbd_flush(info); | 1157 | xlvbd_flush(info); |
1150 | 1158 | ||
1151 | if (vdisk_info & VDISK_READONLY) | 1159 | if (vdisk_info & VDISK_READONLY) |
@@ -1315,7 +1323,7 @@ free_shadow: | |||
1315 | rinfo->ring_ref[i] = GRANT_INVALID_REF; | 1323 | rinfo->ring_ref[i] = GRANT_INVALID_REF; |
1316 | } | 1324 | } |
1317 | } | 1325 | } |
1318 | free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * PAGE_SIZE)); | 1326 | free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE)); |
1319 | rinfo->ring.sring = NULL; | 1327 | rinfo->ring.sring = NULL; |
1320 | 1328 | ||
1321 | if (rinfo->irq) | 1329 | if (rinfo->irq) |
@@ -2007,8 +2015,10 @@ static int blkif_recover(struct blkfront_info *info) | |||
2007 | struct split_bio *split_bio; | 2015 | struct split_bio *split_bio; |
2008 | 2016 | ||
2009 | blkfront_gather_backend_features(info); | 2017 | blkfront_gather_backend_features(info); |
2018 | /* Reset limits changed by blk_mq_update_nr_hw_queues(). */ | ||
2019 | blkif_set_queue_limits(info); | ||
2010 | segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST; | 2020 | segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST; |
2011 | blk_queue_max_segments(info->rq, segs); | 2021 | blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG); |
2012 | 2022 | ||
2013 | for (r_index = 0; r_index < info->nr_rings; r_index++) { | 2023 | for (r_index = 0; r_index < info->nr_rings; r_index++) { |
2014 | struct blkfront_ring_info *rinfo = &info->rinfo[r_index]; | 2024 | struct blkfront_ring_info *rinfo = &info->rinfo[r_index]; |
@@ -2432,7 +2442,7 @@ static void blkfront_connect(struct blkfront_info *info) | |||
2432 | if (err) { | 2442 | if (err) { |
2433 | xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", | 2443 | xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", |
2434 | info->xbdev->otherend); | 2444 | info->xbdev->otherend); |
2435 | return; | 2445 | goto fail; |
2436 | } | 2446 | } |
2437 | 2447 | ||
2438 | xenbus_switch_state(info->xbdev, XenbusStateConnected); | 2448 | xenbus_switch_state(info->xbdev, XenbusStateConnected); |
@@ -2445,6 +2455,11 @@ static void blkfront_connect(struct blkfront_info *info) | |||
2445 | device_add_disk(&info->xbdev->dev, info->gd); | 2455 | device_add_disk(&info->xbdev->dev, info->gd); |
2446 | 2456 | ||
2447 | info->is_ready = 1; | 2457 | info->is_ready = 1; |
2458 | return; | ||
2459 | |||
2460 | fail: | ||
2461 | blkif_free(info, 0); | ||
2462 | return; | ||
2448 | } | 2463 | } |
2449 | 2464 | ||
2450 | /** | 2465 | /** |
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c index 5755907f836f..ffa7c9dcbd7a 100644 --- a/drivers/bus/arm-cci.c +++ b/drivers/bus/arm-cci.c | |||
@@ -551,7 +551,7 @@ static struct attribute *cci5xx_pmu_event_attrs[] = { | |||
551 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_wrq, 0xB), | 551 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_wrq, 0xB), |
552 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_cd_hs, 0xC), | 552 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_cd_hs, 0xC), |
553 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_rq_stall_addr_hazard, 0xD), | 553 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_rq_stall_addr_hazard, 0xD), |
554 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snopp_rq_stall_tt_full, 0xE), | 554 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_stall_tt_full, 0xE), |
555 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_tzmp1_prot, 0xF), | 555 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_tzmp1_prot, 0xF), |
556 | NULL | 556 | NULL |
557 | }; | 557 | }; |
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c index 97a9185af433..884c0305e290 100644 --- a/drivers/bus/arm-ccn.c +++ b/drivers/bus/arm-ccn.c | |||
@@ -187,6 +187,7 @@ struct arm_ccn { | |||
187 | struct arm_ccn_component *xp; | 187 | struct arm_ccn_component *xp; |
188 | 188 | ||
189 | struct arm_ccn_dt dt; | 189 | struct arm_ccn_dt dt; |
190 | int mn_id; | ||
190 | }; | 191 | }; |
191 | 192 | ||
192 | static DEFINE_MUTEX(arm_ccn_mutex); | 193 | static DEFINE_MUTEX(arm_ccn_mutex); |
@@ -212,6 +213,7 @@ static int arm_ccn_node_to_xp_port(int node) | |||
212 | #define CCN_CONFIG_TYPE(_config) (((_config) >> 8) & 0xff) | 213 | #define CCN_CONFIG_TYPE(_config) (((_config) >> 8) & 0xff) |
213 | #define CCN_CONFIG_EVENT(_config) (((_config) >> 16) & 0xff) | 214 | #define CCN_CONFIG_EVENT(_config) (((_config) >> 16) & 0xff) |
214 | #define CCN_CONFIG_PORT(_config) (((_config) >> 24) & 0x3) | 215 | #define CCN_CONFIG_PORT(_config) (((_config) >> 24) & 0x3) |
216 | #define CCN_CONFIG_BUS(_config) (((_config) >> 24) & 0x3) | ||
215 | #define CCN_CONFIG_VC(_config) (((_config) >> 26) & 0x7) | 217 | #define CCN_CONFIG_VC(_config) (((_config) >> 26) & 0x7) |
216 | #define CCN_CONFIG_DIR(_config) (((_config) >> 29) & 0x1) | 218 | #define CCN_CONFIG_DIR(_config) (((_config) >> 29) & 0x1) |
217 | #define CCN_CONFIG_MASK(_config) (((_config) >> 30) & 0xf) | 219 | #define CCN_CONFIG_MASK(_config) (((_config) >> 30) & 0xf) |
@@ -241,6 +243,7 @@ static CCN_FORMAT_ATTR(xp, "config:0-7"); | |||
241 | static CCN_FORMAT_ATTR(type, "config:8-15"); | 243 | static CCN_FORMAT_ATTR(type, "config:8-15"); |
242 | static CCN_FORMAT_ATTR(event, "config:16-23"); | 244 | static CCN_FORMAT_ATTR(event, "config:16-23"); |
243 | static CCN_FORMAT_ATTR(port, "config:24-25"); | 245 | static CCN_FORMAT_ATTR(port, "config:24-25"); |
246 | static CCN_FORMAT_ATTR(bus, "config:24-25"); | ||
244 | static CCN_FORMAT_ATTR(vc, "config:26-28"); | 247 | static CCN_FORMAT_ATTR(vc, "config:26-28"); |
245 | static CCN_FORMAT_ATTR(dir, "config:29-29"); | 248 | static CCN_FORMAT_ATTR(dir, "config:29-29"); |
246 | static CCN_FORMAT_ATTR(mask, "config:30-33"); | 249 | static CCN_FORMAT_ATTR(mask, "config:30-33"); |
@@ -253,6 +256,7 @@ static struct attribute *arm_ccn_pmu_format_attrs[] = { | |||
253 | &arm_ccn_pmu_format_attr_type.attr.attr, | 256 | &arm_ccn_pmu_format_attr_type.attr.attr, |
254 | &arm_ccn_pmu_format_attr_event.attr.attr, | 257 | &arm_ccn_pmu_format_attr_event.attr.attr, |
255 | &arm_ccn_pmu_format_attr_port.attr.attr, | 258 | &arm_ccn_pmu_format_attr_port.attr.attr, |
259 | &arm_ccn_pmu_format_attr_bus.attr.attr, | ||
256 | &arm_ccn_pmu_format_attr_vc.attr.attr, | 260 | &arm_ccn_pmu_format_attr_vc.attr.attr, |
257 | &arm_ccn_pmu_format_attr_dir.attr.attr, | 261 | &arm_ccn_pmu_format_attr_dir.attr.attr, |
258 | &arm_ccn_pmu_format_attr_mask.attr.attr, | 262 | &arm_ccn_pmu_format_attr_mask.attr.attr, |
@@ -328,6 +332,7 @@ struct arm_ccn_pmu_event { | |||
328 | static ssize_t arm_ccn_pmu_event_show(struct device *dev, | 332 | static ssize_t arm_ccn_pmu_event_show(struct device *dev, |
329 | struct device_attribute *attr, char *buf) | 333 | struct device_attribute *attr, char *buf) |
330 | { | 334 | { |
335 | struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev)); | ||
331 | struct arm_ccn_pmu_event *event = container_of(attr, | 336 | struct arm_ccn_pmu_event *event = container_of(attr, |
332 | struct arm_ccn_pmu_event, attr); | 337 | struct arm_ccn_pmu_event, attr); |
333 | ssize_t res; | 338 | ssize_t res; |
@@ -349,10 +354,17 @@ static ssize_t arm_ccn_pmu_event_show(struct device *dev, | |||
349 | break; | 354 | break; |
350 | case CCN_TYPE_XP: | 355 | case CCN_TYPE_XP: |
351 | res += snprintf(buf + res, PAGE_SIZE - res, | 356 | res += snprintf(buf + res, PAGE_SIZE - res, |
352 | ",xp=?,port=?,vc=?,dir=?"); | 357 | ",xp=?,vc=?"); |
353 | if (event->event == CCN_EVENT_WATCHPOINT) | 358 | if (event->event == CCN_EVENT_WATCHPOINT) |
354 | res += snprintf(buf + res, PAGE_SIZE - res, | 359 | res += snprintf(buf + res, PAGE_SIZE - res, |
355 | ",cmp_l=?,cmp_h=?,mask=?"); | 360 | ",port=?,dir=?,cmp_l=?,cmp_h=?,mask=?"); |
361 | else | ||
362 | res += snprintf(buf + res, PAGE_SIZE - res, | ||
363 | ",bus=?"); | ||
364 | |||
365 | break; | ||
366 | case CCN_TYPE_MN: | ||
367 | res += snprintf(buf + res, PAGE_SIZE - res, ",node=%d", ccn->mn_id); | ||
356 | break; | 368 | break; |
357 | default: | 369 | default: |
358 | res += snprintf(buf + res, PAGE_SIZE - res, ",node=?"); | 370 | res += snprintf(buf + res, PAGE_SIZE - res, ",node=?"); |
@@ -383,9 +395,9 @@ static umode_t arm_ccn_pmu_events_is_visible(struct kobject *kobj, | |||
383 | } | 395 | } |
384 | 396 | ||
385 | static struct arm_ccn_pmu_event arm_ccn_pmu_events[] = { | 397 | static struct arm_ccn_pmu_event arm_ccn_pmu_events[] = { |
386 | CCN_EVENT_MN(eobarrier, "dir=0,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE), | 398 | CCN_EVENT_MN(eobarrier, "dir=1,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE), |
387 | CCN_EVENT_MN(ecbarrier, "dir=0,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE), | 399 | CCN_EVENT_MN(ecbarrier, "dir=1,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE), |
388 | CCN_EVENT_MN(dvmop, "dir=0,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE), | 400 | CCN_EVENT_MN(dvmop, "dir=1,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE), |
389 | CCN_EVENT_HNI(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY), | 401 | CCN_EVENT_HNI(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY), |
390 | CCN_EVENT_HNI(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY), | 402 | CCN_EVENT_HNI(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY), |
391 | CCN_EVENT_HNI(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY), | 403 | CCN_EVENT_HNI(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY), |
@@ -733,9 +745,10 @@ static int arm_ccn_pmu_event_init(struct perf_event *event) | |||
733 | 745 | ||
734 | if (has_branch_stack(event) || event->attr.exclude_user || | 746 | if (has_branch_stack(event) || event->attr.exclude_user || |
735 | event->attr.exclude_kernel || event->attr.exclude_hv || | 747 | event->attr.exclude_kernel || event->attr.exclude_hv || |
736 | event->attr.exclude_idle) { | 748 | event->attr.exclude_idle || event->attr.exclude_host || |
749 | event->attr.exclude_guest) { | ||
737 | dev_warn(ccn->dev, "Can't exclude execution levels!\n"); | 750 | dev_warn(ccn->dev, "Can't exclude execution levels!\n"); |
738 | return -EOPNOTSUPP; | 751 | return -EINVAL; |
739 | } | 752 | } |
740 | 753 | ||
741 | if (event->cpu < 0) { | 754 | if (event->cpu < 0) { |
@@ -759,6 +772,12 @@ static int arm_ccn_pmu_event_init(struct perf_event *event) | |||
759 | 772 | ||
760 | /* Validate node/xp vs topology */ | 773 | /* Validate node/xp vs topology */ |
761 | switch (type) { | 774 | switch (type) { |
775 | case CCN_TYPE_MN: | ||
776 | if (node_xp != ccn->mn_id) { | ||
777 | dev_warn(ccn->dev, "Invalid MN ID %d!\n", node_xp); | ||
778 | return -EINVAL; | ||
779 | } | ||
780 | break; | ||
762 | case CCN_TYPE_XP: | 781 | case CCN_TYPE_XP: |
763 | if (node_xp >= ccn->num_xps) { | 782 | if (node_xp >= ccn->num_xps) { |
764 | dev_warn(ccn->dev, "Invalid XP ID %d!\n", node_xp); | 783 | dev_warn(ccn->dev, "Invalid XP ID %d!\n", node_xp); |
@@ -886,6 +905,10 @@ static void arm_ccn_pmu_xp_dt_config(struct perf_event *event, int enable) | |||
886 | struct arm_ccn_component *xp; | 905 | struct arm_ccn_component *xp; |
887 | u32 val, dt_cfg; | 906 | u32 val, dt_cfg; |
888 | 907 | ||
908 | /* Nothing to do for cycle counter */ | ||
909 | if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER) | ||
910 | return; | ||
911 | |||
889 | if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) | 912 | if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) |
890 | xp = &ccn->xp[CCN_CONFIG_XP(event->attr.config)]; | 913 | xp = &ccn->xp[CCN_CONFIG_XP(event->attr.config)]; |
891 | else | 914 | else |
@@ -917,38 +940,17 @@ static void arm_ccn_pmu_event_start(struct perf_event *event, int flags) | |||
917 | arm_ccn_pmu_read_counter(ccn, hw->idx)); | 940 | arm_ccn_pmu_read_counter(ccn, hw->idx)); |
918 | hw->state = 0; | 941 | hw->state = 0; |
919 | 942 | ||
920 | /* | ||
921 | * Pin the timer, so that the overflows are handled by the chosen | ||
922 | * event->cpu (this is the same one as presented in "cpumask" | ||
923 | * attribute). | ||
924 | */ | ||
925 | if (!ccn->irq) | ||
926 | hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(), | ||
927 | HRTIMER_MODE_REL_PINNED); | ||
928 | |||
929 | /* Set the DT bus input, engaging the counter */ | 943 | /* Set the DT bus input, engaging the counter */ |
930 | arm_ccn_pmu_xp_dt_config(event, 1); | 944 | arm_ccn_pmu_xp_dt_config(event, 1); |
931 | } | 945 | } |
932 | 946 | ||
933 | static void arm_ccn_pmu_event_stop(struct perf_event *event, int flags) | 947 | static void arm_ccn_pmu_event_stop(struct perf_event *event, int flags) |
934 | { | 948 | { |
935 | struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); | ||
936 | struct hw_perf_event *hw = &event->hw; | 949 | struct hw_perf_event *hw = &event->hw; |
937 | u64 timeout; | ||
938 | 950 | ||
939 | /* Disable counting, setting the DT bus to pass-through mode */ | 951 | /* Disable counting, setting the DT bus to pass-through mode */ |
940 | arm_ccn_pmu_xp_dt_config(event, 0); | 952 | arm_ccn_pmu_xp_dt_config(event, 0); |
941 | 953 | ||
942 | if (!ccn->irq) | ||
943 | hrtimer_cancel(&ccn->dt.hrtimer); | ||
944 | |||
945 | /* Let the DT bus drain */ | ||
946 | timeout = arm_ccn_pmu_read_counter(ccn, CCN_IDX_PMU_CYCLE_COUNTER) + | ||
947 | ccn->num_xps; | ||
948 | while (arm_ccn_pmu_read_counter(ccn, CCN_IDX_PMU_CYCLE_COUNTER) < | ||
949 | timeout) | ||
950 | cpu_relax(); | ||
951 | |||
952 | if (flags & PERF_EF_UPDATE) | 954 | if (flags & PERF_EF_UPDATE) |
953 | arm_ccn_pmu_event_update(event); | 955 | arm_ccn_pmu_event_update(event); |
954 | 956 | ||
@@ -988,7 +990,7 @@ static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event) | |||
988 | 990 | ||
989 | /* Comparison values */ | 991 | /* Comparison values */ |
990 | writel(cmp_l & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_L(wp)); | 992 | writel(cmp_l & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_L(wp)); |
991 | writel((cmp_l >> 32) & 0xefffffff, | 993 | writel((cmp_l >> 32) & 0x7fffffff, |
992 | source->base + CCN_XP_DT_CMP_VAL_L(wp) + 4); | 994 | source->base + CCN_XP_DT_CMP_VAL_L(wp) + 4); |
993 | writel(cmp_h & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_H(wp)); | 995 | writel(cmp_h & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_H(wp)); |
994 | writel((cmp_h >> 32) & 0x0fffffff, | 996 | writel((cmp_h >> 32) & 0x0fffffff, |
@@ -996,7 +998,7 @@ static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event) | |||
996 | 998 | ||
997 | /* Mask */ | 999 | /* Mask */ |
998 | writel(mask_l & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_L(wp)); | 1000 | writel(mask_l & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_L(wp)); |
999 | writel((mask_l >> 32) & 0xefffffff, | 1001 | writel((mask_l >> 32) & 0x7fffffff, |
1000 | source->base + CCN_XP_DT_CMP_MASK_L(wp) + 4); | 1002 | source->base + CCN_XP_DT_CMP_MASK_L(wp) + 4); |
1001 | writel(mask_h & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_H(wp)); | 1003 | writel(mask_h & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_H(wp)); |
1002 | writel((mask_h >> 32) & 0x0fffffff, | 1004 | writel((mask_h >> 32) & 0x0fffffff, |
@@ -1014,7 +1016,7 @@ static void arm_ccn_pmu_xp_event_config(struct perf_event *event) | |||
1014 | hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(hw->config_base); | 1016 | hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(hw->config_base); |
1015 | 1017 | ||
1016 | id = (CCN_CONFIG_VC(event->attr.config) << 4) | | 1018 | id = (CCN_CONFIG_VC(event->attr.config) << 4) | |
1017 | (CCN_CONFIG_PORT(event->attr.config) << 3) | | 1019 | (CCN_CONFIG_BUS(event->attr.config) << 3) | |
1018 | (CCN_CONFIG_EVENT(event->attr.config) << 0); | 1020 | (CCN_CONFIG_EVENT(event->attr.config) << 0); |
1019 | 1021 | ||
1020 | val = readl(source->base + CCN_XP_PMU_EVENT_SEL); | 1022 | val = readl(source->base + CCN_XP_PMU_EVENT_SEL); |
@@ -1099,15 +1101,31 @@ static void arm_ccn_pmu_event_config(struct perf_event *event) | |||
1099 | spin_unlock(&ccn->dt.config_lock); | 1101 | spin_unlock(&ccn->dt.config_lock); |
1100 | } | 1102 | } |
1101 | 1103 | ||
1104 | static int arm_ccn_pmu_active_counters(struct arm_ccn *ccn) | ||
1105 | { | ||
1106 | return bitmap_weight(ccn->dt.pmu_counters_mask, | ||
1107 | CCN_NUM_PMU_EVENT_COUNTERS + 1); | ||
1108 | } | ||
1109 | |||
1102 | static int arm_ccn_pmu_event_add(struct perf_event *event, int flags) | 1110 | static int arm_ccn_pmu_event_add(struct perf_event *event, int flags) |
1103 | { | 1111 | { |
1104 | int err; | 1112 | int err; |
1105 | struct hw_perf_event *hw = &event->hw; | 1113 | struct hw_perf_event *hw = &event->hw; |
1114 | struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); | ||
1106 | 1115 | ||
1107 | err = arm_ccn_pmu_event_alloc(event); | 1116 | err = arm_ccn_pmu_event_alloc(event); |
1108 | if (err) | 1117 | if (err) |
1109 | return err; | 1118 | return err; |
1110 | 1119 | ||
1120 | /* | ||
1121 | * Pin the timer, so that the overflows are handled by the chosen | ||
1122 | * event->cpu (this is the same one as presented in "cpumask" | ||
1123 | * attribute). | ||
1124 | */ | ||
1125 | if (!ccn->irq && arm_ccn_pmu_active_counters(ccn) == 1) | ||
1126 | hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(), | ||
1127 | HRTIMER_MODE_REL_PINNED); | ||
1128 | |||
1111 | arm_ccn_pmu_event_config(event); | 1129 | arm_ccn_pmu_event_config(event); |
1112 | 1130 | ||
1113 | hw->state = PERF_HES_STOPPED; | 1131 | hw->state = PERF_HES_STOPPED; |
@@ -1120,9 +1138,14 @@ static int arm_ccn_pmu_event_add(struct perf_event *event, int flags) | |||
1120 | 1138 | ||
1121 | static void arm_ccn_pmu_event_del(struct perf_event *event, int flags) | 1139 | static void arm_ccn_pmu_event_del(struct perf_event *event, int flags) |
1122 | { | 1140 | { |
1141 | struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); | ||
1142 | |||
1123 | arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE); | 1143 | arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE); |
1124 | 1144 | ||
1125 | arm_ccn_pmu_event_release(event); | 1145 | arm_ccn_pmu_event_release(event); |
1146 | |||
1147 | if (!ccn->irq && arm_ccn_pmu_active_counters(ccn) == 0) | ||
1148 | hrtimer_cancel(&ccn->dt.hrtimer); | ||
1126 | } | 1149 | } |
1127 | 1150 | ||
1128 | static void arm_ccn_pmu_event_read(struct perf_event *event) | 1151 | static void arm_ccn_pmu_event_read(struct perf_event *event) |
@@ -1130,6 +1153,24 @@ static void arm_ccn_pmu_event_read(struct perf_event *event) | |||
1130 | arm_ccn_pmu_event_update(event); | 1153 | arm_ccn_pmu_event_update(event); |
1131 | } | 1154 | } |
1132 | 1155 | ||
1156 | static void arm_ccn_pmu_enable(struct pmu *pmu) | ||
1157 | { | ||
1158 | struct arm_ccn *ccn = pmu_to_arm_ccn(pmu); | ||
1159 | |||
1160 | u32 val = readl(ccn->dt.base + CCN_DT_PMCR); | ||
1161 | val |= CCN_DT_PMCR__PMU_EN; | ||
1162 | writel(val, ccn->dt.base + CCN_DT_PMCR); | ||
1163 | } | ||
1164 | |||
1165 | static void arm_ccn_pmu_disable(struct pmu *pmu) | ||
1166 | { | ||
1167 | struct arm_ccn *ccn = pmu_to_arm_ccn(pmu); | ||
1168 | |||
1169 | u32 val = readl(ccn->dt.base + CCN_DT_PMCR); | ||
1170 | val &= ~CCN_DT_PMCR__PMU_EN; | ||
1171 | writel(val, ccn->dt.base + CCN_DT_PMCR); | ||
1172 | } | ||
1173 | |||
1133 | static irqreturn_t arm_ccn_pmu_overflow_handler(struct arm_ccn_dt *dt) | 1174 | static irqreturn_t arm_ccn_pmu_overflow_handler(struct arm_ccn_dt *dt) |
1134 | { | 1175 | { |
1135 | u32 pmovsr = readl(dt->base + CCN_DT_PMOVSR); | 1176 | u32 pmovsr = readl(dt->base + CCN_DT_PMOVSR); |
@@ -1252,6 +1293,8 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn) | |||
1252 | .start = arm_ccn_pmu_event_start, | 1293 | .start = arm_ccn_pmu_event_start, |
1253 | .stop = arm_ccn_pmu_event_stop, | 1294 | .stop = arm_ccn_pmu_event_stop, |
1254 | .read = arm_ccn_pmu_event_read, | 1295 | .read = arm_ccn_pmu_event_read, |
1296 | .pmu_enable = arm_ccn_pmu_enable, | ||
1297 | .pmu_disable = arm_ccn_pmu_disable, | ||
1255 | }; | 1298 | }; |
1256 | 1299 | ||
1257 | /* No overflow interrupt? Have to use a timer instead. */ | 1300 | /* No overflow interrupt? Have to use a timer instead. */ |
@@ -1361,6 +1404,8 @@ static int arm_ccn_init_nodes(struct arm_ccn *ccn, int region, | |||
1361 | 1404 | ||
1362 | switch (type) { | 1405 | switch (type) { |
1363 | case CCN_TYPE_MN: | 1406 | case CCN_TYPE_MN: |
1407 | ccn->mn_id = id; | ||
1408 | return 0; | ||
1364 | case CCN_TYPE_DT: | 1409 | case CCN_TYPE_DT: |
1365 | return 0; | 1410 | return 0; |
1366 | case CCN_TYPE_XP: | 1411 | case CCN_TYPE_XP: |
@@ -1471,8 +1516,9 @@ static int arm_ccn_probe(struct platform_device *pdev) | |||
1471 | /* Can set 'disable' bits, so can acknowledge interrupts */ | 1516 | /* Can set 'disable' bits, so can acknowledge interrupts */ |
1472 | writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE, | 1517 | writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE, |
1473 | ccn->base + CCN_MN_ERRINT_STATUS); | 1518 | ccn->base + CCN_MN_ERRINT_STATUS); |
1474 | err = devm_request_irq(ccn->dev, irq, arm_ccn_irq_handler, 0, | 1519 | err = devm_request_irq(ccn->dev, irq, arm_ccn_irq_handler, |
1475 | dev_name(ccn->dev), ccn); | 1520 | IRQF_NOBALANCING | IRQF_NO_THREAD, |
1521 | dev_name(ccn->dev), ccn); | ||
1476 | if (err) | 1522 | if (err) |
1477 | return err; | 1523 | return err; |
1478 | 1524 | ||
diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c index c3cb76b363c6..9efdf1de4035 100644 --- a/drivers/bus/vexpress-config.c +++ b/drivers/bus/vexpress-config.c | |||
@@ -178,6 +178,7 @@ static int vexpress_config_populate(struct device_node *node) | |||
178 | 178 | ||
179 | parent = class_find_device(vexpress_config_class, NULL, bridge, | 179 | parent = class_find_device(vexpress_config_class, NULL, bridge, |
180 | vexpress_config_node_match); | 180 | vexpress_config_node_match); |
181 | of_node_put(bridge); | ||
181 | if (WARN_ON(!parent)) | 182 | if (WARN_ON(!parent)) |
182 | return -ENODEV; | 183 | return -ENODEV; |
183 | 184 | ||
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 56ad5a5936a9..8c0770bf8881 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig | |||
@@ -244,7 +244,7 @@ config HW_RANDOM_TX4939 | |||
244 | 244 | ||
245 | config HW_RANDOM_MXC_RNGA | 245 | config HW_RANDOM_MXC_RNGA |
246 | tristate "Freescale i.MX RNGA Random Number Generator" | 246 | tristate "Freescale i.MX RNGA Random Number Generator" |
247 | depends on ARCH_HAS_RNGA | 247 | depends on SOC_IMX31 |
248 | default HW_RANDOM | 248 | default HW_RANDOM |
249 | ---help--- | 249 | ---help--- |
250 | This driver provides kernel-side support for the Random Number | 250 | This driver provides kernel-side support for the Random Number |
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index 08c7e23ed535..0c75c3f1689f 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c | |||
@@ -957,7 +957,7 @@ int tpm2_auto_startup(struct tpm_chip *chip) | |||
957 | goto out; | 957 | goto out; |
958 | 958 | ||
959 | rc = tpm2_do_selftest(chip); | 959 | rc = tpm2_do_selftest(chip); |
960 | if (rc != TPM2_RC_INITIALIZE) { | 960 | if (rc != 0 && rc != TPM2_RC_INITIALIZE) { |
961 | dev_err(&chip->dev, "TPM self test failed\n"); | 961 | dev_err(&chip->dev, "TPM self test failed\n"); |
962 | goto out; | 962 | goto out; |
963 | } | 963 | } |
@@ -974,7 +974,6 @@ int tpm2_auto_startup(struct tpm_chip *chip) | |||
974 | } | 974 | } |
975 | } | 975 | } |
976 | 976 | ||
977 | return rc; | ||
978 | out: | 977 | out: |
979 | if (rc > 0) | 978 | if (rc > 0) |
980 | rc = -ENODEV; | 979 | rc = -ENODEV; |
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index d2406fe25533..5da47e26a012 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c | |||
@@ -165,6 +165,12 @@ struct ports_device { | |||
165 | */ | 165 | */ |
166 | struct virtqueue *c_ivq, *c_ovq; | 166 | struct virtqueue *c_ivq, *c_ovq; |
167 | 167 | ||
168 | /* | ||
169 | * A control packet buffer for guest->host requests, protected | ||
170 | * by c_ovq_lock. | ||
171 | */ | ||
172 | struct virtio_console_control cpkt; | ||
173 | |||
168 | /* Array of per-port IO virtqueues */ | 174 | /* Array of per-port IO virtqueues */ |
169 | struct virtqueue **in_vqs, **out_vqs; | 175 | struct virtqueue **in_vqs, **out_vqs; |
170 | 176 | ||
@@ -560,28 +566,29 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id, | |||
560 | unsigned int event, unsigned int value) | 566 | unsigned int event, unsigned int value) |
561 | { | 567 | { |
562 | struct scatterlist sg[1]; | 568 | struct scatterlist sg[1]; |
563 | struct virtio_console_control cpkt; | ||
564 | struct virtqueue *vq; | 569 | struct virtqueue *vq; |
565 | unsigned int len; | 570 | unsigned int len; |
566 | 571 | ||
567 | if (!use_multiport(portdev)) | 572 | if (!use_multiport(portdev)) |
568 | return 0; | 573 | return 0; |
569 | 574 | ||
570 | cpkt.id = cpu_to_virtio32(portdev->vdev, port_id); | ||
571 | cpkt.event = cpu_to_virtio16(portdev->vdev, event); | ||
572 | cpkt.value = cpu_to_virtio16(portdev->vdev, value); | ||
573 | |||
574 | vq = portdev->c_ovq; | 575 | vq = portdev->c_ovq; |
575 | 576 | ||
576 | sg_init_one(sg, &cpkt, sizeof(cpkt)); | ||
577 | |||
578 | spin_lock(&portdev->c_ovq_lock); | 577 | spin_lock(&portdev->c_ovq_lock); |
579 | if (virtqueue_add_outbuf(vq, sg, 1, &cpkt, GFP_ATOMIC) == 0) { | 578 | |
579 | portdev->cpkt.id = cpu_to_virtio32(portdev->vdev, port_id); | ||
580 | portdev->cpkt.event = cpu_to_virtio16(portdev->vdev, event); | ||
581 | portdev->cpkt.value = cpu_to_virtio16(portdev->vdev, value); | ||
582 | |||
583 | sg_init_one(sg, &portdev->cpkt, sizeof(struct virtio_console_control)); | ||
584 | |||
585 | if (virtqueue_add_outbuf(vq, sg, 1, &portdev->cpkt, GFP_ATOMIC) == 0) { | ||
580 | virtqueue_kick(vq); | 586 | virtqueue_kick(vq); |
581 | while (!virtqueue_get_buf(vq, &len) | 587 | while (!virtqueue_get_buf(vq, &len) |
582 | && !virtqueue_is_broken(vq)) | 588 | && !virtqueue_is_broken(vq)) |
583 | cpu_relax(); | 589 | cpu_relax(); |
584 | } | 590 | } |
591 | |||
585 | spin_unlock(&portdev->c_ovq_lock); | 592 | spin_unlock(&portdev->c_ovq_lock); |
586 | return 0; | 593 | return 0; |
587 | } | 594 | } |
diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c index d359c92e13a6..e38bf60c0ff4 100644 --- a/drivers/clk/renesas/r8a7795-cpg-mssr.c +++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c | |||
@@ -69,6 +69,7 @@ static const struct cpg_core_clk r8a7795_core_clks[] __initconst = { | |||
69 | DEF_FIXED(".s1", CLK_S1, CLK_PLL1_DIV2, 3, 1), | 69 | DEF_FIXED(".s1", CLK_S1, CLK_PLL1_DIV2, 3, 1), |
70 | DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1), | 70 | DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1), |
71 | DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1), | 71 | DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1), |
72 | DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1), | ||
72 | 73 | ||
73 | /* Core Clock Outputs */ | 74 | /* Core Clock Outputs */ |
74 | DEF_FIXED("ztr", R8A7795_CLK_ZTR, CLK_PLL1_DIV2, 6, 1), | 75 | DEF_FIXED("ztr", R8A7795_CLK_ZTR, CLK_PLL1_DIV2, 6, 1), |
@@ -87,10 +88,10 @@ static const struct cpg_core_clk r8a7795_core_clks[] __initconst = { | |||
87 | DEF_FIXED("s3d2", R8A7795_CLK_S3D2, CLK_S3, 2, 1), | 88 | DEF_FIXED("s3d2", R8A7795_CLK_S3D2, CLK_S3, 2, 1), |
88 | DEF_FIXED("s3d4", R8A7795_CLK_S3D4, CLK_S3, 4, 1), | 89 | DEF_FIXED("s3d4", R8A7795_CLK_S3D4, CLK_S3, 4, 1), |
89 | 90 | ||
90 | DEF_GEN3_SD("sd0", R8A7795_CLK_SD0, CLK_PLL1_DIV2, 0x0074), | 91 | DEF_GEN3_SD("sd0", R8A7795_CLK_SD0, CLK_SDSRC, 0x0074), |
91 | DEF_GEN3_SD("sd1", R8A7795_CLK_SD1, CLK_PLL1_DIV2, 0x0078), | 92 | DEF_GEN3_SD("sd1", R8A7795_CLK_SD1, CLK_SDSRC, 0x0078), |
92 | DEF_GEN3_SD("sd2", R8A7795_CLK_SD2, CLK_PLL1_DIV2, 0x0268), | 93 | DEF_GEN3_SD("sd2", R8A7795_CLK_SD2, CLK_SDSRC, 0x0268), |
93 | DEF_GEN3_SD("sd3", R8A7795_CLK_SD3, CLK_PLL1_DIV2, 0x026c), | 94 | DEF_GEN3_SD("sd3", R8A7795_CLK_SD3, CLK_SDSRC, 0x026c), |
94 | 95 | ||
95 | DEF_FIXED("cl", R8A7795_CLK_CL, CLK_PLL1_DIV2, 48, 1), | 96 | DEF_FIXED("cl", R8A7795_CLK_CL, CLK_PLL1_DIV2, 48, 1), |
96 | DEF_FIXED("cp", R8A7795_CLK_CP, CLK_EXTAL, 2, 1), | 97 | DEF_FIXED("cp", R8A7795_CLK_CP, CLK_EXTAL, 2, 1), |
diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c index c109d80e7a8a..cdfabeb9a034 100644 --- a/drivers/clk/rockchip/clk-rk3399.c +++ b/drivers/clk/rockchip/clk-rk3399.c | |||
@@ -833,9 +833,9 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { | |||
833 | 833 | ||
834 | /* perihp */ | 834 | /* perihp */ |
835 | GATE(0, "cpll_aclk_perihp_src", "cpll", CLK_IGNORE_UNUSED, | 835 | GATE(0, "cpll_aclk_perihp_src", "cpll", CLK_IGNORE_UNUSED, |
836 | RK3399_CLKGATE_CON(5), 0, GFLAGS), | ||
837 | GATE(0, "gpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED, | ||
838 | RK3399_CLKGATE_CON(5), 1, GFLAGS), | 836 | RK3399_CLKGATE_CON(5), 1, GFLAGS), |
837 | GATE(0, "gpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED, | ||
838 | RK3399_CLKGATE_CON(5), 0, GFLAGS), | ||
839 | COMPOSITE(ACLK_PERIHP, "aclk_perihp", mux_aclk_perihp_p, CLK_IGNORE_UNUSED, | 839 | COMPOSITE(ACLK_PERIHP, "aclk_perihp", mux_aclk_perihp_p, CLK_IGNORE_UNUSED, |
840 | RK3399_CLKSEL_CON(14), 7, 1, MFLAGS, 0, 5, DFLAGS, | 840 | RK3399_CLKSEL_CON(14), 7, 1, MFLAGS, 0, 5, DFLAGS, |
841 | RK3399_CLKGATE_CON(5), 2, GFLAGS), | 841 | RK3399_CLKGATE_CON(5), 2, GFLAGS), |
@@ -923,9 +923,9 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { | |||
923 | RK3399_CLKGATE_CON(6), 14, GFLAGS), | 923 | RK3399_CLKGATE_CON(6), 14, GFLAGS), |
924 | 924 | ||
925 | GATE(0, "cpll_aclk_emmc_src", "cpll", CLK_IGNORE_UNUSED, | 925 | GATE(0, "cpll_aclk_emmc_src", "cpll", CLK_IGNORE_UNUSED, |
926 | RK3399_CLKGATE_CON(6), 12, GFLAGS), | ||
927 | GATE(0, "gpll_aclk_emmc_src", "gpll", CLK_IGNORE_UNUSED, | ||
928 | RK3399_CLKGATE_CON(6), 13, GFLAGS), | 926 | RK3399_CLKGATE_CON(6), 13, GFLAGS), |
927 | GATE(0, "gpll_aclk_emmc_src", "gpll", CLK_IGNORE_UNUSED, | ||
928 | RK3399_CLKGATE_CON(6), 12, GFLAGS), | ||
929 | COMPOSITE_NOGATE(ACLK_EMMC, "aclk_emmc", mux_aclk_emmc_p, CLK_IGNORE_UNUSED, | 929 | COMPOSITE_NOGATE(ACLK_EMMC, "aclk_emmc", mux_aclk_emmc_p, CLK_IGNORE_UNUSED, |
930 | RK3399_CLKSEL_CON(21), 7, 1, MFLAGS, 0, 5, DFLAGS), | 930 | RK3399_CLKSEL_CON(21), 7, 1, MFLAGS, 0, 5, DFLAGS), |
931 | GATE(ACLK_EMMC_CORE, "aclk_emmccore", "aclk_emmc", CLK_IGNORE_UNUSED, | 931 | GATE(ACLK_EMMC_CORE, "aclk_emmccore", "aclk_emmc", CLK_IGNORE_UNUSED, |
@@ -1071,7 +1071,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { | |||
1071 | /* vio */ | 1071 | /* vio */ |
1072 | COMPOSITE(ACLK_VIO, "aclk_vio", mux_pll_src_cpll_gpll_ppll_p, CLK_IGNORE_UNUSED, | 1072 | COMPOSITE(ACLK_VIO, "aclk_vio", mux_pll_src_cpll_gpll_ppll_p, CLK_IGNORE_UNUSED, |
1073 | RK3399_CLKSEL_CON(42), 6, 2, MFLAGS, 0, 5, DFLAGS, | 1073 | RK3399_CLKSEL_CON(42), 6, 2, MFLAGS, 0, 5, DFLAGS, |
1074 | RK3399_CLKGATE_CON(11), 10, GFLAGS), | 1074 | RK3399_CLKGATE_CON(11), 0, GFLAGS), |
1075 | COMPOSITE_NOMUX(PCLK_VIO, "pclk_vio", "aclk_vio", 0, | 1075 | COMPOSITE_NOMUX(PCLK_VIO, "pclk_vio", "aclk_vio", 0, |
1076 | RK3399_CLKSEL_CON(43), 0, 5, DFLAGS, | 1076 | RK3399_CLKSEL_CON(43), 0, 5, DFLAGS, |
1077 | RK3399_CLKGATE_CON(11), 1, GFLAGS), | 1077 | RK3399_CLKGATE_CON(11), 1, GFLAGS), |
@@ -1484,6 +1484,7 @@ static const char *const rk3399_cru_critical_clocks[] __initconst = { | |||
1484 | "hclk_perilp1", | 1484 | "hclk_perilp1", |
1485 | "hclk_perilp1_noc", | 1485 | "hclk_perilp1_noc", |
1486 | "aclk_dmac0_perilp", | 1486 | "aclk_dmac0_perilp", |
1487 | "aclk_emmc_noc", | ||
1487 | "gpll_hclk_perilp1_src", | 1488 | "gpll_hclk_perilp1_src", |
1488 | "gpll_aclk_perilp0_src", | 1489 | "gpll_aclk_perilp0_src", |
1489 | "gpll_aclk_perihp_src", | 1490 | "gpll_aclk_perihp_src", |
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c index 9af359544110..267f99523fbe 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c | |||
@@ -783,14 +783,14 @@ static struct ccu_reset_map sun8i_h3_ccu_resets[] = { | |||
783 | [RST_BUS_I2S1] = { 0x2d0, BIT(13) }, | 783 | [RST_BUS_I2S1] = { 0x2d0, BIT(13) }, |
784 | [RST_BUS_I2S2] = { 0x2d0, BIT(14) }, | 784 | [RST_BUS_I2S2] = { 0x2d0, BIT(14) }, |
785 | 785 | ||
786 | [RST_BUS_I2C0] = { 0x2d4, BIT(0) }, | 786 | [RST_BUS_I2C0] = { 0x2d8, BIT(0) }, |
787 | [RST_BUS_I2C1] = { 0x2d4, BIT(1) }, | 787 | [RST_BUS_I2C1] = { 0x2d8, BIT(1) }, |
788 | [RST_BUS_I2C2] = { 0x2d4, BIT(2) }, | 788 | [RST_BUS_I2C2] = { 0x2d8, BIT(2) }, |
789 | [RST_BUS_UART0] = { 0x2d4, BIT(16) }, | 789 | [RST_BUS_UART0] = { 0x2d8, BIT(16) }, |
790 | [RST_BUS_UART1] = { 0x2d4, BIT(17) }, | 790 | [RST_BUS_UART1] = { 0x2d8, BIT(17) }, |
791 | [RST_BUS_UART2] = { 0x2d4, BIT(18) }, | 791 | [RST_BUS_UART2] = { 0x2d8, BIT(18) }, |
792 | [RST_BUS_UART3] = { 0x2d4, BIT(19) }, | 792 | [RST_BUS_UART3] = { 0x2d8, BIT(19) }, |
793 | [RST_BUS_SCR] = { 0x2d4, BIT(20) }, | 793 | [RST_BUS_SCR] = { 0x2d8, BIT(20) }, |
794 | }; | 794 | }; |
795 | 795 | ||
796 | static const struct sunxi_ccu_desc sun8i_h3_ccu_desc = { | 796 | static const struct sunxi_ccu_desc sun8i_h3_ccu_desc = { |
diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c index fc17b5295e16..51d4bac97ab3 100644 --- a/drivers/clk/sunxi-ng/ccu_common.c +++ b/drivers/clk/sunxi-ng/ccu_common.c | |||
@@ -31,7 +31,7 @@ void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock) | |||
31 | return; | 31 | return; |
32 | 32 | ||
33 | WARN_ON(readl_relaxed_poll_timeout(common->base + common->reg, reg, | 33 | WARN_ON(readl_relaxed_poll_timeout(common->base + common->reg, reg, |
34 | !(reg & lock), 100, 70000)); | 34 | reg & lock, 100, 70000)); |
35 | } | 35 | } |
36 | 36 | ||
37 | int sunxi_ccu_probe(struct device_node *node, void __iomem *reg, | 37 | int sunxi_ccu_probe(struct device_node *node, void __iomem *reg, |
diff --git a/drivers/clk/sunxi-ng/ccu_nk.c b/drivers/clk/sunxi-ng/ccu_nk.c index 4470ffc8cf0d..d6fafb397489 100644 --- a/drivers/clk/sunxi-ng/ccu_nk.c +++ b/drivers/clk/sunxi-ng/ccu_nk.c | |||
@@ -14,9 +14,9 @@ | |||
14 | #include "ccu_gate.h" | 14 | #include "ccu_gate.h" |
15 | #include "ccu_nk.h" | 15 | #include "ccu_nk.h" |
16 | 16 | ||
17 | void ccu_nk_find_best(unsigned long parent, unsigned long rate, | 17 | static void ccu_nk_find_best(unsigned long parent, unsigned long rate, |
18 | unsigned int max_n, unsigned int max_k, | 18 | unsigned int max_n, unsigned int max_k, |
19 | unsigned int *n, unsigned int *k) | 19 | unsigned int *n, unsigned int *k) |
20 | { | 20 | { |
21 | unsigned long best_rate = 0; | 21 | unsigned long best_rate = 0; |
22 | unsigned int best_k = 0, best_n = 0; | 22 | unsigned int best_k = 0, best_n = 0; |
diff --git a/drivers/clk/sunxi/clk-a10-pll2.c b/drivers/clk/sunxi/clk-a10-pll2.c index 0ee1f363e4be..d8eab90ae661 100644 --- a/drivers/clk/sunxi/clk-a10-pll2.c +++ b/drivers/clk/sunxi/clk-a10-pll2.c | |||
@@ -73,7 +73,7 @@ static void __init sun4i_pll2_setup(struct device_node *node, | |||
73 | SUN4I_PLL2_PRE_DIV_WIDTH, | 73 | SUN4I_PLL2_PRE_DIV_WIDTH, |
74 | CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, | 74 | CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, |
75 | &sun4i_a10_pll2_lock); | 75 | &sun4i_a10_pll2_lock); |
76 | if (!prediv_clk) { | 76 | if (IS_ERR(prediv_clk)) { |
77 | pr_err("Couldn't register the prediv clock\n"); | 77 | pr_err("Couldn't register the prediv clock\n"); |
78 | goto err_free_array; | 78 | goto err_free_array; |
79 | } | 79 | } |
@@ -106,7 +106,7 @@ static void __init sun4i_pll2_setup(struct device_node *node, | |||
106 | &mult->hw, &clk_multiplier_ops, | 106 | &mult->hw, &clk_multiplier_ops, |
107 | &gate->hw, &clk_gate_ops, | 107 | &gate->hw, &clk_gate_ops, |
108 | CLK_SET_RATE_PARENT); | 108 | CLK_SET_RATE_PARENT); |
109 | if (!base_clk) { | 109 | if (IS_ERR(base_clk)) { |
110 | pr_err("Couldn't register the base multiplier clock\n"); | 110 | pr_err("Couldn't register the base multiplier clock\n"); |
111 | goto err_free_multiplier; | 111 | goto err_free_multiplier; |
112 | } | 112 | } |
diff --git a/drivers/clk/sunxi/clk-sun8i-mbus.c b/drivers/clk/sunxi/clk-sun8i-mbus.c index 411d3033a96e..b200ebf159ee 100644 --- a/drivers/clk/sunxi/clk-sun8i-mbus.c +++ b/drivers/clk/sunxi/clk-sun8i-mbus.c | |||
@@ -48,7 +48,7 @@ static void __init sun8i_a23_mbus_setup(struct device_node *node) | |||
48 | return; | 48 | return; |
49 | 49 | ||
50 | reg = of_io_request_and_map(node, 0, of_node_full_name(node)); | 50 | reg = of_io_request_and_map(node, 0, of_node_full_name(node)); |
51 | if (!reg) { | 51 | if (IS_ERR(reg)) { |
52 | pr_err("Could not get registers for sun8i-mbus-clk\n"); | 52 | pr_err("Could not get registers for sun8i-mbus-clk\n"); |
53 | goto err_free_parents; | 53 | goto err_free_parents; |
54 | } | 54 | } |
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c index 64da7b79a6e4..933b5dd698b8 100644 --- a/drivers/clk/tegra/clk-tegra114.c +++ b/drivers/clk/tegra/clk-tegra114.c | |||
@@ -428,7 +428,7 @@ static struct tegra_clk_pll_params pll_d_params = { | |||
428 | .div_nmp = &pllp_nmp, | 428 | .div_nmp = &pllp_nmp, |
429 | .freq_table = pll_d_freq_table, | 429 | .freq_table = pll_d_freq_table, |
430 | .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON | | 430 | .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON | |
431 | TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, | 431 | TEGRA_PLL_HAS_LOCK_ENABLE, |
432 | }; | 432 | }; |
433 | 433 | ||
434 | static struct tegra_clk_pll_params pll_d2_params = { | 434 | static struct tegra_clk_pll_params pll_d2_params = { |
@@ -446,7 +446,7 @@ static struct tegra_clk_pll_params pll_d2_params = { | |||
446 | .div_nmp = &pllp_nmp, | 446 | .div_nmp = &pllp_nmp, |
447 | .freq_table = pll_d_freq_table, | 447 | .freq_table = pll_d_freq_table, |
448 | .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON | | 448 | .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON | |
449 | TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, | 449 | TEGRA_PLL_HAS_LOCK_ENABLE, |
450 | }; | 450 | }; |
451 | 451 | ||
452 | static const struct pdiv_map pllu_p[] = { | 452 | static const struct pdiv_map pllu_p[] = { |
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c index 7e3fd375a627..92f6e4deee74 100644 --- a/drivers/clocksource/bcm_kona_timer.c +++ b/drivers/clocksource/bcm_kona_timer.c | |||
@@ -66,10 +66,10 @@ static void kona_timer_disable_and_clear(void __iomem *base) | |||
66 | 66 | ||
67 | } | 67 | } |
68 | 68 | ||
69 | static void | 69 | static int |
70 | kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw) | 70 | kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw) |
71 | { | 71 | { |
72 | int loop_limit = 4; | 72 | int loop_limit = 3; |
73 | 73 | ||
74 | /* | 74 | /* |
75 | * Read 64-bit free running counter | 75 | * Read 64-bit free running counter |
@@ -83,18 +83,19 @@ kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw) | |||
83 | * if new hi-word is equal to previously read hi-word then stop. | 83 | * if new hi-word is equal to previously read hi-word then stop. |
84 | */ | 84 | */ |
85 | 85 | ||
86 | while (--loop_limit) { | 86 | do { |
87 | *msw = readl(timer_base + KONA_GPTIMER_STCHI_OFFSET); | 87 | *msw = readl(timer_base + KONA_GPTIMER_STCHI_OFFSET); |
88 | *lsw = readl(timer_base + KONA_GPTIMER_STCLO_OFFSET); | 88 | *lsw = readl(timer_base + KONA_GPTIMER_STCLO_OFFSET); |
89 | if (*msw == readl(timer_base + KONA_GPTIMER_STCHI_OFFSET)) | 89 | if (*msw == readl(timer_base + KONA_GPTIMER_STCHI_OFFSET)) |
90 | break; | 90 | break; |
91 | } | 91 | } while (--loop_limit); |
92 | if (!loop_limit) { | 92 | if (!loop_limit) { |
93 | pr_err("bcm_kona_timer: getting counter failed.\n"); | 93 | pr_err("bcm_kona_timer: getting counter failed.\n"); |
94 | pr_err(" Timer will be impacted\n"); | 94 | pr_err(" Timer will be impacted\n"); |
95 | return -ETIMEDOUT; | ||
95 | } | 96 | } |
96 | 97 | ||
97 | return; | 98 | return 0; |
98 | } | 99 | } |
99 | 100 | ||
100 | static int kona_timer_set_next_event(unsigned long clc, | 101 | static int kona_timer_set_next_event(unsigned long clc, |
@@ -112,8 +113,11 @@ static int kona_timer_set_next_event(unsigned long clc, | |||
112 | 113 | ||
113 | uint32_t lsw, msw; | 114 | uint32_t lsw, msw; |
114 | uint32_t reg; | 115 | uint32_t reg; |
116 | int ret; | ||
115 | 117 | ||
116 | kona_timer_get_counter(timers.tmr_regs, &msw, &lsw); | 118 | ret = kona_timer_get_counter(timers.tmr_regs, &msw, &lsw); |
119 | if (ret) | ||
120 | return ret; | ||
117 | 121 | ||
118 | /* Load the "next" event tick value */ | 122 | /* Load the "next" event tick value */ |
119 | writel(lsw + clc, timers.tmr_regs + KONA_GPTIMER_STCM0_OFFSET); | 123 | writel(lsw + clc, timers.tmr_regs + KONA_GPTIMER_STCM0_OFFSET); |
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c index d91e8725917c..b4b3ab5a11ad 100644 --- a/drivers/clocksource/mips-gic-timer.c +++ b/drivers/clocksource/mips-gic-timer.c | |||
@@ -164,7 +164,7 @@ void __init gic_clocksource_init(unsigned int frequency) | |||
164 | gic_start_count(); | 164 | gic_start_count(); |
165 | } | 165 | } |
166 | 166 | ||
167 | static void __init gic_clocksource_of_init(struct device_node *node) | 167 | static int __init gic_clocksource_of_init(struct device_node *node) |
168 | { | 168 | { |
169 | struct clk *clk; | 169 | struct clk *clk; |
170 | int ret; | 170 | int ret; |
diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c index 937e10b84d58..3e1cb512f3ce 100644 --- a/drivers/clocksource/pxa_timer.c +++ b/drivers/clocksource/pxa_timer.c | |||
@@ -21,6 +21,8 @@ | |||
21 | #include <linux/of_irq.h> | 21 | #include <linux/of_irq.h> |
22 | #include <linux/sched_clock.h> | 22 | #include <linux/sched_clock.h> |
23 | 23 | ||
24 | #include <clocksource/pxa.h> | ||
25 | |||
24 | #include <asm/div64.h> | 26 | #include <asm/div64.h> |
25 | 27 | ||
26 | #define OSMR0 0x00 /* OS Timer 0 Match Register */ | 28 | #define OSMR0 0x00 /* OS Timer 0 Match Register */ |
diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c index 97669ee4df2a..c83452cacb41 100644 --- a/drivers/clocksource/sun4i_timer.c +++ b/drivers/clocksource/sun4i_timer.c | |||
@@ -123,12 +123,16 @@ static struct clock_event_device sun4i_clockevent = { | |||
123 | .set_next_event = sun4i_clkevt_next_event, | 123 | .set_next_event = sun4i_clkevt_next_event, |
124 | }; | 124 | }; |
125 | 125 | ||
126 | static void sun4i_timer_clear_interrupt(void) | ||
127 | { | ||
128 | writel(TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_ST_REG); | ||
129 | } | ||
126 | 130 | ||
127 | static irqreturn_t sun4i_timer_interrupt(int irq, void *dev_id) | 131 | static irqreturn_t sun4i_timer_interrupt(int irq, void *dev_id) |
128 | { | 132 | { |
129 | struct clock_event_device *evt = (struct clock_event_device *)dev_id; | 133 | struct clock_event_device *evt = (struct clock_event_device *)dev_id; |
130 | 134 | ||
131 | writel(0x1, timer_base + TIMER_IRQ_ST_REG); | 135 | sun4i_timer_clear_interrupt(); |
132 | evt->event_handler(evt); | 136 | evt->event_handler(evt); |
133 | 137 | ||
134 | return IRQ_HANDLED; | 138 | return IRQ_HANDLED; |
@@ -208,6 +212,9 @@ static int __init sun4i_timer_init(struct device_node *node) | |||
208 | /* Make sure timer is stopped before playing with interrupts */ | 212 | /* Make sure timer is stopped before playing with interrupts */ |
209 | sun4i_clkevt_time_stop(0); | 213 | sun4i_clkevt_time_stop(0); |
210 | 214 | ||
215 | /* clear timer0 interrupt */ | ||
216 | sun4i_timer_clear_interrupt(); | ||
217 | |||
211 | sun4i_clockevent.cpumask = cpu_possible_mask; | 218 | sun4i_clockevent.cpumask = cpu_possible_mask; |
212 | sun4i_clockevent.irq = irq; | 219 | sun4i_clockevent.irq = irq; |
213 | 220 | ||
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c index 719b478d136e..3c39e6f45971 100644 --- a/drivers/clocksource/time-armada-370-xp.c +++ b/drivers/clocksource/time-armada-370-xp.c | |||
@@ -338,7 +338,6 @@ static int __init armada_xp_timer_init(struct device_node *np) | |||
338 | struct clk *clk = of_clk_get_by_name(np, "fixed"); | 338 | struct clk *clk = of_clk_get_by_name(np, "fixed"); |
339 | int ret; | 339 | int ret; |
340 | 340 | ||
341 | clk = of_clk_get(np, 0); | ||
342 | if (IS_ERR(clk)) { | 341 | if (IS_ERR(clk)) { |
343 | pr_err("Failed to get clock"); | 342 | pr_err("Failed to get clock"); |
344 | return PTR_ERR(clk); | 343 | return PTR_ERR(clk); |
diff --git a/drivers/clocksource/time-pistachio.c b/drivers/clocksource/time-pistachio.c index a7d9a08e4b0e..a8e6c7df853d 100644 --- a/drivers/clocksource/time-pistachio.c +++ b/drivers/clocksource/time-pistachio.c | |||
@@ -202,10 +202,10 @@ static int __init pistachio_clksrc_of_init(struct device_node *node) | |||
202 | rate = clk_get_rate(fast_clk); | 202 | rate = clk_get_rate(fast_clk); |
203 | 203 | ||
204 | /* Disable irq's for clocksource usage */ | 204 | /* Disable irq's for clocksource usage */ |
205 | gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 0); | 205 | gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 0); |
206 | gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 1); | 206 | gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 1); |
207 | gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 2); | 207 | gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 2); |
208 | gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 3); | 208 | gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 3); |
209 | 209 | ||
210 | /* Enable timer block */ | 210 | /* Enable timer block */ |
211 | writel(TIMER_ME_GLOBAL, pcs_gpt.base); | 211 | writel(TIMER_ME_GLOBAL, pcs_gpt.base); |
diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c index 1ffac0cb0cb7..7f0f5b26d8c5 100644 --- a/drivers/clocksource/timer-atmel-pit.c +++ b/drivers/clocksource/timer-atmel-pit.c | |||
@@ -240,6 +240,7 @@ static int __init at91sam926x_pit_common_init(struct pit_data *data) | |||
240 | static int __init at91sam926x_pit_dt_init(struct device_node *node) | 240 | static int __init at91sam926x_pit_dt_init(struct device_node *node) |
241 | { | 241 | { |
242 | struct pit_data *data; | 242 | struct pit_data *data; |
243 | int ret; | ||
243 | 244 | ||
244 | data = kzalloc(sizeof(*data), GFP_KERNEL); | 245 | data = kzalloc(sizeof(*data), GFP_KERNEL); |
245 | if (!data) | 246 | if (!data) |
@@ -261,6 +262,12 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node) | |||
261 | return PTR_ERR(data->mck); | 262 | return PTR_ERR(data->mck); |
262 | } | 263 | } |
263 | 264 | ||
265 | ret = clk_prepare_enable(data->mck); | ||
266 | if (ret) { | ||
267 | pr_err("Unable to enable mck\n"); | ||
268 | return ret; | ||
269 | } | ||
270 | |||
264 | /* Get the interrupts property */ | 271 | /* Get the interrupts property */ |
265 | data->irq = irq_of_parse_and_map(node, 0); | 272 | data->irq = irq_of_parse_and_map(node, 0); |
266 | if (!data->irq) { | 273 | if (!data->irq) { |
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index 0bb44d5b5df4..2ee40fd360ca 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c | |||
@@ -74,6 +74,8 @@ static const struct of_device_id machines[] __initconst = { | |||
74 | { .compatible = "ti,omap5", }, | 74 | { .compatible = "ti,omap5", }, |
75 | 75 | ||
76 | { .compatible = "xlnx,zynq-7000", }, | 76 | { .compatible = "xlnx,zynq-7000", }, |
77 | |||
78 | { } | ||
77 | }; | 79 | }; |
78 | 80 | ||
79 | static int __init cpufreq_dt_platdev_init(void) | 81 | static int __init cpufreq_dt_platdev_init(void) |
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index ea8189f4b021..b3044219772c 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c | |||
@@ -441,6 +441,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
441 | OP_ALG_AAI_CTR_MOD128); | 441 | OP_ALG_AAI_CTR_MOD128); |
442 | const bool is_rfc3686 = alg->caam.rfc3686; | 442 | const bool is_rfc3686 = alg->caam.rfc3686; |
443 | 443 | ||
444 | if (!ctx->authsize) | ||
445 | return 0; | ||
446 | |||
444 | /* NULL encryption / decryption */ | 447 | /* NULL encryption / decryption */ |
445 | if (!ctx->enckeylen) | 448 | if (!ctx->enckeylen) |
446 | return aead_null_set_sh_desc(aead); | 449 | return aead_null_set_sh_desc(aead); |
@@ -553,7 +556,10 @@ skip_enc: | |||
553 | 556 | ||
554 | /* Read and write assoclen bytes */ | 557 | /* Read and write assoclen bytes */ |
555 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); | 558 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); |
556 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | 559 | if (alg->caam.geniv) |
560 | append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize); | ||
561 | else | ||
562 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
557 | 563 | ||
558 | /* Skip assoc data */ | 564 | /* Skip assoc data */ |
559 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); | 565 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); |
@@ -562,6 +568,14 @@ skip_enc: | |||
562 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | | 568 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | |
563 | KEY_VLF); | 569 | KEY_VLF); |
564 | 570 | ||
571 | if (alg->caam.geniv) { | ||
572 | append_seq_load(desc, ivsize, LDST_CLASS_1_CCB | | ||
573 | LDST_SRCDST_BYTE_CONTEXT | | ||
574 | (ctx1_iv_off << LDST_OFFSET_SHIFT)); | ||
575 | append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | | ||
576 | (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize); | ||
577 | } | ||
578 | |||
565 | /* Load Counter into CONTEXT1 reg */ | 579 | /* Load Counter into CONTEXT1 reg */ |
566 | if (is_rfc3686) | 580 | if (is_rfc3686) |
567 | append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM | | 581 | append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM | |
@@ -614,7 +628,7 @@ skip_enc: | |||
614 | keys_fit_inline = true; | 628 | keys_fit_inline = true; |
615 | 629 | ||
616 | /* aead_givencrypt shared descriptor */ | 630 | /* aead_givencrypt shared descriptor */ |
617 | desc = ctx->sh_desc_givenc; | 631 | desc = ctx->sh_desc_enc; |
618 | 632 | ||
619 | /* Note: Context registers are saved. */ | 633 | /* Note: Context registers are saved. */ |
620 | init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686); | 634 | init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686); |
@@ -645,13 +659,13 @@ copy_iv: | |||
645 | append_operation(desc, ctx->class2_alg_type | | 659 | append_operation(desc, ctx->class2_alg_type | |
646 | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); | 660 | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); |
647 | 661 | ||
648 | /* ivsize + cryptlen = seqoutlen - authsize */ | ||
649 | append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); | ||
650 | |||
651 | /* Read and write assoclen bytes */ | 662 | /* Read and write assoclen bytes */ |
652 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); | 663 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); |
653 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | 664 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); |
654 | 665 | ||
666 | /* ivsize + cryptlen = seqoutlen - authsize */ | ||
667 | append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); | ||
668 | |||
655 | /* Skip assoc data */ | 669 | /* Skip assoc data */ |
656 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); | 670 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); |
657 | 671 | ||
@@ -697,7 +711,7 @@ copy_iv: | |||
697 | ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, | 711 | ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, |
698 | desc_bytes(desc), | 712 | desc_bytes(desc), |
699 | DMA_TO_DEVICE); | 713 | DMA_TO_DEVICE); |
700 | if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) { | 714 | if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { |
701 | dev_err(jrdev, "unable to map shared descriptor\n"); | 715 | dev_err(jrdev, "unable to map shared descriptor\n"); |
702 | return -ENOMEM; | 716 | return -ENOMEM; |
703 | } | 717 | } |
@@ -2147,7 +2161,7 @@ static void init_authenc_job(struct aead_request *req, | |||
2147 | 2161 | ||
2148 | init_aead_job(req, edesc, all_contig, encrypt); | 2162 | init_aead_job(req, edesc, all_contig, encrypt); |
2149 | 2163 | ||
2150 | if (ivsize && (is_rfc3686 || !(alg->caam.geniv && encrypt))) | 2164 | if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv)) |
2151 | append_load_as_imm(desc, req->iv, ivsize, | 2165 | append_load_as_imm(desc, req->iv, ivsize, |
2152 | LDST_CLASS_1_CCB | | 2166 | LDST_CLASS_1_CCB | |
2153 | LDST_SRCDST_BYTE_CONTEXT | | 2167 | LDST_SRCDST_BYTE_CONTEXT | |
@@ -2534,20 +2548,6 @@ static int aead_decrypt(struct aead_request *req) | |||
2534 | return ret; | 2548 | return ret; |
2535 | } | 2549 | } |
2536 | 2550 | ||
2537 | static int aead_givdecrypt(struct aead_request *req) | ||
2538 | { | ||
2539 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
2540 | unsigned int ivsize = crypto_aead_ivsize(aead); | ||
2541 | |||
2542 | if (req->cryptlen < ivsize) | ||
2543 | return -EINVAL; | ||
2544 | |||
2545 | req->cryptlen -= ivsize; | ||
2546 | req->assoclen += ivsize; | ||
2547 | |||
2548 | return aead_decrypt(req); | ||
2549 | } | ||
2550 | |||
2551 | /* | 2551 | /* |
2552 | * allocate and map the ablkcipher extended descriptor for ablkcipher | 2552 | * allocate and map the ablkcipher extended descriptor for ablkcipher |
2553 | */ | 2553 | */ |
@@ -3207,7 +3207,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3207 | .setkey = aead_setkey, | 3207 | .setkey = aead_setkey, |
3208 | .setauthsize = aead_setauthsize, | 3208 | .setauthsize = aead_setauthsize, |
3209 | .encrypt = aead_encrypt, | 3209 | .encrypt = aead_encrypt, |
3210 | .decrypt = aead_givdecrypt, | 3210 | .decrypt = aead_decrypt, |
3211 | .ivsize = AES_BLOCK_SIZE, | 3211 | .ivsize = AES_BLOCK_SIZE, |
3212 | .maxauthsize = MD5_DIGEST_SIZE, | 3212 | .maxauthsize = MD5_DIGEST_SIZE, |
3213 | }, | 3213 | }, |
@@ -3253,7 +3253,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3253 | .setkey = aead_setkey, | 3253 | .setkey = aead_setkey, |
3254 | .setauthsize = aead_setauthsize, | 3254 | .setauthsize = aead_setauthsize, |
3255 | .encrypt = aead_encrypt, | 3255 | .encrypt = aead_encrypt, |
3256 | .decrypt = aead_givdecrypt, | 3256 | .decrypt = aead_decrypt, |
3257 | .ivsize = AES_BLOCK_SIZE, | 3257 | .ivsize = AES_BLOCK_SIZE, |
3258 | .maxauthsize = SHA1_DIGEST_SIZE, | 3258 | .maxauthsize = SHA1_DIGEST_SIZE, |
3259 | }, | 3259 | }, |
@@ -3299,7 +3299,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3299 | .setkey = aead_setkey, | 3299 | .setkey = aead_setkey, |
3300 | .setauthsize = aead_setauthsize, | 3300 | .setauthsize = aead_setauthsize, |
3301 | .encrypt = aead_encrypt, | 3301 | .encrypt = aead_encrypt, |
3302 | .decrypt = aead_givdecrypt, | 3302 | .decrypt = aead_decrypt, |
3303 | .ivsize = AES_BLOCK_SIZE, | 3303 | .ivsize = AES_BLOCK_SIZE, |
3304 | .maxauthsize = SHA224_DIGEST_SIZE, | 3304 | .maxauthsize = SHA224_DIGEST_SIZE, |
3305 | }, | 3305 | }, |
@@ -3345,7 +3345,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3345 | .setkey = aead_setkey, | 3345 | .setkey = aead_setkey, |
3346 | .setauthsize = aead_setauthsize, | 3346 | .setauthsize = aead_setauthsize, |
3347 | .encrypt = aead_encrypt, | 3347 | .encrypt = aead_encrypt, |
3348 | .decrypt = aead_givdecrypt, | 3348 | .decrypt = aead_decrypt, |
3349 | .ivsize = AES_BLOCK_SIZE, | 3349 | .ivsize = AES_BLOCK_SIZE, |
3350 | .maxauthsize = SHA256_DIGEST_SIZE, | 3350 | .maxauthsize = SHA256_DIGEST_SIZE, |
3351 | }, | 3351 | }, |
@@ -3391,7 +3391,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3391 | .setkey = aead_setkey, | 3391 | .setkey = aead_setkey, |
3392 | .setauthsize = aead_setauthsize, | 3392 | .setauthsize = aead_setauthsize, |
3393 | .encrypt = aead_encrypt, | 3393 | .encrypt = aead_encrypt, |
3394 | .decrypt = aead_givdecrypt, | 3394 | .decrypt = aead_decrypt, |
3395 | .ivsize = AES_BLOCK_SIZE, | 3395 | .ivsize = AES_BLOCK_SIZE, |
3396 | .maxauthsize = SHA384_DIGEST_SIZE, | 3396 | .maxauthsize = SHA384_DIGEST_SIZE, |
3397 | }, | 3397 | }, |
@@ -3437,7 +3437,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3437 | .setkey = aead_setkey, | 3437 | .setkey = aead_setkey, |
3438 | .setauthsize = aead_setauthsize, | 3438 | .setauthsize = aead_setauthsize, |
3439 | .encrypt = aead_encrypt, | 3439 | .encrypt = aead_encrypt, |
3440 | .decrypt = aead_givdecrypt, | 3440 | .decrypt = aead_decrypt, |
3441 | .ivsize = AES_BLOCK_SIZE, | 3441 | .ivsize = AES_BLOCK_SIZE, |
3442 | .maxauthsize = SHA512_DIGEST_SIZE, | 3442 | .maxauthsize = SHA512_DIGEST_SIZE, |
3443 | }, | 3443 | }, |
@@ -3483,7 +3483,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3483 | .setkey = aead_setkey, | 3483 | .setkey = aead_setkey, |
3484 | .setauthsize = aead_setauthsize, | 3484 | .setauthsize = aead_setauthsize, |
3485 | .encrypt = aead_encrypt, | 3485 | .encrypt = aead_encrypt, |
3486 | .decrypt = aead_givdecrypt, | 3486 | .decrypt = aead_decrypt, |
3487 | .ivsize = DES3_EDE_BLOCK_SIZE, | 3487 | .ivsize = DES3_EDE_BLOCK_SIZE, |
3488 | .maxauthsize = MD5_DIGEST_SIZE, | 3488 | .maxauthsize = MD5_DIGEST_SIZE, |
3489 | }, | 3489 | }, |
@@ -3531,7 +3531,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3531 | .setkey = aead_setkey, | 3531 | .setkey = aead_setkey, |
3532 | .setauthsize = aead_setauthsize, | 3532 | .setauthsize = aead_setauthsize, |
3533 | .encrypt = aead_encrypt, | 3533 | .encrypt = aead_encrypt, |
3534 | .decrypt = aead_givdecrypt, | 3534 | .decrypt = aead_decrypt, |
3535 | .ivsize = DES3_EDE_BLOCK_SIZE, | 3535 | .ivsize = DES3_EDE_BLOCK_SIZE, |
3536 | .maxauthsize = SHA1_DIGEST_SIZE, | 3536 | .maxauthsize = SHA1_DIGEST_SIZE, |
3537 | }, | 3537 | }, |
@@ -3579,7 +3579,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3579 | .setkey = aead_setkey, | 3579 | .setkey = aead_setkey, |
3580 | .setauthsize = aead_setauthsize, | 3580 | .setauthsize = aead_setauthsize, |
3581 | .encrypt = aead_encrypt, | 3581 | .encrypt = aead_encrypt, |
3582 | .decrypt = aead_givdecrypt, | 3582 | .decrypt = aead_decrypt, |
3583 | .ivsize = DES3_EDE_BLOCK_SIZE, | 3583 | .ivsize = DES3_EDE_BLOCK_SIZE, |
3584 | .maxauthsize = SHA224_DIGEST_SIZE, | 3584 | .maxauthsize = SHA224_DIGEST_SIZE, |
3585 | }, | 3585 | }, |
@@ -3627,7 +3627,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3627 | .setkey = aead_setkey, | 3627 | .setkey = aead_setkey, |
3628 | .setauthsize = aead_setauthsize, | 3628 | .setauthsize = aead_setauthsize, |
3629 | .encrypt = aead_encrypt, | 3629 | .encrypt = aead_encrypt, |
3630 | .decrypt = aead_givdecrypt, | 3630 | .decrypt = aead_decrypt, |
3631 | .ivsize = DES3_EDE_BLOCK_SIZE, | 3631 | .ivsize = DES3_EDE_BLOCK_SIZE, |
3632 | .maxauthsize = SHA256_DIGEST_SIZE, | 3632 | .maxauthsize = SHA256_DIGEST_SIZE, |
3633 | }, | 3633 | }, |
@@ -3675,7 +3675,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3675 | .setkey = aead_setkey, | 3675 | .setkey = aead_setkey, |
3676 | .setauthsize = aead_setauthsize, | 3676 | .setauthsize = aead_setauthsize, |
3677 | .encrypt = aead_encrypt, | 3677 | .encrypt = aead_encrypt, |
3678 | .decrypt = aead_givdecrypt, | 3678 | .decrypt = aead_decrypt, |
3679 | .ivsize = DES3_EDE_BLOCK_SIZE, | 3679 | .ivsize = DES3_EDE_BLOCK_SIZE, |
3680 | .maxauthsize = SHA384_DIGEST_SIZE, | 3680 | .maxauthsize = SHA384_DIGEST_SIZE, |
3681 | }, | 3681 | }, |
@@ -3723,7 +3723,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3723 | .setkey = aead_setkey, | 3723 | .setkey = aead_setkey, |
3724 | .setauthsize = aead_setauthsize, | 3724 | .setauthsize = aead_setauthsize, |
3725 | .encrypt = aead_encrypt, | 3725 | .encrypt = aead_encrypt, |
3726 | .decrypt = aead_givdecrypt, | 3726 | .decrypt = aead_decrypt, |
3727 | .ivsize = DES3_EDE_BLOCK_SIZE, | 3727 | .ivsize = DES3_EDE_BLOCK_SIZE, |
3728 | .maxauthsize = SHA512_DIGEST_SIZE, | 3728 | .maxauthsize = SHA512_DIGEST_SIZE, |
3729 | }, | 3729 | }, |
@@ -3769,7 +3769,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3769 | .setkey = aead_setkey, | 3769 | .setkey = aead_setkey, |
3770 | .setauthsize = aead_setauthsize, | 3770 | .setauthsize = aead_setauthsize, |
3771 | .encrypt = aead_encrypt, | 3771 | .encrypt = aead_encrypt, |
3772 | .decrypt = aead_givdecrypt, | 3772 | .decrypt = aead_decrypt, |
3773 | .ivsize = DES_BLOCK_SIZE, | 3773 | .ivsize = DES_BLOCK_SIZE, |
3774 | .maxauthsize = MD5_DIGEST_SIZE, | 3774 | .maxauthsize = MD5_DIGEST_SIZE, |
3775 | }, | 3775 | }, |
@@ -3815,7 +3815,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3815 | .setkey = aead_setkey, | 3815 | .setkey = aead_setkey, |
3816 | .setauthsize = aead_setauthsize, | 3816 | .setauthsize = aead_setauthsize, |
3817 | .encrypt = aead_encrypt, | 3817 | .encrypt = aead_encrypt, |
3818 | .decrypt = aead_givdecrypt, | 3818 | .decrypt = aead_decrypt, |
3819 | .ivsize = DES_BLOCK_SIZE, | 3819 | .ivsize = DES_BLOCK_SIZE, |
3820 | .maxauthsize = SHA1_DIGEST_SIZE, | 3820 | .maxauthsize = SHA1_DIGEST_SIZE, |
3821 | }, | 3821 | }, |
@@ -3861,7 +3861,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3861 | .setkey = aead_setkey, | 3861 | .setkey = aead_setkey, |
3862 | .setauthsize = aead_setauthsize, | 3862 | .setauthsize = aead_setauthsize, |
3863 | .encrypt = aead_encrypt, | 3863 | .encrypt = aead_encrypt, |
3864 | .decrypt = aead_givdecrypt, | 3864 | .decrypt = aead_decrypt, |
3865 | .ivsize = DES_BLOCK_SIZE, | 3865 | .ivsize = DES_BLOCK_SIZE, |
3866 | .maxauthsize = SHA224_DIGEST_SIZE, | 3866 | .maxauthsize = SHA224_DIGEST_SIZE, |
3867 | }, | 3867 | }, |
@@ -3907,7 +3907,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3907 | .setkey = aead_setkey, | 3907 | .setkey = aead_setkey, |
3908 | .setauthsize = aead_setauthsize, | 3908 | .setauthsize = aead_setauthsize, |
3909 | .encrypt = aead_encrypt, | 3909 | .encrypt = aead_encrypt, |
3910 | .decrypt = aead_givdecrypt, | 3910 | .decrypt = aead_decrypt, |
3911 | .ivsize = DES_BLOCK_SIZE, | 3911 | .ivsize = DES_BLOCK_SIZE, |
3912 | .maxauthsize = SHA256_DIGEST_SIZE, | 3912 | .maxauthsize = SHA256_DIGEST_SIZE, |
3913 | }, | 3913 | }, |
@@ -3953,7 +3953,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3953 | .setkey = aead_setkey, | 3953 | .setkey = aead_setkey, |
3954 | .setauthsize = aead_setauthsize, | 3954 | .setauthsize = aead_setauthsize, |
3955 | .encrypt = aead_encrypt, | 3955 | .encrypt = aead_encrypt, |
3956 | .decrypt = aead_givdecrypt, | 3956 | .decrypt = aead_decrypt, |
3957 | .ivsize = DES_BLOCK_SIZE, | 3957 | .ivsize = DES_BLOCK_SIZE, |
3958 | .maxauthsize = SHA384_DIGEST_SIZE, | 3958 | .maxauthsize = SHA384_DIGEST_SIZE, |
3959 | }, | 3959 | }, |
@@ -3999,7 +3999,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3999 | .setkey = aead_setkey, | 3999 | .setkey = aead_setkey, |
4000 | .setauthsize = aead_setauthsize, | 4000 | .setauthsize = aead_setauthsize, |
4001 | .encrypt = aead_encrypt, | 4001 | .encrypt = aead_encrypt, |
4002 | .decrypt = aead_givdecrypt, | 4002 | .decrypt = aead_decrypt, |
4003 | .ivsize = DES_BLOCK_SIZE, | 4003 | .ivsize = DES_BLOCK_SIZE, |
4004 | .maxauthsize = SHA512_DIGEST_SIZE, | 4004 | .maxauthsize = SHA512_DIGEST_SIZE, |
4005 | }, | 4005 | }, |
@@ -4048,7 +4048,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
4048 | .setkey = aead_setkey, | 4048 | .setkey = aead_setkey, |
4049 | .setauthsize = aead_setauthsize, | 4049 | .setauthsize = aead_setauthsize, |
4050 | .encrypt = aead_encrypt, | 4050 | .encrypt = aead_encrypt, |
4051 | .decrypt = aead_givdecrypt, | 4051 | .decrypt = aead_decrypt, |
4052 | .ivsize = CTR_RFC3686_IV_SIZE, | 4052 | .ivsize = CTR_RFC3686_IV_SIZE, |
4053 | .maxauthsize = MD5_DIGEST_SIZE, | 4053 | .maxauthsize = MD5_DIGEST_SIZE, |
4054 | }, | 4054 | }, |
@@ -4099,7 +4099,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
4099 | .setkey = aead_setkey, | 4099 | .setkey = aead_setkey, |
4100 | .setauthsize = aead_setauthsize, | 4100 | .setauthsize = aead_setauthsize, |
4101 | .encrypt = aead_encrypt, | 4101 | .encrypt = aead_encrypt, |
4102 | .decrypt = aead_givdecrypt, | 4102 | .decrypt = aead_decrypt, |
4103 | .ivsize = CTR_RFC3686_IV_SIZE, | 4103 | .ivsize = CTR_RFC3686_IV_SIZE, |
4104 | .maxauthsize = SHA1_DIGEST_SIZE, | 4104 | .maxauthsize = SHA1_DIGEST_SIZE, |
4105 | }, | 4105 | }, |
@@ -4150,7 +4150,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
4150 | .setkey = aead_setkey, | 4150 | .setkey = aead_setkey, |
4151 | .setauthsize = aead_setauthsize, | 4151 | .setauthsize = aead_setauthsize, |
4152 | .encrypt = aead_encrypt, | 4152 | .encrypt = aead_encrypt, |
4153 | .decrypt = aead_givdecrypt, | 4153 | .decrypt = aead_decrypt, |
4154 | .ivsize = CTR_RFC3686_IV_SIZE, | 4154 | .ivsize = CTR_RFC3686_IV_SIZE, |
4155 | .maxauthsize = SHA224_DIGEST_SIZE, | 4155 | .maxauthsize = SHA224_DIGEST_SIZE, |
4156 | }, | 4156 | }, |
@@ -4201,7 +4201,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
4201 | .setkey = aead_setkey, | 4201 | .setkey = aead_setkey, |
4202 | .setauthsize = aead_setauthsize, | 4202 | .setauthsize = aead_setauthsize, |
4203 | .encrypt = aead_encrypt, | 4203 | .encrypt = aead_encrypt, |
4204 | .decrypt = aead_givdecrypt, | 4204 | .decrypt = aead_decrypt, |
4205 | .ivsize = CTR_RFC3686_IV_SIZE, | 4205 | .ivsize = CTR_RFC3686_IV_SIZE, |
4206 | .maxauthsize = SHA256_DIGEST_SIZE, | 4206 | .maxauthsize = SHA256_DIGEST_SIZE, |
4207 | }, | 4207 | }, |
@@ -4252,7 +4252,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
4252 | .setkey = aead_setkey, | 4252 | .setkey = aead_setkey, |
4253 | .setauthsize = aead_setauthsize, | 4253 | .setauthsize = aead_setauthsize, |
4254 | .encrypt = aead_encrypt, | 4254 | .encrypt = aead_encrypt, |
4255 | .decrypt = aead_givdecrypt, | 4255 | .decrypt = aead_decrypt, |
4256 | .ivsize = CTR_RFC3686_IV_SIZE, | 4256 | .ivsize = CTR_RFC3686_IV_SIZE, |
4257 | .maxauthsize = SHA384_DIGEST_SIZE, | 4257 | .maxauthsize = SHA384_DIGEST_SIZE, |
4258 | }, | 4258 | }, |
@@ -4303,7 +4303,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
4303 | .setkey = aead_setkey, | 4303 | .setkey = aead_setkey, |
4304 | .setauthsize = aead_setauthsize, | 4304 | .setauthsize = aead_setauthsize, |
4305 | .encrypt = aead_encrypt, | 4305 | .encrypt = aead_encrypt, |
4306 | .decrypt = aead_givdecrypt, | 4306 | .decrypt = aead_decrypt, |
4307 | .ivsize = CTR_RFC3686_IV_SIZE, | 4307 | .ivsize = CTR_RFC3686_IV_SIZE, |
4308 | .maxauthsize = SHA512_DIGEST_SIZE, | 4308 | .maxauthsize = SHA512_DIGEST_SIZE, |
4309 | }, | 4309 | }, |
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index f1ecc8df8d41..36365b3efdfd 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c | |||
@@ -1898,6 +1898,7 @@ caam_hash_alloc(struct caam_hash_template *template, | |||
1898 | template->name); | 1898 | template->name); |
1899 | snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", | 1899 | snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", |
1900 | template->driver_name); | 1900 | template->driver_name); |
1901 | t_alg->ahash_alg.setkey = NULL; | ||
1901 | } | 1902 | } |
1902 | alg->cra_module = THIS_MODULE; | 1903 | alg->cra_module = THIS_MODULE; |
1903 | alg->cra_init = caam_hash_cra_init; | 1904 | alg->cra_init = caam_hash_cra_init; |
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index 769148dbaeb3..20f35df8a01f 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c | |||
@@ -1260,8 +1260,8 @@ static struct crypto_alg qat_algs[] = { { | |||
1260 | .setkey = qat_alg_ablkcipher_xts_setkey, | 1260 | .setkey = qat_alg_ablkcipher_xts_setkey, |
1261 | .decrypt = qat_alg_ablkcipher_decrypt, | 1261 | .decrypt = qat_alg_ablkcipher_decrypt, |
1262 | .encrypt = qat_alg_ablkcipher_encrypt, | 1262 | .encrypt = qat_alg_ablkcipher_encrypt, |
1263 | .min_keysize = AES_MIN_KEY_SIZE, | 1263 | .min_keysize = 2 * AES_MIN_KEY_SIZE, |
1264 | .max_keysize = AES_MAX_KEY_SIZE, | 1264 | .max_keysize = 2 * AES_MAX_KEY_SIZE, |
1265 | .ivsize = AES_BLOCK_SIZE, | 1265 | .ivsize = AES_BLOCK_SIZE, |
1266 | }, | 1266 | }, |
1267 | }, | 1267 | }, |
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c index cfb25413917c..24353ec336c5 100644 --- a/drivers/crypto/vmx/aes_xts.c +++ b/drivers/crypto/vmx/aes_xts.c | |||
@@ -129,8 +129,8 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc, | |||
129 | 129 | ||
130 | blkcipher_walk_init(&walk, dst, src, nbytes); | 130 | blkcipher_walk_init(&walk, dst, src, nbytes); |
131 | 131 | ||
132 | iv = (u8 *)walk.iv; | ||
133 | ret = blkcipher_walk_virt(desc, &walk); | 132 | ret = blkcipher_walk_virt(desc, &walk); |
133 | iv = walk.iv; | ||
134 | memset(tweak, 0, AES_BLOCK_SIZE); | 134 | memset(tweak, 0, AES_BLOCK_SIZE); |
135 | aes_p8_encrypt(iv, tweak, &ctx->tweak_key); | 135 | aes_p8_encrypt(iv, tweak, &ctx->tweak_key); |
136 | 136 | ||
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c index 803f3953b341..29f600f2c447 100644 --- a/drivers/dax/dax.c +++ b/drivers/dax/dax.c | |||
@@ -459,7 +459,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, | |||
459 | } | 459 | } |
460 | 460 | ||
461 | pgoff = linear_page_index(vma, pmd_addr); | 461 | pgoff = linear_page_index(vma, pmd_addr); |
462 | phys = pgoff_to_phys(dax_dev, pgoff, PAGE_SIZE); | 462 | phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE); |
463 | if (phys == -1) { | 463 | if (phys == -1) { |
464 | dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__, | 464 | dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__, |
465 | pgoff); | 465 | pgoff); |
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c index dfb168568af1..1f01e98c83c7 100644 --- a/drivers/dax/pmem.c +++ b/drivers/dax/pmem.c | |||
@@ -116,6 +116,9 @@ static int dax_pmem_probe(struct device *dev) | |||
116 | if (rc) | 116 | if (rc) |
117 | return rc; | 117 | return rc; |
118 | 118 | ||
119 | /* adjust the dax_region resource to the start of data */ | ||
120 | res.start += le64_to_cpu(pfn_sb->dataoff); | ||
121 | |||
119 | nd_region = to_nd_region(dev->parent); | 122 | nd_region = to_nd_region(dev->parent); |
120 | dax_region = alloc_dax_region(dev, nd_region->id, &res, | 123 | dax_region = alloc_dax_region(dev, nd_region->id, &res, |
121 | le32_to_cpu(pfn_sb->align), addr, PFN_DEV|PFN_MAP); | 124 | le32_to_cpu(pfn_sb->align), addr, PFN_DEV|PFN_MAP); |
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index e434ffe7bc5c..832cbd647145 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c | |||
@@ -2067,7 +2067,7 @@ err_dma_unregister: | |||
2067 | err_clk_disable: | 2067 | err_clk_disable: |
2068 | clk_disable_unprepare(atxdmac->clk); | 2068 | clk_disable_unprepare(atxdmac->clk); |
2069 | err_free_irq: | 2069 | err_free_irq: |
2070 | free_irq(atxdmac->irq, atxdmac->dma.dev); | 2070 | free_irq(atxdmac->irq, atxdmac); |
2071 | return ret; | 2071 | return ret; |
2072 | } | 2072 | } |
2073 | 2073 | ||
@@ -2081,7 +2081,7 @@ static int at_xdmac_remove(struct platform_device *pdev) | |||
2081 | dma_async_device_unregister(&atxdmac->dma); | 2081 | dma_async_device_unregister(&atxdmac->dma); |
2082 | clk_disable_unprepare(atxdmac->clk); | 2082 | clk_disable_unprepare(atxdmac->clk); |
2083 | 2083 | ||
2084 | free_irq(atxdmac->irq, atxdmac->dma.dev); | 2084 | free_irq(atxdmac->irq, atxdmac); |
2085 | 2085 | ||
2086 | for (i = 0; i < atxdmac->dma.chancnt; i++) { | 2086 | for (i = 0; i < atxdmac->dma.chancnt; i++) { |
2087 | struct at_xdmac_chan *atchan = &atxdmac->chan[i]; | 2087 | struct at_xdmac_chan *atchan = &atxdmac->chan[i]; |
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c index aad167eaaee8..de2a2a2b1d75 100644 --- a/drivers/dma/fsl_raid.c +++ b/drivers/dma/fsl_raid.c | |||
@@ -836,6 +836,7 @@ static int fsl_re_probe(struct platform_device *ofdev) | |||
836 | rc = of_property_read_u32(np, "reg", &off); | 836 | rc = of_property_read_u32(np, "reg", &off); |
837 | if (rc) { | 837 | if (rc) { |
838 | dev_err(dev, "Reg property not found in JQ node\n"); | 838 | dev_err(dev, "Reg property not found in JQ node\n"); |
839 | of_node_put(np); | ||
839 | return -ENODEV; | 840 | return -ENODEV; |
840 | } | 841 | } |
841 | /* Find out the Job Rings present under each JQ */ | 842 | /* Find out the Job Rings present under each JQ */ |
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c index a4c53be482cf..624f1e1e9c55 100644 --- a/drivers/dma/img-mdc-dma.c +++ b/drivers/dma/img-mdc-dma.c | |||
@@ -861,7 +861,6 @@ static int mdc_dma_probe(struct platform_device *pdev) | |||
861 | { | 861 | { |
862 | struct mdc_dma *mdma; | 862 | struct mdc_dma *mdma; |
863 | struct resource *res; | 863 | struct resource *res; |
864 | const struct of_device_id *match; | ||
865 | unsigned int i; | 864 | unsigned int i; |
866 | u32 val; | 865 | u32 val; |
867 | int ret; | 866 | int ret; |
@@ -871,8 +870,7 @@ static int mdc_dma_probe(struct platform_device *pdev) | |||
871 | return -ENOMEM; | 870 | return -ENOMEM; |
872 | platform_set_drvdata(pdev, mdma); | 871 | platform_set_drvdata(pdev, mdma); |
873 | 872 | ||
874 | match = of_match_device(mdc_dma_of_match, &pdev->dev); | 873 | mdma->soc = of_device_get_match_data(&pdev->dev); |
875 | mdma->soc = match->data; | ||
876 | 874 | ||
877 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 875 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
878 | mdma->regs = devm_ioremap_resource(&pdev->dev, res); | 876 | mdma->regs = devm_ioremap_resource(&pdev->dev, res); |
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index dc7850a422b8..3f56f9ca4482 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c | |||
@@ -638,7 +638,7 @@ static bool pxad_try_hotchain(struct virt_dma_chan *vc, | |||
638 | vd_last_issued = list_entry(vc->desc_issued.prev, | 638 | vd_last_issued = list_entry(vc->desc_issued.prev, |
639 | struct virt_dma_desc, node); | 639 | struct virt_dma_desc, node); |
640 | pxad_desc_chain(vd_last_issued, vd); | 640 | pxad_desc_chain(vd_last_issued, vd); |
641 | if (is_chan_running(chan) || is_desc_completed(vd_last_issued)) | 641 | if (is_chan_running(chan) || is_desc_completed(vd)) |
642 | return true; | 642 | return true; |
643 | } | 643 | } |
644 | 644 | ||
@@ -671,6 +671,7 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id) | |||
671 | struct virt_dma_desc *vd, *tmp; | 671 | struct virt_dma_desc *vd, *tmp; |
672 | unsigned int dcsr; | 672 | unsigned int dcsr; |
673 | unsigned long flags; | 673 | unsigned long flags; |
674 | bool vd_completed; | ||
674 | dma_cookie_t last_started = 0; | 675 | dma_cookie_t last_started = 0; |
675 | 676 | ||
676 | BUG_ON(!chan); | 677 | BUG_ON(!chan); |
@@ -681,15 +682,17 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id) | |||
681 | 682 | ||
682 | spin_lock_irqsave(&chan->vc.lock, flags); | 683 | spin_lock_irqsave(&chan->vc.lock, flags); |
683 | list_for_each_entry_safe(vd, tmp, &chan->vc.desc_issued, node) { | 684 | list_for_each_entry_safe(vd, tmp, &chan->vc.desc_issued, node) { |
685 | vd_completed = is_desc_completed(vd); | ||
684 | dev_dbg(&chan->vc.chan.dev->device, | 686 | dev_dbg(&chan->vc.chan.dev->device, |
685 | "%s(): checking txd %p[%x]: completed=%d\n", | 687 | "%s(): checking txd %p[%x]: completed=%d dcsr=0x%x\n", |
686 | __func__, vd, vd->tx.cookie, is_desc_completed(vd)); | 688 | __func__, vd, vd->tx.cookie, vd_completed, |
689 | dcsr); | ||
687 | last_started = vd->tx.cookie; | 690 | last_started = vd->tx.cookie; |
688 | if (to_pxad_sw_desc(vd)->cyclic) { | 691 | if (to_pxad_sw_desc(vd)->cyclic) { |
689 | vchan_cyclic_callback(vd); | 692 | vchan_cyclic_callback(vd); |
690 | break; | 693 | break; |
691 | } | 694 | } |
692 | if (is_desc_completed(vd)) { | 695 | if (vd_completed) { |
693 | list_del(&vd->node); | 696 | list_del(&vd->node); |
694 | vchan_cookie_complete(vd); | 697 | vchan_cookie_complete(vd); |
695 | } else { | 698 | } else { |
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c index 749f1bd5d65d..06ecdc38cee0 100644 --- a/drivers/dma/sh/usb-dmac.c +++ b/drivers/dma/sh/usb-dmac.c | |||
@@ -600,27 +600,30 @@ static irqreturn_t usb_dmac_isr_channel(int irq, void *dev) | |||
600 | { | 600 | { |
601 | struct usb_dmac_chan *chan = dev; | 601 | struct usb_dmac_chan *chan = dev; |
602 | irqreturn_t ret = IRQ_NONE; | 602 | irqreturn_t ret = IRQ_NONE; |
603 | u32 mask = USB_DMACHCR_TE; | 603 | u32 mask = 0; |
604 | u32 check_bits = USB_DMACHCR_TE | USB_DMACHCR_SP; | ||
605 | u32 chcr; | 604 | u32 chcr; |
605 | bool xfer_end = false; | ||
606 | 606 | ||
607 | spin_lock(&chan->vc.lock); | 607 | spin_lock(&chan->vc.lock); |
608 | 608 | ||
609 | chcr = usb_dmac_chan_read(chan, USB_DMACHCR); | 609 | chcr = usb_dmac_chan_read(chan, USB_DMACHCR); |
610 | if (chcr & check_bits) | 610 | if (chcr & (USB_DMACHCR_TE | USB_DMACHCR_SP)) { |
611 | mask |= USB_DMACHCR_DE | check_bits; | 611 | mask |= USB_DMACHCR_DE | USB_DMACHCR_TE | USB_DMACHCR_SP; |
612 | if (chcr & USB_DMACHCR_DE) | ||
613 | xfer_end = true; | ||
614 | ret |= IRQ_HANDLED; | ||
615 | } | ||
612 | if (chcr & USB_DMACHCR_NULL) { | 616 | if (chcr & USB_DMACHCR_NULL) { |
613 | /* An interruption of TE will happen after we set FTE */ | 617 | /* An interruption of TE will happen after we set FTE */ |
614 | mask |= USB_DMACHCR_NULL; | 618 | mask |= USB_DMACHCR_NULL; |
615 | chcr |= USB_DMACHCR_FTE; | 619 | chcr |= USB_DMACHCR_FTE; |
616 | ret |= IRQ_HANDLED; | 620 | ret |= IRQ_HANDLED; |
617 | } | 621 | } |
618 | usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask); | 622 | if (mask) |
623 | usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask); | ||
619 | 624 | ||
620 | if (chcr & check_bits) { | 625 | if (xfer_end) |
621 | usb_dmac_isr_transfer_end(chan); | 626 | usb_dmac_isr_transfer_end(chan); |
622 | ret |= IRQ_HANDLED; | ||
623 | } | ||
624 | 627 | ||
625 | spin_unlock(&chan->vc.lock); | 628 | spin_unlock(&chan->vc.lock); |
626 | 629 | ||
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index d0c1dab9b435..dff1a4a6dc1b 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig | |||
@@ -251,6 +251,14 @@ config EDAC_SBRIDGE | |||
251 | Support for error detection and correction the Intel | 251 | Support for error detection and correction the Intel |
252 | Sandy Bridge, Ivy Bridge and Haswell Integrated Memory Controllers. | 252 | Sandy Bridge, Ivy Bridge and Haswell Integrated Memory Controllers. |
253 | 253 | ||
254 | config EDAC_SKX | ||
255 | tristate "Intel Skylake server Integrated MC" | ||
256 | depends on EDAC_MM_EDAC && PCI && X86_64 && X86_MCE_INTEL | ||
257 | depends on PCI_MMCONFIG | ||
258 | help | ||
259 | Support for error detection and correction the Intel | ||
260 | Skylake server Integrated Memory Controllers. | ||
261 | |||
254 | config EDAC_MPC85XX | 262 | config EDAC_MPC85XX |
255 | tristate "Freescale MPC83xx / MPC85xx" | 263 | tristate "Freescale MPC83xx / MPC85xx" |
256 | depends on EDAC_MM_EDAC && FSL_SOC | 264 | depends on EDAC_MM_EDAC && FSL_SOC |
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index f9e4a3e0e6e9..986049925b08 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile | |||
@@ -31,6 +31,7 @@ obj-$(CONFIG_EDAC_I5400) += i5400_edac.o | |||
31 | obj-$(CONFIG_EDAC_I7300) += i7300_edac.o | 31 | obj-$(CONFIG_EDAC_I7300) += i7300_edac.o |
32 | obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o | 32 | obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o |
33 | obj-$(CONFIG_EDAC_SBRIDGE) += sb_edac.o | 33 | obj-$(CONFIG_EDAC_SBRIDGE) += sb_edac.o |
34 | obj-$(CONFIG_EDAC_SKX) += skx_edac.o | ||
34 | obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o | 35 | obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o |
35 | obj-$(CONFIG_EDAC_E752X) += e752x_edac.o | 36 | obj-$(CONFIG_EDAC_E752X) += e752x_edac.o |
36 | obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o | 37 | obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o |
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index 4fb2eb7c800d..ce0067b7a2f6 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c | |||
@@ -552,9 +552,9 @@ static const struct pci_id_table pci_dev_descr_haswell_table[] = { | |||
552 | /* Knight's Landing Support */ | 552 | /* Knight's Landing Support */ |
553 | /* | 553 | /* |
554 | * KNL's memory channels are swizzled between memory controllers. | 554 | * KNL's memory channels are swizzled between memory controllers. |
555 | * MC0 is mapped to CH3,5,6 and MC1 is mapped to CH0,1,2 | 555 | * MC0 is mapped to CH3,4,5 and MC1 is mapped to CH0,1,2 |
556 | */ | 556 | */ |
557 | #define knl_channel_remap(channel) ((channel + 3) % 6) | 557 | #define knl_channel_remap(mc, chan) ((mc) ? (chan) : (chan) + 3) |
558 | 558 | ||
559 | /* Memory controller, TAD tables, error injection - 2-8-0, 2-9-0 (2 of these) */ | 559 | /* Memory controller, TAD tables, error injection - 2-8-0, 2-9-0 (2 of these) */ |
560 | #define PCI_DEVICE_ID_INTEL_KNL_IMC_MC 0x7840 | 560 | #define PCI_DEVICE_ID_INTEL_KNL_IMC_MC 0x7840 |
@@ -1286,7 +1286,7 @@ static u32 knl_get_mc_route(int entry, u32 reg) | |||
1286 | mc = GET_BITFIELD(reg, entry*3, (entry*3)+2); | 1286 | mc = GET_BITFIELD(reg, entry*3, (entry*3)+2); |
1287 | chan = GET_BITFIELD(reg, (entry*2) + 18, (entry*2) + 18 + 1); | 1287 | chan = GET_BITFIELD(reg, (entry*2) + 18, (entry*2) + 18 + 1); |
1288 | 1288 | ||
1289 | return knl_channel_remap(mc*3 + chan); | 1289 | return knl_channel_remap(mc, chan); |
1290 | } | 1290 | } |
1291 | 1291 | ||
1292 | /* | 1292 | /* |
@@ -2997,8 +2997,15 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci, | |||
2997 | } else { | 2997 | } else { |
2998 | char A = *("A"); | 2998 | char A = *("A"); |
2999 | 2999 | ||
3000 | channel = knl_channel_remap(channel); | 3000 | /* |
3001 | * Reported channel is in range 0-2, so we can't map it | ||
3002 | * back to mc. To figure out mc we check machine check | ||
3003 | * bank register that reported this error. | ||
3004 | * bank15 means mc0 and bank16 means mc1. | ||
3005 | */ | ||
3006 | channel = knl_channel_remap(m->bank == 16, channel); | ||
3001 | channel_mask = 1 << channel; | 3007 | channel_mask = 1 << channel; |
3008 | |||
3002 | snprintf(msg, sizeof(msg), | 3009 | snprintf(msg, sizeof(msg), |
3003 | "%s%s err_code:%04x:%04x channel:%d (DIMM_%c)", | 3010 | "%s%s err_code:%04x:%04x channel:%d (DIMM_%c)", |
3004 | overflow ? " OVERFLOW" : "", | 3011 | overflow ? " OVERFLOW" : "", |
diff --git a/drivers/edac/skx_edac.c b/drivers/edac/skx_edac.c new file mode 100644 index 000000000000..0ff4878c2aa1 --- /dev/null +++ b/drivers/edac/skx_edac.c | |||
@@ -0,0 +1,1121 @@ | |||
1 | /* | ||
2 | * EDAC driver for Intel(R) Xeon(R) Skylake processors | ||
3 | * Copyright (c) 2016, Intel Corporation. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/module.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/pci.h> | ||
18 | #include <linux/pci_ids.h> | ||
19 | #include <linux/slab.h> | ||
20 | #include <linux/delay.h> | ||
21 | #include <linux/edac.h> | ||
22 | #include <linux/mmzone.h> | ||
23 | #include <linux/smp.h> | ||
24 | #include <linux/bitmap.h> | ||
25 | #include <linux/math64.h> | ||
26 | #include <linux/mod_devicetable.h> | ||
27 | #include <asm/cpu_device_id.h> | ||
28 | #include <asm/processor.h> | ||
29 | #include <asm/mce.h> | ||
30 | |||
31 | #include "edac_core.h" | ||
32 | |||
33 | #define SKX_REVISION " Ver: 1.0 " | ||
34 | |||
35 | /* | ||
36 | * Debug macros | ||
37 | */ | ||
38 | #define skx_printk(level, fmt, arg...) \ | ||
39 | edac_printk(level, "skx", fmt, ##arg) | ||
40 | |||
41 | #define skx_mc_printk(mci, level, fmt, arg...) \ | ||
42 | edac_mc_chipset_printk(mci, level, "skx", fmt, ##arg) | ||
43 | |||
44 | /* | ||
45 | * Get a bit field at register value <v>, from bit <lo> to bit <hi> | ||
46 | */ | ||
47 | #define GET_BITFIELD(v, lo, hi) \ | ||
48 | (((v) & GENMASK_ULL((hi), (lo))) >> (lo)) | ||
49 | |||
50 | static LIST_HEAD(skx_edac_list); | ||
51 | |||
52 | static u64 skx_tolm, skx_tohm; | ||
53 | |||
54 | #define NUM_IMC 2 /* memory controllers per socket */ | ||
55 | #define NUM_CHANNELS 3 /* channels per memory controller */ | ||
56 | #define NUM_DIMMS 2 /* Max DIMMS per channel */ | ||
57 | |||
58 | #define MASK26 0x3FFFFFF /* Mask for 2^26 */ | ||
59 | #define MASK29 0x1FFFFFFF /* Mask for 2^29 */ | ||
60 | |||
61 | /* | ||
62 | * Each cpu socket contains some pci devices that provide global | ||
63 | * information, and also some that are local to each of the two | ||
64 | * memory controllers on the die. | ||
65 | */ | ||
66 | struct skx_dev { | ||
67 | struct list_head list; | ||
68 | u8 bus[4]; | ||
69 | struct pci_dev *sad_all; | ||
70 | struct pci_dev *util_all; | ||
71 | u32 mcroute; | ||
72 | struct skx_imc { | ||
73 | struct mem_ctl_info *mci; | ||
74 | u8 mc; /* system wide mc# */ | ||
75 | u8 lmc; /* socket relative mc# */ | ||
76 | u8 src_id, node_id; | ||
77 | struct skx_channel { | ||
78 | struct pci_dev *cdev; | ||
79 | struct skx_dimm { | ||
80 | u8 close_pg; | ||
81 | u8 bank_xor_enable; | ||
82 | u8 fine_grain_bank; | ||
83 | u8 rowbits; | ||
84 | u8 colbits; | ||
85 | } dimms[NUM_DIMMS]; | ||
86 | } chan[NUM_CHANNELS]; | ||
87 | } imc[NUM_IMC]; | ||
88 | }; | ||
89 | static int skx_num_sockets; | ||
90 | |||
91 | struct skx_pvt { | ||
92 | struct skx_imc *imc; | ||
93 | }; | ||
94 | |||
95 | struct decoded_addr { | ||
96 | struct skx_dev *dev; | ||
97 | u64 addr; | ||
98 | int socket; | ||
99 | int imc; | ||
100 | int channel; | ||
101 | u64 chan_addr; | ||
102 | int sktways; | ||
103 | int chanways; | ||
104 | int dimm; | ||
105 | int rank; | ||
106 | int channel_rank; | ||
107 | u64 rank_address; | ||
108 | int row; | ||
109 | int column; | ||
110 | int bank_address; | ||
111 | int bank_group; | ||
112 | }; | ||
113 | |||
114 | static struct skx_dev *get_skx_dev(u8 bus, u8 idx) | ||
115 | { | ||
116 | struct skx_dev *d; | ||
117 | |||
118 | list_for_each_entry(d, &skx_edac_list, list) { | ||
119 | if (d->bus[idx] == bus) | ||
120 | return d; | ||
121 | } | ||
122 | |||
123 | return NULL; | ||
124 | } | ||
125 | |||
126 | enum munittype { | ||
127 | CHAN0, CHAN1, CHAN2, SAD_ALL, UTIL_ALL, SAD | ||
128 | }; | ||
129 | |||
130 | struct munit { | ||
131 | u16 did; | ||
132 | u16 devfn[NUM_IMC]; | ||
133 | u8 busidx; | ||
134 | u8 per_socket; | ||
135 | enum munittype mtype; | ||
136 | }; | ||
137 | |||
138 | /* | ||
139 | * List of PCI device ids that we need together with some device | ||
140 | * number and function numbers to tell which memory controller the | ||
141 | * device belongs to. | ||
142 | */ | ||
143 | static const struct munit skx_all_munits[] = { | ||
144 | { 0x2054, { }, 1, 1, SAD_ALL }, | ||
145 | { 0x2055, { }, 1, 1, UTIL_ALL }, | ||
146 | { 0x2040, { PCI_DEVFN(10, 0), PCI_DEVFN(12, 0) }, 2, 2, CHAN0 }, | ||
147 | { 0x2044, { PCI_DEVFN(10, 4), PCI_DEVFN(12, 4) }, 2, 2, CHAN1 }, | ||
148 | { 0x2048, { PCI_DEVFN(11, 0), PCI_DEVFN(13, 0) }, 2, 2, CHAN2 }, | ||
149 | { 0x208e, { }, 1, 0, SAD }, | ||
150 | { } | ||
151 | }; | ||
152 | |||
153 | /* | ||
154 | * We use the per-socket device 0x2016 to count how many sockets are present, | ||
155 | * and to detemine which PCI buses are associated with each socket. Allocate | ||
156 | * and build the full list of all the skx_dev structures that we need here. | ||
157 | */ | ||
158 | static int get_all_bus_mappings(void) | ||
159 | { | ||
160 | struct pci_dev *pdev, *prev; | ||
161 | struct skx_dev *d; | ||
162 | u32 reg; | ||
163 | int ndev = 0; | ||
164 | |||
165 | prev = NULL; | ||
166 | for (;;) { | ||
167 | pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2016, prev); | ||
168 | if (!pdev) | ||
169 | break; | ||
170 | ndev++; | ||
171 | d = kzalloc(sizeof(*d), GFP_KERNEL); | ||
172 | if (!d) { | ||
173 | pci_dev_put(pdev); | ||
174 | return -ENOMEM; | ||
175 | } | ||
176 | pci_read_config_dword(pdev, 0xCC, ®); | ||
177 | d->bus[0] = GET_BITFIELD(reg, 0, 7); | ||
178 | d->bus[1] = GET_BITFIELD(reg, 8, 15); | ||
179 | d->bus[2] = GET_BITFIELD(reg, 16, 23); | ||
180 | d->bus[3] = GET_BITFIELD(reg, 24, 31); | ||
181 | edac_dbg(2, "busses: %x, %x, %x, %x\n", | ||
182 | d->bus[0], d->bus[1], d->bus[2], d->bus[3]); | ||
183 | list_add_tail(&d->list, &skx_edac_list); | ||
184 | skx_num_sockets++; | ||
185 | prev = pdev; | ||
186 | } | ||
187 | |||
188 | return ndev; | ||
189 | } | ||
190 | |||
191 | static int get_all_munits(const struct munit *m) | ||
192 | { | ||
193 | struct pci_dev *pdev, *prev; | ||
194 | struct skx_dev *d; | ||
195 | u32 reg; | ||
196 | int i = 0, ndev = 0; | ||
197 | |||
198 | prev = NULL; | ||
199 | for (;;) { | ||
200 | pdev = pci_get_device(PCI_VENDOR_ID_INTEL, m->did, prev); | ||
201 | if (!pdev) | ||
202 | break; | ||
203 | ndev++; | ||
204 | if (m->per_socket == NUM_IMC) { | ||
205 | for (i = 0; i < NUM_IMC; i++) | ||
206 | if (m->devfn[i] == pdev->devfn) | ||
207 | break; | ||
208 | if (i == NUM_IMC) | ||
209 | goto fail; | ||
210 | } | ||
211 | d = get_skx_dev(pdev->bus->number, m->busidx); | ||
212 | if (!d) | ||
213 | goto fail; | ||
214 | |||
215 | /* Be sure that the device is enabled */ | ||
216 | if (unlikely(pci_enable_device(pdev) < 0)) { | ||
217 | skx_printk(KERN_ERR, | ||
218 | "Couldn't enable %04x:%04x\n", PCI_VENDOR_ID_INTEL, m->did); | ||
219 | goto fail; | ||
220 | } | ||
221 | |||
222 | switch (m->mtype) { | ||
223 | case CHAN0: case CHAN1: case CHAN2: | ||
224 | pci_dev_get(pdev); | ||
225 | d->imc[i].chan[m->mtype].cdev = pdev; | ||
226 | break; | ||
227 | case SAD_ALL: | ||
228 | pci_dev_get(pdev); | ||
229 | d->sad_all = pdev; | ||
230 | break; | ||
231 | case UTIL_ALL: | ||
232 | pci_dev_get(pdev); | ||
233 | d->util_all = pdev; | ||
234 | break; | ||
235 | case SAD: | ||
236 | /* | ||
237 | * one of these devices per core, including cores | ||
238 | * that don't exist on this SKU. Ignore any that | ||
239 | * read a route table of zero, make sure all the | ||
240 | * non-zero values match. | ||
241 | */ | ||
242 | pci_read_config_dword(pdev, 0xB4, ®); | ||
243 | if (reg != 0) { | ||
244 | if (d->mcroute == 0) | ||
245 | d->mcroute = reg; | ||
246 | else if (d->mcroute != reg) { | ||
247 | skx_printk(KERN_ERR, | ||
248 | "mcroute mismatch\n"); | ||
249 | goto fail; | ||
250 | } | ||
251 | } | ||
252 | ndev--; | ||
253 | break; | ||
254 | } | ||
255 | |||
256 | prev = pdev; | ||
257 | } | ||
258 | |||
259 | return ndev; | ||
260 | fail: | ||
261 | pci_dev_put(pdev); | ||
262 | return -ENODEV; | ||
263 | } | ||
264 | |||
265 | const struct x86_cpu_id skx_cpuids[] = { | ||
266 | { X86_VENDOR_INTEL, 6, 0x55, 0, 0 }, /* Skylake */ | ||
267 | { } | ||
268 | }; | ||
269 | MODULE_DEVICE_TABLE(x86cpu, skx_cpuids); | ||
270 | |||
271 | static u8 get_src_id(struct skx_dev *d) | ||
272 | { | ||
273 | u32 reg; | ||
274 | |||
275 | pci_read_config_dword(d->util_all, 0xF0, ®); | ||
276 | |||
277 | return GET_BITFIELD(reg, 12, 14); | ||
278 | } | ||
279 | |||
280 | static u8 skx_get_node_id(struct skx_dev *d) | ||
281 | { | ||
282 | u32 reg; | ||
283 | |||
284 | pci_read_config_dword(d->util_all, 0xF4, ®); | ||
285 | |||
286 | return GET_BITFIELD(reg, 0, 2); | ||
287 | } | ||
288 | |||
289 | static int get_dimm_attr(u32 reg, int lobit, int hibit, int add, int minval, | ||
290 | int maxval, char *name) | ||
291 | { | ||
292 | u32 val = GET_BITFIELD(reg, lobit, hibit); | ||
293 | |||
294 | if (val < minval || val > maxval) { | ||
295 | edac_dbg(2, "bad %s = %d (raw=%x)\n", name, val, reg); | ||
296 | return -EINVAL; | ||
297 | } | ||
298 | return val + add; | ||
299 | } | ||
300 | |||
301 | #define IS_DIMM_PRESENT(mtr) GET_BITFIELD((mtr), 15, 15) | ||
302 | |||
303 | #define numrank(reg) get_dimm_attr((reg), 12, 13, 0, 1, 2, "ranks") | ||
304 | #define numrow(reg) get_dimm_attr((reg), 2, 4, 12, 1, 6, "rows") | ||
305 | #define numcol(reg) get_dimm_attr((reg), 0, 1, 10, 0, 2, "cols") | ||
306 | |||
307 | static int get_width(u32 mtr) | ||
308 | { | ||
309 | switch (GET_BITFIELD(mtr, 8, 9)) { | ||
310 | case 0: | ||
311 | return DEV_X4; | ||
312 | case 1: | ||
313 | return DEV_X8; | ||
314 | case 2: | ||
315 | return DEV_X16; | ||
316 | } | ||
317 | return DEV_UNKNOWN; | ||
318 | } | ||
319 | |||
320 | static int skx_get_hi_lo(void) | ||
321 | { | ||
322 | struct pci_dev *pdev; | ||
323 | u32 reg; | ||
324 | |||
325 | pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2034, NULL); | ||
326 | if (!pdev) { | ||
327 | edac_dbg(0, "Can't get tolm/tohm\n"); | ||
328 | return -ENODEV; | ||
329 | } | ||
330 | |||
331 | pci_read_config_dword(pdev, 0xD0, ®); | ||
332 | skx_tolm = reg; | ||
333 | pci_read_config_dword(pdev, 0xD4, ®); | ||
334 | skx_tohm = reg; | ||
335 | pci_read_config_dword(pdev, 0xD8, ®); | ||
336 | skx_tohm |= (u64)reg << 32; | ||
337 | |||
338 | pci_dev_put(pdev); | ||
339 | edac_dbg(2, "tolm=%llx tohm=%llx\n", skx_tolm, skx_tohm); | ||
340 | |||
341 | return 0; | ||
342 | } | ||
343 | |||
344 | static int get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm, | ||
345 | struct skx_imc *imc, int chan, int dimmno) | ||
346 | { | ||
347 | int banks = 16, ranks, rows, cols, npages; | ||
348 | u64 size; | ||
349 | |||
350 | if (!IS_DIMM_PRESENT(mtr)) | ||
351 | return 0; | ||
352 | ranks = numrank(mtr); | ||
353 | rows = numrow(mtr); | ||
354 | cols = numcol(mtr); | ||
355 | |||
356 | /* | ||
357 | * Compute size in 8-byte (2^3) words, then shift to MiB (2^20) | ||
358 | */ | ||
359 | size = ((1ull << (rows + cols + ranks)) * banks) >> (20 - 3); | ||
360 | npages = MiB_TO_PAGES(size); | ||
361 | |||
362 | edac_dbg(0, "mc#%d: channel %d, dimm %d, %lld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n", | ||
363 | imc->mc, chan, dimmno, size, npages, | ||
364 | banks, ranks, rows, cols); | ||
365 | |||
366 | imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mtr, 0, 0); | ||
367 | imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mtr, 9, 9); | ||
368 | imc->chan[chan].dimms[dimmno].fine_grain_bank = GET_BITFIELD(amap, 0, 0); | ||
369 | imc->chan[chan].dimms[dimmno].rowbits = rows; | ||
370 | imc->chan[chan].dimms[dimmno].colbits = cols; | ||
371 | |||
372 | dimm->nr_pages = npages; | ||
373 | dimm->grain = 32; | ||
374 | dimm->dtype = get_width(mtr); | ||
375 | dimm->mtype = MEM_DDR4; | ||
376 | dimm->edac_mode = EDAC_SECDED; /* likely better than this */ | ||
377 | snprintf(dimm->label, sizeof(dimm->label), "CPU_SrcID#%u_MC#%u_Chan#%u_DIMM#%u", | ||
378 | imc->src_id, imc->lmc, chan, dimmno); | ||
379 | |||
380 | return 1; | ||
381 | } | ||
382 | |||
383 | #define SKX_GET_MTMTR(dev, reg) \ | ||
384 | pci_read_config_dword((dev), 0x87c, ®) | ||
385 | |||
386 | static bool skx_check_ecc(struct pci_dev *pdev) | ||
387 | { | ||
388 | u32 mtmtr; | ||
389 | |||
390 | SKX_GET_MTMTR(pdev, mtmtr); | ||
391 | |||
392 | return !!GET_BITFIELD(mtmtr, 2, 2); | ||
393 | } | ||
394 | |||
395 | static int skx_get_dimm_config(struct mem_ctl_info *mci) | ||
396 | { | ||
397 | struct skx_pvt *pvt = mci->pvt_info; | ||
398 | struct skx_imc *imc = pvt->imc; | ||
399 | struct dimm_info *dimm; | ||
400 | int i, j; | ||
401 | u32 mtr, amap; | ||
402 | int ndimms; | ||
403 | |||
404 | for (i = 0; i < NUM_CHANNELS; i++) { | ||
405 | ndimms = 0; | ||
406 | pci_read_config_dword(imc->chan[i].cdev, 0x8C, &amap); | ||
407 | for (j = 0; j < NUM_DIMMS; j++) { | ||
408 | dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, | ||
409 | mci->n_layers, i, j, 0); | ||
410 | pci_read_config_dword(imc->chan[i].cdev, | ||
411 | 0x80 + 4*j, &mtr); | ||
412 | ndimms += get_dimm_info(mtr, amap, dimm, imc, i, j); | ||
413 | } | ||
414 | if (ndimms && !skx_check_ecc(imc->chan[0].cdev)) { | ||
415 | skx_printk(KERN_ERR, "ECC is disabled on imc %d\n", imc->mc); | ||
416 | return -ENODEV; | ||
417 | } | ||
418 | } | ||
419 | |||
420 | return 0; | ||
421 | } | ||
422 | |||
423 | static void skx_unregister_mci(struct skx_imc *imc) | ||
424 | { | ||
425 | struct mem_ctl_info *mci = imc->mci; | ||
426 | |||
427 | if (!mci) | ||
428 | return; | ||
429 | |||
430 | edac_dbg(0, "MC%d: mci = %p\n", imc->mc, mci); | ||
431 | |||
432 | /* Remove MC sysfs nodes */ | ||
433 | edac_mc_del_mc(mci->pdev); | ||
434 | |||
435 | edac_dbg(1, "%s: free mci struct\n", mci->ctl_name); | ||
436 | kfree(mci->ctl_name); | ||
437 | edac_mc_free(mci); | ||
438 | } | ||
439 | |||
440 | static int skx_register_mci(struct skx_imc *imc) | ||
441 | { | ||
442 | struct mem_ctl_info *mci; | ||
443 | struct edac_mc_layer layers[2]; | ||
444 | struct pci_dev *pdev = imc->chan[0].cdev; | ||
445 | struct skx_pvt *pvt; | ||
446 | int rc; | ||
447 | |||
448 | /* allocate a new MC control structure */ | ||
449 | layers[0].type = EDAC_MC_LAYER_CHANNEL; | ||
450 | layers[0].size = NUM_CHANNELS; | ||
451 | layers[0].is_virt_csrow = false; | ||
452 | layers[1].type = EDAC_MC_LAYER_SLOT; | ||
453 | layers[1].size = NUM_DIMMS; | ||
454 | layers[1].is_virt_csrow = true; | ||
455 | mci = edac_mc_alloc(imc->mc, ARRAY_SIZE(layers), layers, | ||
456 | sizeof(struct skx_pvt)); | ||
457 | |||
458 | if (unlikely(!mci)) | ||
459 | return -ENOMEM; | ||
460 | |||
461 | edac_dbg(0, "MC#%d: mci = %p\n", imc->mc, mci); | ||
462 | |||
463 | /* Associate skx_dev and mci for future usage */ | ||
464 | imc->mci = mci; | ||
465 | pvt = mci->pvt_info; | ||
466 | pvt->imc = imc; | ||
467 | |||
468 | mci->ctl_name = kasprintf(GFP_KERNEL, "Skylake Socket#%d IMC#%d", | ||
469 | imc->node_id, imc->lmc); | ||
470 | mci->mtype_cap = MEM_FLAG_DDR4; | ||
471 | mci->edac_ctl_cap = EDAC_FLAG_NONE; | ||
472 | mci->edac_cap = EDAC_FLAG_NONE; | ||
473 | mci->mod_name = "skx_edac.c"; | ||
474 | mci->dev_name = pci_name(imc->chan[0].cdev); | ||
475 | mci->mod_ver = SKX_REVISION; | ||
476 | mci->ctl_page_to_phys = NULL; | ||
477 | |||
478 | rc = skx_get_dimm_config(mci); | ||
479 | if (rc < 0) | ||
480 | goto fail; | ||
481 | |||
482 | /* record ptr to the generic device */ | ||
483 | mci->pdev = &pdev->dev; | ||
484 | |||
485 | /* add this new MC control structure to EDAC's list of MCs */ | ||
486 | if (unlikely(edac_mc_add_mc(mci))) { | ||
487 | edac_dbg(0, "MC: failed edac_mc_add_mc()\n"); | ||
488 | rc = -EINVAL; | ||
489 | goto fail; | ||
490 | } | ||
491 | |||
492 | return 0; | ||
493 | |||
494 | fail: | ||
495 | kfree(mci->ctl_name); | ||
496 | edac_mc_free(mci); | ||
497 | imc->mci = NULL; | ||
498 | return rc; | ||
499 | } | ||
500 | |||
501 | #define SKX_MAX_SAD 24 | ||
502 | |||
503 | #define SKX_GET_SAD(d, i, reg) \ | ||
504 | pci_read_config_dword((d)->sad_all, 0x60 + 8 * (i), ®) | ||
505 | #define SKX_GET_ILV(d, i, reg) \ | ||
506 | pci_read_config_dword((d)->sad_all, 0x64 + 8 * (i), ®) | ||
507 | |||
508 | #define SKX_SAD_MOD3MODE(sad) GET_BITFIELD((sad), 30, 31) | ||
509 | #define SKX_SAD_MOD3(sad) GET_BITFIELD((sad), 27, 27) | ||
510 | #define SKX_SAD_LIMIT(sad) (((u64)GET_BITFIELD((sad), 7, 26) << 26) | MASK26) | ||
511 | #define SKX_SAD_MOD3ASMOD2(sad) GET_BITFIELD((sad), 5, 6) | ||
512 | #define SKX_SAD_ATTR(sad) GET_BITFIELD((sad), 3, 4) | ||
513 | #define SKX_SAD_INTERLEAVE(sad) GET_BITFIELD((sad), 1, 2) | ||
514 | #define SKX_SAD_ENABLE(sad) GET_BITFIELD((sad), 0, 0) | ||
515 | |||
516 | #define SKX_ILV_REMOTE(tgt) (((tgt) & 8) == 0) | ||
517 | #define SKX_ILV_TARGET(tgt) ((tgt) & 7) | ||
518 | |||
519 | static bool skx_sad_decode(struct decoded_addr *res) | ||
520 | { | ||
521 | struct skx_dev *d = list_first_entry(&skx_edac_list, typeof(*d), list); | ||
522 | u64 addr = res->addr; | ||
523 | int i, idx, tgt, lchan, shift; | ||
524 | u32 sad, ilv; | ||
525 | u64 limit, prev_limit; | ||
526 | int remote = 0; | ||
527 | |||
528 | /* Simple sanity check for I/O space or out of range */ | ||
529 | if (addr >= skx_tohm || (addr >= skx_tolm && addr < BIT_ULL(32))) { | ||
530 | edac_dbg(0, "Address %llx out of range\n", addr); | ||
531 | return false; | ||
532 | } | ||
533 | |||
534 | restart: | ||
535 | prev_limit = 0; | ||
536 | for (i = 0; i < SKX_MAX_SAD; i++) { | ||
537 | SKX_GET_SAD(d, i, sad); | ||
538 | limit = SKX_SAD_LIMIT(sad); | ||
539 | if (SKX_SAD_ENABLE(sad)) { | ||
540 | if (addr >= prev_limit && addr <= limit) | ||
541 | goto sad_found; | ||
542 | } | ||
543 | prev_limit = limit + 1; | ||
544 | } | ||
545 | edac_dbg(0, "No SAD entry for %llx\n", addr); | ||
546 | return false; | ||
547 | |||
548 | sad_found: | ||
549 | SKX_GET_ILV(d, i, ilv); | ||
550 | |||
551 | switch (SKX_SAD_INTERLEAVE(sad)) { | ||
552 | case 0: | ||
553 | idx = GET_BITFIELD(addr, 6, 8); | ||
554 | break; | ||
555 | case 1: | ||
556 | idx = GET_BITFIELD(addr, 8, 10); | ||
557 | break; | ||
558 | case 2: | ||
559 | idx = GET_BITFIELD(addr, 12, 14); | ||
560 | break; | ||
561 | case 3: | ||
562 | idx = GET_BITFIELD(addr, 30, 32); | ||
563 | break; | ||
564 | } | ||
565 | |||
566 | tgt = GET_BITFIELD(ilv, 4 * idx, 4 * idx + 3); | ||
567 | |||
568 | /* If point to another node, find it and start over */ | ||
569 | if (SKX_ILV_REMOTE(tgt)) { | ||
570 | if (remote) { | ||
571 | edac_dbg(0, "Double remote!\n"); | ||
572 | return false; | ||
573 | } | ||
574 | remote = 1; | ||
575 | list_for_each_entry(d, &skx_edac_list, list) { | ||
576 | if (d->imc[0].src_id == SKX_ILV_TARGET(tgt)) | ||
577 | goto restart; | ||
578 | } | ||
579 | edac_dbg(0, "Can't find node %d\n", SKX_ILV_TARGET(tgt)); | ||
580 | return false; | ||
581 | } | ||
582 | |||
583 | if (SKX_SAD_MOD3(sad) == 0) | ||
584 | lchan = SKX_ILV_TARGET(tgt); | ||
585 | else { | ||
586 | switch (SKX_SAD_MOD3MODE(sad)) { | ||
587 | case 0: | ||
588 | shift = 6; | ||
589 | break; | ||
590 | case 1: | ||
591 | shift = 8; | ||
592 | break; | ||
593 | case 2: | ||
594 | shift = 12; | ||
595 | break; | ||
596 | default: | ||
597 | edac_dbg(0, "illegal mod3mode\n"); | ||
598 | return false; | ||
599 | } | ||
600 | switch (SKX_SAD_MOD3ASMOD2(sad)) { | ||
601 | case 0: | ||
602 | lchan = (addr >> shift) % 3; | ||
603 | break; | ||
604 | case 1: | ||
605 | lchan = (addr >> shift) % 2; | ||
606 | break; | ||
607 | case 2: | ||
608 | lchan = (addr >> shift) % 2; | ||
609 | lchan = (lchan << 1) | ~lchan; | ||
610 | break; | ||
611 | case 3: | ||
612 | lchan = ((addr >> shift) % 2) << 1; | ||
613 | break; | ||
614 | } | ||
615 | lchan = (lchan << 1) | (SKX_ILV_TARGET(tgt) & 1); | ||
616 | } | ||
617 | |||
618 | res->dev = d; | ||
619 | res->socket = d->imc[0].src_id; | ||
620 | res->imc = GET_BITFIELD(d->mcroute, lchan * 3, lchan * 3 + 2); | ||
621 | res->channel = GET_BITFIELD(d->mcroute, lchan * 2 + 18, lchan * 2 + 19); | ||
622 | |||
623 | edac_dbg(2, "%llx: socket=%d imc=%d channel=%d\n", | ||
624 | res->addr, res->socket, res->imc, res->channel); | ||
625 | return true; | ||
626 | } | ||
627 | |||
628 | #define SKX_MAX_TAD 8 | ||
629 | |||
630 | #define SKX_GET_TADBASE(d, mc, i, reg) \ | ||
631 | pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x850 + 4 * (i), ®) | ||
632 | #define SKX_GET_TADWAYNESS(d, mc, i, reg) \ | ||
633 | pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x880 + 4 * (i), ®) | ||
634 | #define SKX_GET_TADCHNILVOFFSET(d, mc, ch, i, reg) \ | ||
635 | pci_read_config_dword((d)->imc[mc].chan[ch].cdev, 0x90 + 4 * (i), ®) | ||
636 | |||
637 | #define SKX_TAD_BASE(b) ((u64)GET_BITFIELD((b), 12, 31) << 26) | ||
638 | #define SKX_TAD_SKT_GRAN(b) GET_BITFIELD((b), 4, 5) | ||
639 | #define SKX_TAD_CHN_GRAN(b) GET_BITFIELD((b), 6, 7) | ||
640 | #define SKX_TAD_LIMIT(b) (((u64)GET_BITFIELD((b), 12, 31) << 26) | MASK26) | ||
641 | #define SKX_TAD_OFFSET(b) ((u64)GET_BITFIELD((b), 4, 23) << 26) | ||
642 | #define SKX_TAD_SKTWAYS(b) (1 << GET_BITFIELD((b), 10, 11)) | ||
643 | #define SKX_TAD_CHNWAYS(b) (GET_BITFIELD((b), 8, 9) + 1) | ||
644 | |||
645 | /* which bit used for both socket and channel interleave */ | ||
646 | static int skx_granularity[] = { 6, 8, 12, 30 }; | ||
647 | |||
648 | static u64 skx_do_interleave(u64 addr, int shift, int ways, u64 lowbits) | ||
649 | { | ||
650 | addr >>= shift; | ||
651 | addr /= ways; | ||
652 | addr <<= shift; | ||
653 | |||
654 | return addr | (lowbits & ((1ull << shift) - 1)); | ||
655 | } | ||
656 | |||
657 | static bool skx_tad_decode(struct decoded_addr *res) | ||
658 | { | ||
659 | int i; | ||
660 | u32 base, wayness, chnilvoffset; | ||
661 | int skt_interleave_bit, chn_interleave_bit; | ||
662 | u64 channel_addr; | ||
663 | |||
664 | for (i = 0; i < SKX_MAX_TAD; i++) { | ||
665 | SKX_GET_TADBASE(res->dev, res->imc, i, base); | ||
666 | SKX_GET_TADWAYNESS(res->dev, res->imc, i, wayness); | ||
667 | if (SKX_TAD_BASE(base) <= res->addr && res->addr <= SKX_TAD_LIMIT(wayness)) | ||
668 | goto tad_found; | ||
669 | } | ||
670 | edac_dbg(0, "No TAD entry for %llx\n", res->addr); | ||
671 | return false; | ||
672 | |||
673 | tad_found: | ||
674 | res->sktways = SKX_TAD_SKTWAYS(wayness); | ||
675 | res->chanways = SKX_TAD_CHNWAYS(wayness); | ||
676 | skt_interleave_bit = skx_granularity[SKX_TAD_SKT_GRAN(base)]; | ||
677 | chn_interleave_bit = skx_granularity[SKX_TAD_CHN_GRAN(base)]; | ||
678 | |||
679 | SKX_GET_TADCHNILVOFFSET(res->dev, res->imc, res->channel, i, chnilvoffset); | ||
680 | channel_addr = res->addr - SKX_TAD_OFFSET(chnilvoffset); | ||
681 | |||
682 | if (res->chanways == 3 && skt_interleave_bit > chn_interleave_bit) { | ||
683 | /* Must handle channel first, then socket */ | ||
684 | channel_addr = skx_do_interleave(channel_addr, chn_interleave_bit, | ||
685 | res->chanways, channel_addr); | ||
686 | channel_addr = skx_do_interleave(channel_addr, skt_interleave_bit, | ||
687 | res->sktways, channel_addr); | ||
688 | } else { | ||
689 | /* Handle socket then channel. Preserve low bits from original address */ | ||
690 | channel_addr = skx_do_interleave(channel_addr, skt_interleave_bit, | ||
691 | res->sktways, res->addr); | ||
692 | channel_addr = skx_do_interleave(channel_addr, chn_interleave_bit, | ||
693 | res->chanways, res->addr); | ||
694 | } | ||
695 | |||
696 | res->chan_addr = channel_addr; | ||
697 | |||
698 | edac_dbg(2, "%llx: chan_addr=%llx sktways=%d chanways=%d\n", | ||
699 | res->addr, res->chan_addr, res->sktways, res->chanways); | ||
700 | return true; | ||
701 | } | ||
702 | |||
703 | #define SKX_MAX_RIR 4 | ||
704 | |||
705 | #define SKX_GET_RIRWAYNESS(d, mc, ch, i, reg) \ | ||
706 | pci_read_config_dword((d)->imc[mc].chan[ch].cdev, \ | ||
707 | 0x108 + 4 * (i), ®) | ||
708 | #define SKX_GET_RIRILV(d, mc, ch, idx, i, reg) \ | ||
709 | pci_read_config_dword((d)->imc[mc].chan[ch].cdev, \ | ||
710 | 0x120 + 16 * idx + 4 * (i), ®) | ||
711 | |||
712 | #define SKX_RIR_VALID(b) GET_BITFIELD((b), 31, 31) | ||
713 | #define SKX_RIR_LIMIT(b) (((u64)GET_BITFIELD((b), 1, 11) << 29) | MASK29) | ||
714 | #define SKX_RIR_WAYS(b) (1 << GET_BITFIELD((b), 28, 29)) | ||
715 | #define SKX_RIR_CHAN_RANK(b) GET_BITFIELD((b), 16, 19) | ||
716 | #define SKX_RIR_OFFSET(b) ((u64)(GET_BITFIELD((b), 2, 15) << 26)) | ||
717 | |||
718 | static bool skx_rir_decode(struct decoded_addr *res) | ||
719 | { | ||
720 | int i, idx, chan_rank; | ||
721 | int shift; | ||
722 | u32 rirway, rirlv; | ||
723 | u64 rank_addr, prev_limit = 0, limit; | ||
724 | |||
725 | if (res->dev->imc[res->imc].chan[res->channel].dimms[0].close_pg) | ||
726 | shift = 6; | ||
727 | else | ||
728 | shift = 13; | ||
729 | |||
730 | for (i = 0; i < SKX_MAX_RIR; i++) { | ||
731 | SKX_GET_RIRWAYNESS(res->dev, res->imc, res->channel, i, rirway); | ||
732 | limit = SKX_RIR_LIMIT(rirway); | ||
733 | if (SKX_RIR_VALID(rirway)) { | ||
734 | if (prev_limit <= res->chan_addr && | ||
735 | res->chan_addr <= limit) | ||
736 | goto rir_found; | ||
737 | } | ||
738 | prev_limit = limit; | ||
739 | } | ||
740 | edac_dbg(0, "No RIR entry for %llx\n", res->addr); | ||
741 | return false; | ||
742 | |||
743 | rir_found: | ||
744 | rank_addr = res->chan_addr >> shift; | ||
745 | rank_addr /= SKX_RIR_WAYS(rirway); | ||
746 | rank_addr <<= shift; | ||
747 | rank_addr |= res->chan_addr & GENMASK_ULL(shift - 1, 0); | ||
748 | |||
749 | res->rank_address = rank_addr; | ||
750 | idx = (res->chan_addr >> shift) % SKX_RIR_WAYS(rirway); | ||
751 | |||
752 | SKX_GET_RIRILV(res->dev, res->imc, res->channel, idx, i, rirlv); | ||
753 | res->rank_address = rank_addr - SKX_RIR_OFFSET(rirlv); | ||
754 | chan_rank = SKX_RIR_CHAN_RANK(rirlv); | ||
755 | res->channel_rank = chan_rank; | ||
756 | res->dimm = chan_rank / 4; | ||
757 | res->rank = chan_rank % 4; | ||
758 | |||
759 | edac_dbg(2, "%llx: dimm=%d rank=%d chan_rank=%d rank_addr=%llx\n", | ||
760 | res->addr, res->dimm, res->rank, | ||
761 | res->channel_rank, res->rank_address); | ||
762 | return true; | ||
763 | } | ||
764 | |||
765 | static u8 skx_close_row[] = { | ||
766 | 15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33 | ||
767 | }; | ||
768 | static u8 skx_close_column[] = { | ||
769 | 3, 4, 5, 14, 19, 23, 24, 25, 26, 27 | ||
770 | }; | ||
771 | static u8 skx_open_row[] = { | ||
772 | 14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33 | ||
773 | }; | ||
774 | static u8 skx_open_column[] = { | ||
775 | 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 | ||
776 | }; | ||
777 | static u8 skx_open_fine_column[] = { | ||
778 | 3, 4, 5, 7, 8, 9, 10, 11, 12, 13 | ||
779 | }; | ||
780 | |||
781 | static int skx_bits(u64 addr, int nbits, u8 *bits) | ||
782 | { | ||
783 | int i, res = 0; | ||
784 | |||
785 | for (i = 0; i < nbits; i++) | ||
786 | res |= ((addr >> bits[i]) & 1) << i; | ||
787 | return res; | ||
788 | } | ||
789 | |||
790 | static int skx_bank_bits(u64 addr, int b0, int b1, int do_xor, int x0, int x1) | ||
791 | { | ||
792 | int ret = GET_BITFIELD(addr, b0, b0) | (GET_BITFIELD(addr, b1, b1) << 1); | ||
793 | |||
794 | if (do_xor) | ||
795 | ret ^= GET_BITFIELD(addr, x0, x0) | (GET_BITFIELD(addr, x1, x1) << 1); | ||
796 | |||
797 | return ret; | ||
798 | } | ||
799 | |||
/*
 * Final decode stage: translate the rank address into DRAM row, column,
 * bank address and bank group, using the bit-mapping tables that match
 * this DIMM's page mode (close/open) and fine-grain-bank setting.
 * Always returns true.
 */
static bool skx_mad_decode(struct decoded_addr *r)
{
	struct skx_dimm *dimm = &r->dev->imc[r->imc].chan[r->channel].dimms[r->dimm];
	/* first bank-group address bit differs with fine-grain banking */
	int bg0 = dimm->fine_grain_bank ? 6 : 13;

	if (dimm->close_pg) {
		r->row = skx_bits(r->rank_address, dimm->rowbits, skx_close_row);
		r->column = skx_bits(r->rank_address, dimm->colbits, skx_close_column);
		r->column |= 0x400; /* C10 is autoprecharge, always set */
		r->bank_address = skx_bank_bits(r->rank_address, 8, 9, dimm->bank_xor_enable, 22, 28);
		r->bank_group = skx_bank_bits(r->rank_address, 6, 7, dimm->bank_xor_enable, 20, 21);
	} else {
		r->row = skx_bits(r->rank_address, dimm->rowbits, skx_open_row);
		if (dimm->fine_grain_bank)
			r->column = skx_bits(r->rank_address, dimm->colbits, skx_open_fine_column);
		else
			r->column = skx_bits(r->rank_address, dimm->colbits, skx_open_column);
		r->bank_address = skx_bank_bits(r->rank_address, 18, 19, dimm->bank_xor_enable, 22, 23);
		r->bank_group = skx_bank_bits(r->rank_address, bg0, 17, dimm->bank_xor_enable, 20, 21);
	}
	/* drop any row bits beyond what this DIMM implements */
	r->row &= (1u << dimm->rowbits) - 1;

	edac_dbg(2, "%llx: row=%x col=%x bank_addr=%d bank_group=%d\n",
		 r->addr, r->row, r->column, r->bank_address,
		 r->bank_group);
	return true;
}
827 | |||
828 | static bool skx_decode(struct decoded_addr *res) | ||
829 | { | ||
830 | |||
831 | return skx_sad_decode(res) && skx_tad_decode(res) && | ||
832 | skx_rir_decode(res) && skx_mad_decode(res); | ||
833 | } | ||
834 | |||
#ifdef CONFIG_EDAC_DEBUG
/*
 * Debug feature. Make /sys/kernel/debug/skx_edac_test/addr.
 * Write an address to this file to exercise the address decode
 * logic in this driver.
 */
static struct dentry *skx_test;	/* debugfs directory handle */
static u64 skx_fake_addr;	/* backing storage for the "addr" attribute */
843 | |||
/*
 * debugfs write handler: treat the written value as a system address
 * and push it through the full decode pipeline.  Results are visible
 * only via the edac_dbg() output of the decode stages.
 */
static int debugfs_u64_set(void *data, u64 val)
{
	struct decoded_addr res;

	res.addr = val;
	skx_decode(&res);

	return 0;
}

/* write-only u64 attribute backed by debugfs_u64_set() */
DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
855 | |||
/*
 * Thin wrapper so setup_skx_debug() reads naturally: create a debugfs
 * file exposing @value through the write-only u64 fops above.
 */
static struct dentry *mydebugfs_create(const char *name, umode_t mode,
				       struct dentry *parent, u64 *value)
{
	return debugfs_create_file(name, mode, parent, value, &fops_u64_wo);
}
861 | |||
862 | static void setup_skx_debug(void) | ||
863 | { | ||
864 | skx_test = debugfs_create_dir("skx_edac_test", NULL); | ||
865 | mydebugfs_create("addr", S_IWUSR, skx_test, &skx_fake_addr); | ||
866 | } | ||
867 | |||
/* Remove the debugfs test directory and everything inside it (NULL-safe). */
static void teardown_skx_debug(void)
{
	debugfs_remove_recursive(skx_test);
}
#else
/* No-op stubs when CONFIG_EDAC_DEBUG is disabled */
static void setup_skx_debug(void)
{
}

static void teardown_skx_debug(void)
{
}
#endif /*CONFIG_EDAC_DEBUG*/
881 | |||
882 | static void skx_mce_output_error(struct mem_ctl_info *mci, | ||
883 | const struct mce *m, | ||
884 | struct decoded_addr *res) | ||
885 | { | ||
886 | enum hw_event_mc_err_type tp_event; | ||
887 | char *type, *optype, msg[256]; | ||
888 | bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0); | ||
889 | bool overflow = GET_BITFIELD(m->status, 62, 62); | ||
890 | bool uncorrected_error = GET_BITFIELD(m->status, 61, 61); | ||
891 | bool recoverable; | ||
892 | u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52); | ||
893 | u32 mscod = GET_BITFIELD(m->status, 16, 31); | ||
894 | u32 errcode = GET_BITFIELD(m->status, 0, 15); | ||
895 | u32 optypenum = GET_BITFIELD(m->status, 4, 6); | ||
896 | |||
897 | recoverable = GET_BITFIELD(m->status, 56, 56); | ||
898 | |||
899 | if (uncorrected_error) { | ||
900 | if (ripv) { | ||
901 | type = "FATAL"; | ||
902 | tp_event = HW_EVENT_ERR_FATAL; | ||
903 | } else { | ||
904 | type = "NON_FATAL"; | ||
905 | tp_event = HW_EVENT_ERR_UNCORRECTED; | ||
906 | } | ||
907 | } else { | ||
908 | type = "CORRECTED"; | ||
909 | tp_event = HW_EVENT_ERR_CORRECTED; | ||
910 | } | ||
911 | |||
912 | /* | ||
913 | * According with Table 15-9 of the Intel Architecture spec vol 3A, | ||
914 | * memory errors should fit in this mask: | ||
915 | * 000f 0000 1mmm cccc (binary) | ||
916 | * where: | ||
917 | * f = Correction Report Filtering Bit. If 1, subsequent errors | ||
918 | * won't be shown | ||
919 | * mmm = error type | ||
920 | * cccc = channel | ||
921 | * If the mask doesn't match, report an error to the parsing logic | ||
922 | */ | ||
923 | if (!((errcode & 0xef80) == 0x80)) { | ||
924 | optype = "Can't parse: it is not a mem"; | ||
925 | } else { | ||
926 | switch (optypenum) { | ||
927 | case 0: | ||
928 | optype = "generic undef request error"; | ||
929 | break; | ||
930 | case 1: | ||
931 | optype = "memory read error"; | ||
932 | break; | ||
933 | case 2: | ||
934 | optype = "memory write error"; | ||
935 | break; | ||
936 | case 3: | ||
937 | optype = "addr/cmd error"; | ||
938 | break; | ||
939 | case 4: | ||
940 | optype = "memory scrubbing error"; | ||
941 | break; | ||
942 | default: | ||
943 | optype = "reserved"; | ||
944 | break; | ||
945 | } | ||
946 | } | ||
947 | |||
948 | snprintf(msg, sizeof(msg), | ||
949 | "%s%s err_code:%04x:%04x socket:%d imc:%d rank:%d bg:%d ba:%d row:%x col:%x", | ||
950 | overflow ? " OVERFLOW" : "", | ||
951 | (uncorrected_error && recoverable) ? " recoverable" : "", | ||
952 | mscod, errcode, | ||
953 | res->socket, res->imc, res->rank, | ||
954 | res->bank_group, res->bank_address, res->row, res->column); | ||
955 | |||
956 | edac_dbg(0, "%s\n", msg); | ||
957 | |||
958 | /* Call the helper to output message */ | ||
959 | edac_mc_handle_error(tp_event, mci, core_err_cnt, | ||
960 | m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0, | ||
961 | res->channel, res->dimm, -1, | ||
962 | optype, msg); | ||
963 | } | ||
964 | |||
/*
 * MCE decoder-chain notifier: invoked for every logged machine check.
 * Filters for memory errors that carry a valid address, decodes the
 * address down to DIMM granularity and reports it via EDAC.  Always
 * returns NOTIFY_DONE so the remaining notifiers still run.
 */
static int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
			       void *data)
{
	struct mce *mce = (struct mce *)data;
	struct decoded_addr res;
	struct mem_ctl_info *mci;
	char *type;

	if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
		return NOTIFY_DONE;

	/* ignore unless this is memory related with an address */
	if ((mce->status & 0xefff) >> 7 != 1 || !(mce->status & MCI_STATUS_ADDRV))
		return NOTIFY_DONE;

	res.addr = mce->addr;
	if (!skx_decode(&res))
		return NOTIFY_DONE;
	mci = res.dev->imc[res.imc].mci;

	/* MCIP set means this arrived as a machine-check exception */
	if (mce->mcgstatus & MCG_STATUS_MCIP)
		type = "Exception";
	else
		type = "Event";

	skx_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n");

	skx_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx "
		      "Bank %d: %016Lx\n", mce->extcpu, type,
		      mce->mcgstatus, mce->bank, mce->status);
	skx_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc);
	skx_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr);
	skx_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc);

	skx_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET "
		      "%u APIC %x\n", mce->cpuvendor, mce->cpuid,
		      mce->time, mce->socketid, mce->apicid);

	skx_mce_output_error(mci, mce, &res);

	return NOTIFY_DONE;
}
1007 | |||
/* Hooks skx_mce_check_error() into the x86 MCE decoder chain */
static struct notifier_block skx_mce_dec = {
	.notifier_call = skx_mce_check_error,
};
1011 | |||
/*
 * Release everything skx_init() acquired: unregister each memory
 * controller, drop the PCI device references taken during discovery
 * and free the per-socket skx_dev structures.  Also used to unwind a
 * partially completed init.
 */
static void skx_remove(void)
{
	int i, j;
	struct skx_dev *d, *tmp;

	edac_dbg(0, "\n");

	list_for_each_entry_safe(d, tmp, &skx_edac_list, list) {
		list_del(&d->list);
		for (i = 0; i < NUM_IMC; i++) {
			skx_unregister_mci(&d->imc[i]);
			for (j = 0; j < NUM_CHANNELS; j++)
				pci_dev_put(d->imc[i].chan[j].cdev);
		}
		pci_dev_put(d->util_all);
		pci_dev_put(d->sad_all);

		kfree(d);
	}
}
1032 | |||
/*
 * skx_init:
 *	make sure we are running on the correct cpu model
 *	search for all the devices we need
 *	check which DIMMs are present.
 *
 * Returns 0 on success; on failure, everything discovered so far is
 * released via skx_remove() and a negative errno is returned.
 */
int __init skx_init(void)
{
	const struct x86_cpu_id *id;
	const struct munit *m;
	int rc = 0, i;
	u8 mc = 0, src_id, node_id;
	struct skx_dev *d;

	edac_dbg(2, "\n");

	id = x86_match_cpu(skx_cpuids);
	if (!id)
		return -ENODEV;

	rc = skx_get_hi_lo();
	if (rc)
		return rc;

	rc = get_all_bus_mappings();
	if (rc < 0)
		goto fail;
	if (rc == 0) {
		edac_dbg(2, "No memory controllers found\n");
		return -ENODEV;
	}

	/* every socket must expose the expected count of each unit type */
	for (m = skx_all_munits; m->did; m++) {
		rc = get_all_munits(m);
		if (rc < 0)
			goto fail;
		if (rc != m->per_socket * skx_num_sockets) {
			edac_dbg(2, "Expected %d, got %d of %x\n",
				 m->per_socket * skx_num_sockets, rc, m->did);
			rc = -ENODEV;
			goto fail;
		}
	}

	/* register one EDAC memory controller per iMC on every socket */
	list_for_each_entry(d, &skx_edac_list, list) {
		src_id = get_src_id(d);
		node_id = skx_get_node_id(d);
		edac_dbg(2, "src_id=%d node_id=%d\n", src_id, node_id);
		for (i = 0; i < NUM_IMC; i++) {
			d->imc[i].mc = mc++;	/* global MC index */
			d->imc[i].lmc = i;	/* local index within socket */
			d->imc[i].src_id = src_id;
			d->imc[i].node_id = node_id;
			rc = skx_register_mci(&d->imc[i]);
			if (rc < 0)
				goto fail;
		}
	}

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	setup_skx_debug();

	mce_register_decode_chain(&skx_mce_dec);

	return 0;
fail:
	skx_remove();
	return rc;
}
1104 | |||
1105 | static void __exit skx_exit(void) | ||
1106 | { | ||
1107 | edac_dbg(2, "\n"); | ||
1108 | mce_unregister_decode_chain(&skx_mce_dec); | ||
1109 | skx_remove(); | ||
1110 | teardown_skx_debug(); | ||
1111 | } | ||
1112 | |||
module_init(skx_init);
module_exit(skx_exit);

/* 0444: read-only in sysfs; set at load time only */
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tony Luck");
MODULE_DESCRIPTION("MC Driver for Intel Skylake server processors");
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c index 438893762076..ce2bc2a38101 100644 --- a/drivers/firmware/arm_scpi.c +++ b/drivers/firmware/arm_scpi.c | |||
@@ -709,9 +709,10 @@ static int scpi_probe(struct platform_device *pdev) | |||
709 | struct mbox_client *cl = &pchan->cl; | 709 | struct mbox_client *cl = &pchan->cl; |
710 | struct device_node *shmem = of_parse_phandle(np, "shmem", idx); | 710 | struct device_node *shmem = of_parse_phandle(np, "shmem", idx); |
711 | 711 | ||
712 | if (of_address_to_resource(shmem, 0, &res)) { | 712 | ret = of_address_to_resource(shmem, 0, &res); |
713 | of_node_put(shmem); | ||
714 | if (ret) { | ||
713 | dev_err(dev, "failed to get SCPI payload mem resource\n"); | 715 | dev_err(dev, "failed to get SCPI payload mem resource\n"); |
714 | ret = -EINVAL; | ||
715 | goto err; | 716 | goto err; |
716 | } | 717 | } |
717 | 718 | ||
diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c index 94a58a082b99..44c01390d035 100644 --- a/drivers/firmware/dmi-id.c +++ b/drivers/firmware/dmi-id.c | |||
@@ -229,14 +229,14 @@ static int __init dmi_id_init(void) | |||
229 | 229 | ||
230 | ret = device_register(dmi_dev); | 230 | ret = device_register(dmi_dev); |
231 | if (ret) | 231 | if (ret) |
232 | goto fail_free_dmi_dev; | 232 | goto fail_put_dmi_dev; |
233 | 233 | ||
234 | return 0; | 234 | return 0; |
235 | 235 | ||
236 | fail_free_dmi_dev: | 236 | fail_put_dmi_dev: |
237 | kfree(dmi_dev); | 237 | put_device(dmi_dev); |
238 | fail_class_unregister: | ||
239 | 238 | ||
239 | fail_class_unregister: | ||
240 | class_unregister(&dmi_class); | 240 | class_unregister(&dmi_class); |
241 | 241 | ||
242 | return ret; | 242 | return ret; |
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 5a2631af7410..7dd2e2d37231 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c | |||
@@ -657,9 +657,12 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname, | |||
657 | } | 657 | } |
658 | 658 | ||
659 | if (subnode) { | 659 | if (subnode) { |
660 | node = of_get_flat_dt_subnode_by_name(node, subnode); | 660 | int err = of_get_flat_dt_subnode_by_name(node, subnode); |
661 | if (node < 0) | 661 | |
662 | if (err < 0) | ||
662 | return 0; | 663 | return 0; |
664 | |||
665 | node = err; | ||
663 | } | 666 | } |
664 | 667 | ||
665 | return __find_uefi_params(node, info, dt_params[i].params); | 668 | return __find_uefi_params(node, info, dt_params[i].params); |
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c index 3bd127f95315..aded10662020 100644 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c | |||
@@ -41,6 +41,8 @@ static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE; | |||
41 | #define EFI_ALLOC_ALIGN EFI_PAGE_SIZE | 41 | #define EFI_ALLOC_ALIGN EFI_PAGE_SIZE |
42 | #endif | 42 | #endif |
43 | 43 | ||
44 | #define EFI_MMAP_NR_SLACK_SLOTS 8 | ||
45 | |||
44 | struct file_info { | 46 | struct file_info { |
45 | efi_file_handle_t *handle; | 47 | efi_file_handle_t *handle; |
46 | u64 size; | 48 | u64 size; |
@@ -63,49 +65,62 @@ void efi_printk(efi_system_table_t *sys_table_arg, char *str) | |||
63 | } | 65 | } |
64 | } | 66 | } |
65 | 67 | ||
68 | static inline bool mmap_has_headroom(unsigned long buff_size, | ||
69 | unsigned long map_size, | ||
70 | unsigned long desc_size) | ||
71 | { | ||
72 | unsigned long slack = buff_size - map_size; | ||
73 | |||
74 | return slack / desc_size >= EFI_MMAP_NR_SLACK_SLOTS; | ||
75 | } | ||
76 | |||
66 | efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg, | 77 | efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg, |
67 | efi_memory_desc_t **map, | 78 | struct efi_boot_memmap *map) |
68 | unsigned long *map_size, | ||
69 | unsigned long *desc_size, | ||
70 | u32 *desc_ver, | ||
71 | unsigned long *key_ptr) | ||
72 | { | 79 | { |
73 | efi_memory_desc_t *m = NULL; | 80 | efi_memory_desc_t *m = NULL; |
74 | efi_status_t status; | 81 | efi_status_t status; |
75 | unsigned long key; | 82 | unsigned long key; |
76 | u32 desc_version; | 83 | u32 desc_version; |
77 | 84 | ||
78 | *map_size = sizeof(*m) * 32; | 85 | *map->desc_size = sizeof(*m); |
86 | *map->map_size = *map->desc_size * 32; | ||
87 | *map->buff_size = *map->map_size; | ||
79 | again: | 88 | again: |
80 | /* | ||
81 | * Add an additional efi_memory_desc_t because we're doing an | ||
82 | * allocation which may be in a new descriptor region. | ||
83 | */ | ||
84 | *map_size += sizeof(*m); | ||
85 | status = efi_call_early(allocate_pool, EFI_LOADER_DATA, | 89 | status = efi_call_early(allocate_pool, EFI_LOADER_DATA, |
86 | *map_size, (void **)&m); | 90 | *map->map_size, (void **)&m); |
87 | if (status != EFI_SUCCESS) | 91 | if (status != EFI_SUCCESS) |
88 | goto fail; | 92 | goto fail; |
89 | 93 | ||
90 | *desc_size = 0; | 94 | *map->desc_size = 0; |
91 | key = 0; | 95 | key = 0; |
92 | status = efi_call_early(get_memory_map, map_size, m, | 96 | status = efi_call_early(get_memory_map, map->map_size, m, |
93 | &key, desc_size, &desc_version); | 97 | &key, map->desc_size, &desc_version); |
94 | if (status == EFI_BUFFER_TOO_SMALL) { | 98 | if (status == EFI_BUFFER_TOO_SMALL || |
99 | !mmap_has_headroom(*map->buff_size, *map->map_size, | ||
100 | *map->desc_size)) { | ||
95 | efi_call_early(free_pool, m); | 101 | efi_call_early(free_pool, m); |
102 | /* | ||
103 | * Make sure there is some entries of headroom so that the | ||
104 | * buffer can be reused for a new map after allocations are | ||
105 | * no longer permitted. Its unlikely that the map will grow to | ||
106 | * exceed this headroom once we are ready to trigger | ||
107 | * ExitBootServices() | ||
108 | */ | ||
109 | *map->map_size += *map->desc_size * EFI_MMAP_NR_SLACK_SLOTS; | ||
110 | *map->buff_size = *map->map_size; | ||
96 | goto again; | 111 | goto again; |
97 | } | 112 | } |
98 | 113 | ||
99 | if (status != EFI_SUCCESS) | 114 | if (status != EFI_SUCCESS) |
100 | efi_call_early(free_pool, m); | 115 | efi_call_early(free_pool, m); |
101 | 116 | ||
102 | if (key_ptr && status == EFI_SUCCESS) | 117 | if (map->key_ptr && status == EFI_SUCCESS) |
103 | *key_ptr = key; | 118 | *map->key_ptr = key; |
104 | if (desc_ver && status == EFI_SUCCESS) | 119 | if (map->desc_ver && status == EFI_SUCCESS) |
105 | *desc_ver = desc_version; | 120 | *map->desc_ver = desc_version; |
106 | 121 | ||
107 | fail: | 122 | fail: |
108 | *map = m; | 123 | *map->map = m; |
109 | return status; | 124 | return status; |
110 | } | 125 | } |
111 | 126 | ||
@@ -113,13 +128,20 @@ fail: | |||
113 | unsigned long get_dram_base(efi_system_table_t *sys_table_arg) | 128 | unsigned long get_dram_base(efi_system_table_t *sys_table_arg) |
114 | { | 129 | { |
115 | efi_status_t status; | 130 | efi_status_t status; |
116 | unsigned long map_size; | 131 | unsigned long map_size, buff_size; |
117 | unsigned long membase = EFI_ERROR; | 132 | unsigned long membase = EFI_ERROR; |
118 | struct efi_memory_map map; | 133 | struct efi_memory_map map; |
119 | efi_memory_desc_t *md; | 134 | efi_memory_desc_t *md; |
135 | struct efi_boot_memmap boot_map; | ||
120 | 136 | ||
121 | status = efi_get_memory_map(sys_table_arg, (efi_memory_desc_t **)&map.map, | 137 | boot_map.map = (efi_memory_desc_t **)&map.map; |
122 | &map_size, &map.desc_size, NULL, NULL); | 138 | boot_map.map_size = &map_size; |
139 | boot_map.desc_size = &map.desc_size; | ||
140 | boot_map.desc_ver = NULL; | ||
141 | boot_map.key_ptr = NULL; | ||
142 | boot_map.buff_size = &buff_size; | ||
143 | |||
144 | status = efi_get_memory_map(sys_table_arg, &boot_map); | ||
123 | if (status != EFI_SUCCESS) | 145 | if (status != EFI_SUCCESS) |
124 | return membase; | 146 | return membase; |
125 | 147 | ||
@@ -144,15 +166,22 @@ efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg, | |||
144 | unsigned long size, unsigned long align, | 166 | unsigned long size, unsigned long align, |
145 | unsigned long *addr, unsigned long max) | 167 | unsigned long *addr, unsigned long max) |
146 | { | 168 | { |
147 | unsigned long map_size, desc_size; | 169 | unsigned long map_size, desc_size, buff_size; |
148 | efi_memory_desc_t *map; | 170 | efi_memory_desc_t *map; |
149 | efi_status_t status; | 171 | efi_status_t status; |
150 | unsigned long nr_pages; | 172 | unsigned long nr_pages; |
151 | u64 max_addr = 0; | 173 | u64 max_addr = 0; |
152 | int i; | 174 | int i; |
175 | struct efi_boot_memmap boot_map; | ||
176 | |||
177 | boot_map.map = &map; | ||
178 | boot_map.map_size = &map_size; | ||
179 | boot_map.desc_size = &desc_size; | ||
180 | boot_map.desc_ver = NULL; | ||
181 | boot_map.key_ptr = NULL; | ||
182 | boot_map.buff_size = &buff_size; | ||
153 | 183 | ||
154 | status = efi_get_memory_map(sys_table_arg, &map, &map_size, &desc_size, | 184 | status = efi_get_memory_map(sys_table_arg, &boot_map); |
155 | NULL, NULL); | ||
156 | if (status != EFI_SUCCESS) | 185 | if (status != EFI_SUCCESS) |
157 | goto fail; | 186 | goto fail; |
158 | 187 | ||
@@ -230,14 +259,21 @@ efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg, | |||
230 | unsigned long size, unsigned long align, | 259 | unsigned long size, unsigned long align, |
231 | unsigned long *addr) | 260 | unsigned long *addr) |
232 | { | 261 | { |
233 | unsigned long map_size, desc_size; | 262 | unsigned long map_size, desc_size, buff_size; |
234 | efi_memory_desc_t *map; | 263 | efi_memory_desc_t *map; |
235 | efi_status_t status; | 264 | efi_status_t status; |
236 | unsigned long nr_pages; | 265 | unsigned long nr_pages; |
237 | int i; | 266 | int i; |
267 | struct efi_boot_memmap boot_map; | ||
238 | 268 | ||
239 | status = efi_get_memory_map(sys_table_arg, &map, &map_size, &desc_size, | 269 | boot_map.map = &map; |
240 | NULL, NULL); | 270 | boot_map.map_size = &map_size; |
271 | boot_map.desc_size = &desc_size; | ||
272 | boot_map.desc_ver = NULL; | ||
273 | boot_map.key_ptr = NULL; | ||
274 | boot_map.buff_size = &buff_size; | ||
275 | |||
276 | status = efi_get_memory_map(sys_table_arg, &boot_map); | ||
241 | if (status != EFI_SUCCESS) | 277 | if (status != EFI_SUCCESS) |
242 | goto fail; | 278 | goto fail; |
243 | 279 | ||
@@ -704,3 +740,76 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg, | |||
704 | *cmd_line_len = options_bytes; | 740 | *cmd_line_len = options_bytes; |
705 | return (char *)cmdline_addr; | 741 | return (char *)cmdline_addr; |
706 | } | 742 | } |
743 | |||
744 | /* | ||
745 | * Handle calling ExitBootServices according to the requirements set out by the | ||
746 | * spec. Obtains the current memory map, and returns that info after calling | ||
747 | * ExitBootServices. The client must specify a function to perform any | ||
748 | * processing of the memory map data prior to ExitBootServices. A client | ||
749 | * specific structure may be passed to the function via priv. The client | ||
750 | * function may be called multiple times. | ||
751 | */ | ||
752 | efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg, | ||
753 | void *handle, | ||
754 | struct efi_boot_memmap *map, | ||
755 | void *priv, | ||
756 | efi_exit_boot_map_processing priv_func) | ||
757 | { | ||
758 | efi_status_t status; | ||
759 | |||
760 | status = efi_get_memory_map(sys_table_arg, map); | ||
761 | |||
762 | if (status != EFI_SUCCESS) | ||
763 | goto fail; | ||
764 | |||
765 | status = priv_func(sys_table_arg, map, priv); | ||
766 | if (status != EFI_SUCCESS) | ||
767 | goto free_map; | ||
768 | |||
769 | status = efi_call_early(exit_boot_services, handle, *map->key_ptr); | ||
770 | |||
771 | if (status == EFI_INVALID_PARAMETER) { | ||
772 | /* | ||
773 | * The memory map changed between efi_get_memory_map() and | ||
774 | * exit_boot_services(). Per the UEFI Spec v2.6, Section 6.4: | ||
775 | * EFI_BOOT_SERVICES.ExitBootServices we need to get the | ||
776 | * updated map, and try again. The spec implies one retry | ||
777 | * should be sufficent, which is confirmed against the EDK2 | ||
778 | * implementation. Per the spec, we can only invoke | ||
779 | * get_memory_map() and exit_boot_services() - we cannot alloc | ||
780 | * so efi_get_memory_map() cannot be used, and we must reuse | ||
781 | * the buffer. For all practical purposes, the headroom in the | ||
782 | * buffer should account for any changes in the map so the call | ||
783 | * to get_memory_map() is expected to succeed here. | ||
784 | */ | ||
785 | *map->map_size = *map->buff_size; | ||
786 | status = efi_call_early(get_memory_map, | ||
787 | map->map_size, | ||
788 | *map->map, | ||
789 | map->key_ptr, | ||
790 | map->desc_size, | ||
791 | map->desc_ver); | ||
792 | |||
793 | /* exit_boot_services() was called, thus cannot free */ | ||
794 | if (status != EFI_SUCCESS) | ||
795 | goto fail; | ||
796 | |||
797 | status = priv_func(sys_table_arg, map, priv); | ||
798 | /* exit_boot_services() was called, thus cannot free */ | ||
799 | if (status != EFI_SUCCESS) | ||
800 | goto fail; | ||
801 | |||
802 | status = efi_call_early(exit_boot_services, handle, *map->key_ptr); | ||
803 | } | ||
804 | |||
805 | /* exit_boot_services() was called, thus cannot free */ | ||
806 | if (status != EFI_SUCCESS) | ||
807 | goto fail; | ||
808 | |||
809 | return EFI_SUCCESS; | ||
810 | |||
811 | free_map: | ||
812 | efi_call_early(free_pool, *map->map); | ||
813 | fail: | ||
814 | return status; | ||
815 | } | ||
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c index e58abfa953cc..a6a93116a8f0 100644 --- a/drivers/firmware/efi/libstub/fdt.c +++ b/drivers/firmware/efi/libstub/fdt.c | |||
@@ -152,6 +152,27 @@ fdt_set_fail: | |||
152 | #define EFI_FDT_ALIGN EFI_PAGE_SIZE | 152 | #define EFI_FDT_ALIGN EFI_PAGE_SIZE |
153 | #endif | 153 | #endif |
154 | 154 | ||
155 | struct exit_boot_struct { | ||
156 | efi_memory_desc_t *runtime_map; | ||
157 | int *runtime_entry_count; | ||
158 | }; | ||
159 | |||
160 | static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg, | ||
161 | struct efi_boot_memmap *map, | ||
162 | void *priv) | ||
163 | { | ||
164 | struct exit_boot_struct *p = priv; | ||
165 | /* | ||
166 | * Update the memory map with virtual addresses. The function will also | ||
167 | * populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME | ||
168 | * entries so that we can pass it straight to SetVirtualAddressMap() | ||
169 | */ | ||
170 | efi_get_virtmap(*map->map, *map->map_size, *map->desc_size, | ||
171 | p->runtime_map, p->runtime_entry_count); | ||
172 | |||
173 | return EFI_SUCCESS; | ||
174 | } | ||
175 | |||
155 | /* | 176 | /* |
156 | * Allocate memory for a new FDT, then add EFI, commandline, and | 177 | * Allocate memory for a new FDT, then add EFI, commandline, and |
157 | * initrd related fields to the FDT. This routine increases the | 178 | * initrd related fields to the FDT. This routine increases the |
@@ -175,13 +196,22 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, | |||
175 | unsigned long fdt_addr, | 196 | unsigned long fdt_addr, |
176 | unsigned long fdt_size) | 197 | unsigned long fdt_size) |
177 | { | 198 | { |
178 | unsigned long map_size, desc_size; | 199 | unsigned long map_size, desc_size, buff_size; |
179 | u32 desc_ver; | 200 | u32 desc_ver; |
180 | unsigned long mmap_key; | 201 | unsigned long mmap_key; |
181 | efi_memory_desc_t *memory_map, *runtime_map; | 202 | efi_memory_desc_t *memory_map, *runtime_map; |
182 | unsigned long new_fdt_size; | 203 | unsigned long new_fdt_size; |
183 | efi_status_t status; | 204 | efi_status_t status; |
184 | int runtime_entry_count = 0; | 205 | int runtime_entry_count = 0; |
206 | struct efi_boot_memmap map; | ||
207 | struct exit_boot_struct priv; | ||
208 | |||
209 | map.map = &runtime_map; | ||
210 | map.map_size = &map_size; | ||
211 | map.desc_size = &desc_size; | ||
212 | map.desc_ver = &desc_ver; | ||
213 | map.key_ptr = &mmap_key; | ||
214 | map.buff_size = &buff_size; | ||
185 | 215 | ||
186 | /* | 216 | /* |
187 | * Get a copy of the current memory map that we will use to prepare | 217 | * Get a copy of the current memory map that we will use to prepare |
@@ -189,8 +219,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, | |||
189 | * subsequent allocations adding entries, since they could not affect | 219 | * subsequent allocations adding entries, since they could not affect |
190 | * the number of EFI_MEMORY_RUNTIME regions. | 220 | * the number of EFI_MEMORY_RUNTIME regions. |
191 | */ | 221 | */ |
192 | status = efi_get_memory_map(sys_table, &runtime_map, &map_size, | 222 | status = efi_get_memory_map(sys_table, &map); |
193 | &desc_size, &desc_ver, &mmap_key); | ||
194 | if (status != EFI_SUCCESS) { | 223 | if (status != EFI_SUCCESS) { |
195 | pr_efi_err(sys_table, "Unable to retrieve UEFI memory map.\n"); | 224 | pr_efi_err(sys_table, "Unable to retrieve UEFI memory map.\n"); |
196 | return status; | 225 | return status; |
@@ -199,6 +228,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, | |||
199 | pr_efi(sys_table, | 228 | pr_efi(sys_table, |
200 | "Exiting boot services and installing virtual address map...\n"); | 229 | "Exiting boot services and installing virtual address map...\n"); |
201 | 230 | ||
231 | map.map = &memory_map; | ||
202 | /* | 232 | /* |
203 | * Estimate size of new FDT, and allocate memory for it. We | 233 | * Estimate size of new FDT, and allocate memory for it. We |
204 | * will allocate a bigger buffer if this ends up being too | 234 | * will allocate a bigger buffer if this ends up being too |
@@ -218,8 +248,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, | |||
218 | * we can get the memory map key needed for | 248 | * we can get the memory map key needed for |
219 | * exit_boot_services(). | 249 | * exit_boot_services(). |
220 | */ | 250 | */ |
221 | status = efi_get_memory_map(sys_table, &memory_map, &map_size, | 251 | status = efi_get_memory_map(sys_table, &map); |
222 | &desc_size, &desc_ver, &mmap_key); | ||
223 | if (status != EFI_SUCCESS) | 252 | if (status != EFI_SUCCESS) |
224 | goto fail_free_new_fdt; | 253 | goto fail_free_new_fdt; |
225 | 254 | ||
@@ -250,16 +279,11 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, | |||
250 | } | 279 | } |
251 | } | 280 | } |
252 | 281 | ||
253 | /* | 282 | sys_table->boottime->free_pool(memory_map); |
254 | * Update the memory map with virtual addresses. The function will also | 283 | priv.runtime_map = runtime_map; |
255 | * populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME | 284 | priv.runtime_entry_count = &runtime_entry_count; |
256 | * entries so that we can pass it straight into SetVirtualAddressMap() | 285 | status = efi_exit_boot_services(sys_table, handle, &map, &priv, |
257 | */ | 286 | exit_boot_func); |
258 | efi_get_virtmap(memory_map, map_size, desc_size, runtime_map, | ||
259 | &runtime_entry_count); | ||
260 | |||
261 | /* Now we are ready to exit_boot_services.*/ | ||
262 | status = sys_table->boottime->exit_boot_services(handle, mmap_key); | ||
263 | 287 | ||
264 | if (status == EFI_SUCCESS) { | 288 | if (status == EFI_SUCCESS) { |
265 | efi_set_virtual_address_map_t *svam; | 289 | efi_set_virtual_address_map_t *svam; |
diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c index 53f6d3fe6d86..0c9f58c5ba50 100644 --- a/drivers/firmware/efi/libstub/random.c +++ b/drivers/firmware/efi/libstub/random.c | |||
@@ -73,12 +73,20 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg, | |||
73 | unsigned long random_seed) | 73 | unsigned long random_seed) |
74 | { | 74 | { |
75 | unsigned long map_size, desc_size, total_slots = 0, target_slot; | 75 | unsigned long map_size, desc_size, total_slots = 0, target_slot; |
76 | unsigned long buff_size; | ||
76 | efi_status_t status; | 77 | efi_status_t status; |
77 | efi_memory_desc_t *memory_map; | 78 | efi_memory_desc_t *memory_map; |
78 | int map_offset; | 79 | int map_offset; |
80 | struct efi_boot_memmap map; | ||
79 | 81 | ||
80 | status = efi_get_memory_map(sys_table_arg, &memory_map, &map_size, | 82 | map.map = &memory_map; |
81 | &desc_size, NULL, NULL); | 83 | map.map_size = &map_size; |
84 | map.desc_size = &desc_size; | ||
85 | map.desc_ver = NULL; | ||
86 | map.key_ptr = NULL; | ||
87 | map.buff_size = &buff_size; | ||
88 | |||
89 | status = efi_get_memory_map(sys_table_arg, &map); | ||
82 | if (status != EFI_SUCCESS) | 90 | if (status != EFI_SUCCESS) |
83 | return status; | 91 | return status; |
84 | 92 | ||
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 98dd47a30fc7..24caedb00a7a 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig | |||
@@ -50,6 +50,7 @@ config GPIO_DEVRES | |||
50 | config OF_GPIO | 50 | config OF_GPIO |
51 | def_bool y | 51 | def_bool y |
52 | depends on OF | 52 | depends on OF |
53 | depends on HAS_IOMEM | ||
53 | 54 | ||
54 | config GPIO_ACPI | 55 | config GPIO_ACPI |
55 | def_bool y | 56 | def_bool y |
@@ -188,7 +189,7 @@ config GPIO_EP93XX | |||
188 | config GPIO_ETRAXFS | 189 | config GPIO_ETRAXFS |
189 | bool "Axis ETRAX FS General I/O" | 190 | bool "Axis ETRAX FS General I/O" |
190 | depends on CRIS || COMPILE_TEST | 191 | depends on CRIS || COMPILE_TEST |
191 | depends on OF | 192 | depends on OF_GPIO |
192 | select GPIO_GENERIC | 193 | select GPIO_GENERIC |
193 | select GPIOLIB_IRQCHIP | 194 | select GPIOLIB_IRQCHIP |
194 | help | 195 | help |
@@ -214,7 +215,7 @@ config GPIO_GENERIC_PLATFORM | |||
214 | 215 | ||
215 | config GPIO_GRGPIO | 216 | config GPIO_GRGPIO |
216 | tristate "Aeroflex Gaisler GRGPIO support" | 217 | tristate "Aeroflex Gaisler GRGPIO support" |
217 | depends on OF | 218 | depends on OF_GPIO |
218 | select GPIO_GENERIC | 219 | select GPIO_GENERIC |
219 | select IRQ_DOMAIN | 220 | select IRQ_DOMAIN |
220 | help | 221 | help |
@@ -312,7 +313,7 @@ config GPIO_MPC8XXX | |||
312 | config GPIO_MVEBU | 313 | config GPIO_MVEBU |
313 | def_bool y | 314 | def_bool y |
314 | depends on PLAT_ORION | 315 | depends on PLAT_ORION |
315 | depends on OF | 316 | depends on OF_GPIO |
316 | select GENERIC_IRQ_CHIP | 317 | select GENERIC_IRQ_CHIP |
317 | 318 | ||
318 | config GPIO_MXC | 319 | config GPIO_MXC |
@@ -405,7 +406,7 @@ config GPIO_TEGRA | |||
405 | bool "NVIDIA Tegra GPIO support" | 406 | bool "NVIDIA Tegra GPIO support" |
406 | default ARCH_TEGRA | 407 | default ARCH_TEGRA |
407 | depends on ARCH_TEGRA || COMPILE_TEST | 408 | depends on ARCH_TEGRA || COMPILE_TEST |
408 | depends on OF | 409 | depends on OF_GPIO |
409 | help | 410 | help |
410 | Say yes here to support GPIO pins on NVIDIA Tegra SoCs. | 411 | Say yes here to support GPIO pins on NVIDIA Tegra SoCs. |
411 | 412 | ||
@@ -1099,7 +1100,7 @@ menu "SPI GPIO expanders" | |||
1099 | 1100 | ||
1100 | config GPIO_74X164 | 1101 | config GPIO_74X164 |
1101 | tristate "74x164 serial-in/parallel-out 8-bits shift register" | 1102 | tristate "74x164 serial-in/parallel-out 8-bits shift register" |
1102 | depends on OF | 1103 | depends on OF_GPIO |
1103 | help | 1104 | help |
1104 | Driver for 74x164 compatible serial-in/parallel-out 8-outputs | 1105 | Driver for 74x164 compatible serial-in/parallel-out 8-outputs |
1105 | shift registers. This driver can be used to provide access | 1106 | shift registers. This driver can be used to provide access |
@@ -1130,6 +1131,7 @@ menu "SPI or I2C GPIO expanders" | |||
1130 | 1131 | ||
1131 | config GPIO_MCP23S08 | 1132 | config GPIO_MCP23S08 |
1132 | tristate "Microchip MCP23xxx I/O expander" | 1133 | tristate "Microchip MCP23xxx I/O expander" |
1134 | depends on OF_GPIO | ||
1133 | select GPIOLIB_IRQCHIP | 1135 | select GPIOLIB_IRQCHIP |
1134 | help | 1136 | help |
1135 | SPI/I2C driver for Microchip MCP23S08/MCP23S17/MCP23008/MCP23017 | 1137 | SPI/I2C driver for Microchip MCP23S08/MCP23S17/MCP23008/MCP23017 |
diff --git a/drivers/gpio/gpio-max730x.c b/drivers/gpio/gpio-max730x.c index 08807368f007..946d09195598 100644 --- a/drivers/gpio/gpio-max730x.c +++ b/drivers/gpio/gpio-max730x.c | |||
@@ -192,6 +192,10 @@ int __max730x_probe(struct max7301 *ts) | |||
192 | ts->chip.parent = dev; | 192 | ts->chip.parent = dev; |
193 | ts->chip.owner = THIS_MODULE; | 193 | ts->chip.owner = THIS_MODULE; |
194 | 194 | ||
195 | ret = gpiochip_add_data(&ts->chip, ts); | ||
196 | if (ret) | ||
197 | goto exit_destroy; | ||
198 | |||
195 | /* | 199 | /* |
196 | * initialize pullups according to platform data and cache the | 200 | * initialize pullups according to platform data and cache the |
197 | * register values for later use. | 201 | * register values for later use. |
@@ -213,10 +217,6 @@ int __max730x_probe(struct max7301 *ts) | |||
213 | } | 217 | } |
214 | } | 218 | } |
215 | 219 | ||
216 | ret = gpiochip_add_data(&ts->chip, ts); | ||
217 | if (ret) | ||
218 | goto exit_destroy; | ||
219 | |||
220 | return ret; | 220 | return ret; |
221 | 221 | ||
222 | exit_destroy: | 222 | exit_destroy: |
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c index ac22efc1840e..99d37b56c258 100644 --- a/drivers/gpio/gpio-mcp23s08.c +++ b/drivers/gpio/gpio-mcp23s08.c | |||
@@ -564,7 +564,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, | |||
564 | mcp->chip.direction_output = mcp23s08_direction_output; | 564 | mcp->chip.direction_output = mcp23s08_direction_output; |
565 | mcp->chip.set = mcp23s08_set; | 565 | mcp->chip.set = mcp23s08_set; |
566 | mcp->chip.dbg_show = mcp23s08_dbg_show; | 566 | mcp->chip.dbg_show = mcp23s08_dbg_show; |
567 | #ifdef CONFIG_OF | 567 | #ifdef CONFIG_OF_GPIO |
568 | mcp->chip.of_gpio_n_cells = 2; | 568 | mcp->chip.of_gpio_n_cells = 2; |
569 | mcp->chip.of_node = dev->of_node; | 569 | mcp->chip.of_node = dev->of_node; |
570 | #endif | 570 | #endif |
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c index 0c99e8fb9af3..8d8ee0ebf14c 100644 --- a/drivers/gpio/gpio-sa1100.c +++ b/drivers/gpio/gpio-sa1100.c | |||
@@ -155,7 +155,7 @@ static int sa1100_gpio_irqdomain_map(struct irq_domain *d, | |||
155 | { | 155 | { |
156 | irq_set_chip_and_handler(irq, &sa1100_gpio_irq_chip, | 156 | irq_set_chip_and_handler(irq, &sa1100_gpio_irq_chip, |
157 | handle_edge_irq); | 157 | handle_edge_irq); |
158 | irq_set_noprobe(irq); | 158 | irq_set_probe(irq); |
159 | 159 | ||
160 | return 0; | 160 | return 0; |
161 | } | 161 | } |
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index 75e7b3919ea7..a28feb3edf33 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c | |||
@@ -16,7 +16,6 @@ | |||
16 | #include <linux/errno.h> | 16 | #include <linux/errno.h> |
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/io.h> | 18 | #include <linux/io.h> |
19 | #include <linux/io-mapping.h> | ||
20 | #include <linux/gpio/consumer.h> | 19 | #include <linux/gpio/consumer.h> |
21 | #include <linux/of.h> | 20 | #include <linux/of.h> |
22 | #include <linux/of_address.h> | 21 | #include <linux/of_address.h> |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 9d79e4ba0213..72c68dbb9821 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
@@ -407,7 +407,6 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring); | |||
407 | /* | 407 | /* |
408 | * BO. | 408 | * BO. |
409 | */ | 409 | */ |
410 | |||
411 | struct amdgpu_bo_list_entry { | 410 | struct amdgpu_bo_list_entry { |
412 | struct amdgpu_bo *robj; | 411 | struct amdgpu_bo *robj; |
413 | struct ttm_validate_buffer tv; | 412 | struct ttm_validate_buffer tv; |
@@ -620,9 +619,9 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev); | |||
620 | void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev); | 619 | void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev); |
621 | int amdgpu_gart_init(struct amdgpu_device *adev); | 620 | int amdgpu_gart_init(struct amdgpu_device *adev); |
622 | void amdgpu_gart_fini(struct amdgpu_device *adev); | 621 | void amdgpu_gart_fini(struct amdgpu_device *adev); |
623 | void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset, | 622 | void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, |
624 | int pages); | 623 | int pages); |
625 | int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset, | 624 | int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, |
626 | int pages, struct page **pagelist, | 625 | int pages, struct page **pagelist, |
627 | dma_addr_t *dma_addr, uint32_t flags); | 626 | dma_addr_t *dma_addr, uint32_t flags); |
628 | int amdgpu_ttm_recover_gart(struct amdgpu_device *adev); | 627 | int amdgpu_ttm_recover_gart(struct amdgpu_device *adev); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 59961db9c390..8e6bf548d689 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | |||
@@ -348,6 +348,19 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device * | |||
348 | (le16_to_cpu(path->usConnObjectId) & | 348 | (le16_to_cpu(path->usConnObjectId) & |
349 | OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; | 349 | OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; |
350 | 350 | ||
351 | /* Skip TV/CV support */ | ||
352 | if ((le16_to_cpu(path->usDeviceTag) == | ||
353 | ATOM_DEVICE_TV1_SUPPORT) || | ||
354 | (le16_to_cpu(path->usDeviceTag) == | ||
355 | ATOM_DEVICE_CV_SUPPORT)) | ||
356 | continue; | ||
357 | |||
358 | if (con_obj_id >= ARRAY_SIZE(object_connector_convert)) { | ||
359 | DRM_ERROR("invalid con_obj_id %d for device tag 0x%04x\n", | ||
360 | con_obj_id, le16_to_cpu(path->usDeviceTag)); | ||
361 | continue; | ||
362 | } | ||
363 | |||
351 | connector_type = | 364 | connector_type = |
352 | object_connector_convert[con_obj_id]; | 365 | object_connector_convert[con_obj_id]; |
353 | connector_object_id = con_obj_id; | 366 | connector_object_id = con_obj_id; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index 550c5ee704ec..dae35a96a694 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | |||
@@ -205,16 +205,7 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx) | |||
205 | atpx->is_hybrid = false; | 205 | atpx->is_hybrid = false; |
206 | if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { | 206 | if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { |
207 | printk("ATPX Hybrid Graphics\n"); | 207 | printk("ATPX Hybrid Graphics\n"); |
208 | #if 1 | ||
209 | /* This is a temporary hack until the D3 cold support | ||
210 | * makes it upstream. The ATPX power_control method seems | ||
211 | * to still work on even if the system should be using | ||
212 | * the new standardized hybrid D3 cold ACPI interface. | ||
213 | */ | ||
214 | atpx->functions.power_cntl = true; | ||
215 | #else | ||
216 | atpx->functions.power_cntl = false; | 208 | atpx->functions.power_cntl = false; |
217 | #endif | ||
218 | atpx->is_hybrid = true; | 209 | atpx->is_hybrid = true; |
219 | } | 210 | } |
220 | 211 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 921bce2df0b0..0feea347f680 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | |||
@@ -221,7 +221,7 @@ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev) | |||
221 | * Unbinds the requested pages from the gart page table and | 221 | * Unbinds the requested pages from the gart page table and |
222 | * replaces them with the dummy page (all asics). | 222 | * replaces them with the dummy page (all asics). |
223 | */ | 223 | */ |
224 | void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset, | 224 | void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, |
225 | int pages) | 225 | int pages) |
226 | { | 226 | { |
227 | unsigned t; | 227 | unsigned t; |
@@ -268,7 +268,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset, | |||
268 | * (all asics). | 268 | * (all asics). |
269 | * Returns 0 for success, -EINVAL for failure. | 269 | * Returns 0 for success, -EINVAL for failure. |
270 | */ | 270 | */ |
271 | int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset, | 271 | int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, |
272 | int pages, struct page **pagelist, dma_addr_t *dma_addr, | 272 | int pages, struct page **pagelist, dma_addr_t *dma_addr, |
273 | uint32_t flags) | 273 | uint32_t flags) |
274 | { | 274 | { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 4127e7ceace0..6a6c86c9c169 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | |||
@@ -295,7 +295,7 @@ void amdgpu_ib_pool_fini(struct amdgpu_device *adev) | |||
295 | int amdgpu_ib_ring_tests(struct amdgpu_device *adev) | 295 | int amdgpu_ib_ring_tests(struct amdgpu_device *adev) |
296 | { | 296 | { |
297 | unsigned i; | 297 | unsigned i; |
298 | int r; | 298 | int r, ret = 0; |
299 | 299 | ||
300 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | 300 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { |
301 | struct amdgpu_ring *ring = adev->rings[i]; | 301 | struct amdgpu_ring *ring = adev->rings[i]; |
@@ -316,10 +316,11 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev) | |||
316 | } else { | 316 | } else { |
317 | /* still not good, but we can live with it */ | 317 | /* still not good, but we can live with it */ |
318 | DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r); | 318 | DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r); |
319 | ret = r; | ||
319 | } | 320 | } |
320 | } | 321 | } |
321 | } | 322 | } |
322 | return 0; | 323 | return ret; |
323 | } | 324 | } |
324 | 325 | ||
325 | /* | 326 | /* |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index b63969d7887c..160a094e1a93 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | |||
@@ -273,8 +273,8 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, | |||
273 | 273 | ||
274 | adev = amdgpu_get_adev(bo->bdev); | 274 | adev = amdgpu_get_adev(bo->bdev); |
275 | ring = adev->mman.buffer_funcs_ring; | 275 | ring = adev->mman.buffer_funcs_ring; |
276 | old_start = old_mem->start << PAGE_SHIFT; | 276 | old_start = (u64)old_mem->start << PAGE_SHIFT; |
277 | new_start = new_mem->start << PAGE_SHIFT; | 277 | new_start = (u64)new_mem->start << PAGE_SHIFT; |
278 | 278 | ||
279 | switch (old_mem->mem_type) { | 279 | switch (old_mem->mem_type) { |
280 | case TTM_PL_TT: | 280 | case TTM_PL_TT: |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 25dd58a65905..cee7bc9a2314 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | |||
@@ -1154,7 +1154,8 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
1154 | r = 0; | 1154 | r = 0; |
1155 | } | 1155 | } |
1156 | 1156 | ||
1157 | error: | ||
1158 | fence_put(fence); | 1157 | fence_put(fence); |
1158 | |||
1159 | error: | ||
1159 | return r; | 1160 | return r; |
1160 | } | 1161 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index bd5af328154f..a6a48ed9562e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |||
@@ -1593,7 +1593,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |||
1593 | r = amd_sched_entity_init(&ring->sched, &vm->entity, | 1593 | r = amd_sched_entity_init(&ring->sched, &vm->entity, |
1594 | rq, amdgpu_sched_jobs); | 1594 | rq, amdgpu_sched_jobs); |
1595 | if (r) | 1595 | if (r) |
1596 | return r; | 1596 | goto err; |
1597 | 1597 | ||
1598 | vm->page_directory_fence = NULL; | 1598 | vm->page_directory_fence = NULL; |
1599 | 1599 | ||
@@ -1624,6 +1624,9 @@ error_free_page_directory: | |||
1624 | error_free_sched_entity: | 1624 | error_free_sched_entity: |
1625 | amd_sched_entity_fini(&ring->sched, &vm->entity); | 1625 | amd_sched_entity_fini(&ring->sched, &vm->entity); |
1626 | 1626 | ||
1627 | err: | ||
1628 | drm_free_large(vm->page_tables); | ||
1629 | |||
1627 | return r; | 1630 | return r; |
1628 | } | 1631 | } |
1629 | 1632 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index e6d7bf9520a0..cb952acc7133 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c | |||
@@ -52,6 +52,7 @@ static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev); | |||
52 | static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev); | 52 | static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev); |
53 | static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev); | 53 | static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev); |
54 | static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev); | 54 | static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev); |
55 | static int cik_sdma_soft_reset(void *handle); | ||
55 | 56 | ||
56 | MODULE_FIRMWARE("radeon/bonaire_sdma.bin"); | 57 | MODULE_FIRMWARE("radeon/bonaire_sdma.bin"); |
57 | MODULE_FIRMWARE("radeon/bonaire_sdma1.bin"); | 58 | MODULE_FIRMWARE("radeon/bonaire_sdma1.bin"); |
@@ -1014,6 +1015,8 @@ static int cik_sdma_resume(void *handle) | |||
1014 | { | 1015 | { |
1015 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 1016 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1016 | 1017 | ||
1018 | cik_sdma_soft_reset(handle); | ||
1019 | |||
1017 | return cik_sdma_hw_init(adev); | 1020 | return cik_sdma_hw_init(adev); |
1018 | } | 1021 | } |
1019 | 1022 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 32a676291e67..71116da9e782 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | |||
@@ -2927,8 +2927,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev) | |||
2927 | u64 wb_gpu_addr; | 2927 | u64 wb_gpu_addr; |
2928 | u32 *buf; | 2928 | u32 *buf; |
2929 | struct bonaire_mqd *mqd; | 2929 | struct bonaire_mqd *mqd; |
2930 | 2930 | struct amdgpu_ring *ring; | |
2931 | gfx_v7_0_cp_compute_enable(adev, true); | ||
2932 | 2931 | ||
2933 | /* fix up chicken bits */ | 2932 | /* fix up chicken bits */ |
2934 | tmp = RREG32(mmCP_CPF_DEBUG); | 2933 | tmp = RREG32(mmCP_CPF_DEBUG); |
@@ -2963,7 +2962,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev) | |||
2963 | 2962 | ||
2964 | /* init the queues. Just two for now. */ | 2963 | /* init the queues. Just two for now. */ |
2965 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { | 2964 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { |
2966 | struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; | 2965 | ring = &adev->gfx.compute_ring[i]; |
2967 | 2966 | ||
2968 | if (ring->mqd_obj == NULL) { | 2967 | if (ring->mqd_obj == NULL) { |
2969 | r = amdgpu_bo_create(adev, | 2968 | r = amdgpu_bo_create(adev, |
@@ -3142,6 +3141,13 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev) | |||
3142 | amdgpu_bo_unreserve(ring->mqd_obj); | 3141 | amdgpu_bo_unreserve(ring->mqd_obj); |
3143 | 3142 | ||
3144 | ring->ready = true; | 3143 | ring->ready = true; |
3144 | } | ||
3145 | |||
3146 | gfx_v7_0_cp_compute_enable(adev, true); | ||
3147 | |||
3148 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { | ||
3149 | ring = &adev->gfx.compute_ring[i]; | ||
3150 | |||
3145 | r = amdgpu_ring_test_ring(ring); | 3151 | r = amdgpu_ring_test_ring(ring); |
3146 | if (r) | 3152 | if (r) |
3147 | ring->ready = false; | 3153 | ring->ready = false; |
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 9ae307505190..565dab3c7218 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | |||
@@ -710,7 +710,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
710 | DRM_ERROR("amdgpu: IB test timed out\n"); | 710 | DRM_ERROR("amdgpu: IB test timed out\n"); |
711 | r = -ETIMEDOUT; | 711 | r = -ETIMEDOUT; |
712 | goto err1; | 712 | goto err1; |
713 | } else if (r) { | 713 | } else if (r < 0) { |
714 | DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); | 714 | DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); |
715 | goto err1; | 715 | goto err1; |
716 | } | 716 | } |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c index ad494b875311..453c5d66e5c3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | |||
@@ -186,7 +186,7 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd, | |||
186 | sizeof(u32)) + inx; | 186 | sizeof(u32)) + inx; |
187 | 187 | ||
188 | pr_debug("kfd: get kernel queue doorbell\n" | 188 | pr_debug("kfd: get kernel queue doorbell\n" |
189 | " doorbell offset == 0x%08d\n" | 189 | " doorbell offset == 0x%08X\n" |
190 | " kernel address == 0x%08lX\n", | 190 | " kernel address == 0x%08lX\n", |
191 | *doorbell_off, (uintptr_t)(kfd->doorbell_kernel_ptr + inx)); | 191 | *doorbell_off, (uintptr_t)(kfd->doorbell_kernel_ptr + inx)); |
192 | 192 | ||
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index ef312bb75fda..963a24d46a93 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | |||
@@ -405,7 +405,7 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched) | |||
405 | spin_lock(&sched->job_list_lock); | 405 | spin_lock(&sched->job_list_lock); |
406 | s_job = list_first_entry_or_null(&sched->ring_mirror_list, | 406 | s_job = list_first_entry_or_null(&sched->ring_mirror_list, |
407 | struct amd_sched_job, node); | 407 | struct amd_sched_job, node); |
408 | if (s_job) | 408 | if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT) |
409 | schedule_delayed_work(&s_job->work_tdr, sched->timeout); | 409 | schedule_delayed_work(&s_job->work_tdr, sched->timeout); |
410 | 410 | ||
411 | list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) { | 411 | list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) { |
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index a978381ef95b..9b17a66cf0e1 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | |||
@@ -387,7 +387,7 @@ void atmel_hlcdc_crtc_irq(struct drm_crtc *c) | |||
387 | atmel_hlcdc_crtc_finish_page_flip(drm_crtc_to_atmel_hlcdc_crtc(c)); | 387 | atmel_hlcdc_crtc_finish_page_flip(drm_crtc_to_atmel_hlcdc_crtc(c)); |
388 | } | 388 | } |
389 | 389 | ||
390 | void atmel_hlcdc_crtc_reset(struct drm_crtc *crtc) | 390 | static void atmel_hlcdc_crtc_reset(struct drm_crtc *crtc) |
391 | { | 391 | { |
392 | struct atmel_hlcdc_crtc_state *state; | 392 | struct atmel_hlcdc_crtc_state *state; |
393 | 393 | ||
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c index 72e6b7dd457b..9d4c030672f0 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c | |||
@@ -320,19 +320,19 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane, | |||
320 | u32 *coeff_tab = heo_upscaling_ycoef; | 320 | u32 *coeff_tab = heo_upscaling_ycoef; |
321 | u32 max_memsize; | 321 | u32 max_memsize; |
322 | 322 | ||
323 | if (state->crtc_w < state->src_w) | 323 | if (state->crtc_h < state->src_h) |
324 | coeff_tab = heo_downscaling_ycoef; | 324 | coeff_tab = heo_downscaling_ycoef; |
325 | for (i = 0; i < ARRAY_SIZE(heo_upscaling_ycoef); i++) | 325 | for (i = 0; i < ARRAY_SIZE(heo_upscaling_ycoef); i++) |
326 | atmel_hlcdc_layer_update_cfg(&plane->layer, | 326 | atmel_hlcdc_layer_update_cfg(&plane->layer, |
327 | 33 + i, | 327 | 33 + i, |
328 | 0xffffffff, | 328 | 0xffffffff, |
329 | coeff_tab[i]); | 329 | coeff_tab[i]); |
330 | factor = ((8 * 256 * state->src_w) - (256 * 4)) / | 330 | factor = ((8 * 256 * state->src_h) - (256 * 4)) / |
331 | state->crtc_w; | 331 | state->crtc_h; |
332 | factor++; | 332 | factor++; |
333 | max_memsize = ((factor * state->crtc_w) + (256 * 4)) / | 333 | max_memsize = ((factor * state->crtc_h) + (256 * 4)) / |
334 | 2048; | 334 | 2048; |
335 | if (max_memsize > state->src_w) | 335 | if (max_memsize > state->src_h) |
336 | factor--; | 336 | factor--; |
337 | factor_reg |= (factor << 16) | 0x80000000; | 337 | factor_reg |= (factor << 16) | 0x80000000; |
338 | } | 338 | } |
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 904d29c012ad..23739609427d 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c | |||
@@ -475,7 +475,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc, | |||
475 | val, | 475 | val, |
476 | -1, | 476 | -1, |
477 | &replaced); | 477 | &replaced); |
478 | state->color_mgmt_changed = replaced; | 478 | state->color_mgmt_changed |= replaced; |
479 | return ret; | 479 | return ret; |
480 | } else if (property == config->ctm_property) { | 480 | } else if (property == config->ctm_property) { |
481 | ret = drm_atomic_replace_property_blob_from_id(crtc, | 481 | ret = drm_atomic_replace_property_blob_from_id(crtc, |
@@ -483,7 +483,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc, | |||
483 | val, | 483 | val, |
484 | sizeof(struct drm_color_ctm), | 484 | sizeof(struct drm_color_ctm), |
485 | &replaced); | 485 | &replaced); |
486 | state->color_mgmt_changed = replaced; | 486 | state->color_mgmt_changed |= replaced; |
487 | return ret; | 487 | return ret; |
488 | } else if (property == config->gamma_lut_property) { | 488 | } else if (property == config->gamma_lut_property) { |
489 | ret = drm_atomic_replace_property_blob_from_id(crtc, | 489 | ret = drm_atomic_replace_property_blob_from_id(crtc, |
@@ -491,7 +491,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc, | |||
491 | val, | 491 | val, |
492 | -1, | 492 | -1, |
493 | &replaced); | 493 | &replaced); |
494 | state->color_mgmt_changed = replaced; | 494 | state->color_mgmt_changed |= replaced; |
495 | return ret; | 495 | return ret; |
496 | } else if (crtc->funcs->atomic_set_property) | 496 | } else if (crtc->funcs->atomic_set_property) |
497 | return crtc->funcs->atomic_set_property(crtc, state, property, val); | 497 | return crtc->funcs->atomic_set_property(crtc, state, property, val); |
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 5e830281bebd..03414bde1f15 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
@@ -466,7 +466,7 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper) | |||
466 | 466 | ||
467 | /* Sometimes user space wants everything disabled, so don't steal the | 467 | /* Sometimes user space wants everything disabled, so don't steal the |
468 | * display if there's a master. */ | 468 | * display if there's a master. */ |
469 | if (lockless_dereference(dev->master)) | 469 | if (READ_ONCE(dev->master)) |
470 | return false; | 470 | return false; |
471 | 471 | ||
472 | drm_for_each_crtc(crtc, dev) { | 472 | drm_for_each_crtc(crtc, dev) { |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index e0166403b4bd..40ce841eb952 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c | |||
@@ -55,11 +55,11 @@ static int check_fb_gem_memory_type(struct drm_device *drm_dev, | |||
55 | flags = exynos_gem->flags; | 55 | flags = exynos_gem->flags; |
56 | 56 | ||
57 | /* | 57 | /* |
58 | * without iommu support, not support physically non-continuous memory | 58 | * Physically non-contiguous memory type for framebuffer is not |
59 | * for framebuffer. | 59 | * supported without IOMMU. |
60 | */ | 60 | */ |
61 | if (IS_NONCONTIG_BUFFER(flags)) { | 61 | if (IS_NONCONTIG_BUFFER(flags)) { |
62 | DRM_ERROR("cannot use this gem memory type for fb.\n"); | 62 | DRM_ERROR("Non-contiguous GEM memory is not supported.\n"); |
63 | return -EINVAL; | 63 | return -EINVAL; |
64 | } | 64 | } |
65 | 65 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 0525c56145db..147ef0d298cb 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c | |||
@@ -1753,32 +1753,6 @@ static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable) | |||
1753 | return 0; | 1753 | return 0; |
1754 | } | 1754 | } |
1755 | 1755 | ||
1756 | #ifdef CONFIG_PM_SLEEP | ||
1757 | static int fimc_suspend(struct device *dev) | ||
1758 | { | ||
1759 | struct fimc_context *ctx = get_fimc_context(dev); | ||
1760 | |||
1761 | DRM_DEBUG_KMS("id[%d]\n", ctx->id); | ||
1762 | |||
1763 | if (pm_runtime_suspended(dev)) | ||
1764 | return 0; | ||
1765 | |||
1766 | return fimc_clk_ctrl(ctx, false); | ||
1767 | } | ||
1768 | |||
1769 | static int fimc_resume(struct device *dev) | ||
1770 | { | ||
1771 | struct fimc_context *ctx = get_fimc_context(dev); | ||
1772 | |||
1773 | DRM_DEBUG_KMS("id[%d]\n", ctx->id); | ||
1774 | |||
1775 | if (!pm_runtime_suspended(dev)) | ||
1776 | return fimc_clk_ctrl(ctx, true); | ||
1777 | |||
1778 | return 0; | ||
1779 | } | ||
1780 | #endif | ||
1781 | |||
1782 | static int fimc_runtime_suspend(struct device *dev) | 1756 | static int fimc_runtime_suspend(struct device *dev) |
1783 | { | 1757 | { |
1784 | struct fimc_context *ctx = get_fimc_context(dev); | 1758 | struct fimc_context *ctx = get_fimc_context(dev); |
@@ -1799,7 +1773,8 @@ static int fimc_runtime_resume(struct device *dev) | |||
1799 | #endif | 1773 | #endif |
1800 | 1774 | ||
1801 | static const struct dev_pm_ops fimc_pm_ops = { | 1775 | static const struct dev_pm_ops fimc_pm_ops = { |
1802 | SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume) | 1776 | SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, |
1777 | pm_runtime_force_resume) | ||
1803 | SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL) | 1778 | SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL) |
1804 | }; | 1779 | }; |
1805 | 1780 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 4bf00f57ffe8..6eca8bb88648 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c | |||
@@ -1475,8 +1475,8 @@ static int g2d_remove(struct platform_device *pdev) | |||
1475 | return 0; | 1475 | return 0; |
1476 | } | 1476 | } |
1477 | 1477 | ||
1478 | #ifdef CONFIG_PM_SLEEP | 1478 | #ifdef CONFIG_PM |
1479 | static int g2d_suspend(struct device *dev) | 1479 | static int g2d_runtime_suspend(struct device *dev) |
1480 | { | 1480 | { |
1481 | struct g2d_data *g2d = dev_get_drvdata(dev); | 1481 | struct g2d_data *g2d = dev_get_drvdata(dev); |
1482 | 1482 | ||
@@ -1490,25 +1490,6 @@ static int g2d_suspend(struct device *dev) | |||
1490 | 1490 | ||
1491 | flush_work(&g2d->runqueue_work); | 1491 | flush_work(&g2d->runqueue_work); |
1492 | 1492 | ||
1493 | return 0; | ||
1494 | } | ||
1495 | |||
1496 | static int g2d_resume(struct device *dev) | ||
1497 | { | ||
1498 | struct g2d_data *g2d = dev_get_drvdata(dev); | ||
1499 | |||
1500 | g2d->suspended = false; | ||
1501 | g2d_exec_runqueue(g2d); | ||
1502 | |||
1503 | return 0; | ||
1504 | } | ||
1505 | #endif | ||
1506 | |||
1507 | #ifdef CONFIG_PM | ||
1508 | static int g2d_runtime_suspend(struct device *dev) | ||
1509 | { | ||
1510 | struct g2d_data *g2d = dev_get_drvdata(dev); | ||
1511 | |||
1512 | clk_disable_unprepare(g2d->gate_clk); | 1493 | clk_disable_unprepare(g2d->gate_clk); |
1513 | 1494 | ||
1514 | return 0; | 1495 | return 0; |
@@ -1523,12 +1504,16 @@ static int g2d_runtime_resume(struct device *dev) | |||
1523 | if (ret < 0) | 1504 | if (ret < 0) |
1524 | dev_warn(dev, "failed to enable clock.\n"); | 1505 | dev_warn(dev, "failed to enable clock.\n"); |
1525 | 1506 | ||
1507 | g2d->suspended = false; | ||
1508 | g2d_exec_runqueue(g2d); | ||
1509 | |||
1526 | return ret; | 1510 | return ret; |
1527 | } | 1511 | } |
1528 | #endif | 1512 | #endif |
1529 | 1513 | ||
1530 | static const struct dev_pm_ops g2d_pm_ops = { | 1514 | static const struct dev_pm_ops g2d_pm_ops = { |
1531 | SET_SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume) | 1515 | SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, |
1516 | pm_runtime_force_resume) | ||
1532 | SET_RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL) | 1517 | SET_RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL) |
1533 | }; | 1518 | }; |
1534 | 1519 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 5d20da8f957e..52a9d269484e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c | |||
@@ -1760,34 +1760,7 @@ static int gsc_remove(struct platform_device *pdev) | |||
1760 | return 0; | 1760 | return 0; |
1761 | } | 1761 | } |
1762 | 1762 | ||
1763 | #ifdef CONFIG_PM_SLEEP | 1763 | static int __maybe_unused gsc_runtime_suspend(struct device *dev) |
1764 | static int gsc_suspend(struct device *dev) | ||
1765 | { | ||
1766 | struct gsc_context *ctx = get_gsc_context(dev); | ||
1767 | |||
1768 | DRM_DEBUG_KMS("id[%d]\n", ctx->id); | ||
1769 | |||
1770 | if (pm_runtime_suspended(dev)) | ||
1771 | return 0; | ||
1772 | |||
1773 | return gsc_clk_ctrl(ctx, false); | ||
1774 | } | ||
1775 | |||
1776 | static int gsc_resume(struct device *dev) | ||
1777 | { | ||
1778 | struct gsc_context *ctx = get_gsc_context(dev); | ||
1779 | |||
1780 | DRM_DEBUG_KMS("id[%d]\n", ctx->id); | ||
1781 | |||
1782 | if (!pm_runtime_suspended(dev)) | ||
1783 | return gsc_clk_ctrl(ctx, true); | ||
1784 | |||
1785 | return 0; | ||
1786 | } | ||
1787 | #endif | ||
1788 | |||
1789 | #ifdef CONFIG_PM | ||
1790 | static int gsc_runtime_suspend(struct device *dev) | ||
1791 | { | 1764 | { |
1792 | struct gsc_context *ctx = get_gsc_context(dev); | 1765 | struct gsc_context *ctx = get_gsc_context(dev); |
1793 | 1766 | ||
@@ -1796,7 +1769,7 @@ static int gsc_runtime_suspend(struct device *dev) | |||
1796 | return gsc_clk_ctrl(ctx, false); | 1769 | return gsc_clk_ctrl(ctx, false); |
1797 | } | 1770 | } |
1798 | 1771 | ||
1799 | static int gsc_runtime_resume(struct device *dev) | 1772 | static int __maybe_unused gsc_runtime_resume(struct device *dev) |
1800 | { | 1773 | { |
1801 | struct gsc_context *ctx = get_gsc_context(dev); | 1774 | struct gsc_context *ctx = get_gsc_context(dev); |
1802 | 1775 | ||
@@ -1804,10 +1777,10 @@ static int gsc_runtime_resume(struct device *dev) | |||
1804 | 1777 | ||
1805 | return gsc_clk_ctrl(ctx, true); | 1778 | return gsc_clk_ctrl(ctx, true); |
1806 | } | 1779 | } |
1807 | #endif | ||
1808 | 1780 | ||
1809 | static const struct dev_pm_ops gsc_pm_ops = { | 1781 | static const struct dev_pm_ops gsc_pm_ops = { |
1810 | SET_SYSTEM_SLEEP_PM_OPS(gsc_suspend, gsc_resume) | 1782 | SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, |
1783 | pm_runtime_force_resume) | ||
1811 | SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL) | 1784 | SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL) |
1812 | }; | 1785 | }; |
1813 | 1786 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index 404367a430b5..6591e406084c 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c | |||
@@ -794,29 +794,6 @@ static int rotator_clk_crtl(struct rot_context *rot, bool enable) | |||
794 | return 0; | 794 | return 0; |
795 | } | 795 | } |
796 | 796 | ||
797 | |||
798 | #ifdef CONFIG_PM_SLEEP | ||
799 | static int rotator_suspend(struct device *dev) | ||
800 | { | ||
801 | struct rot_context *rot = dev_get_drvdata(dev); | ||
802 | |||
803 | if (pm_runtime_suspended(dev)) | ||
804 | return 0; | ||
805 | |||
806 | return rotator_clk_crtl(rot, false); | ||
807 | } | ||
808 | |||
809 | static int rotator_resume(struct device *dev) | ||
810 | { | ||
811 | struct rot_context *rot = dev_get_drvdata(dev); | ||
812 | |||
813 | if (!pm_runtime_suspended(dev)) | ||
814 | return rotator_clk_crtl(rot, true); | ||
815 | |||
816 | return 0; | ||
817 | } | ||
818 | #endif | ||
819 | |||
820 | static int rotator_runtime_suspend(struct device *dev) | 797 | static int rotator_runtime_suspend(struct device *dev) |
821 | { | 798 | { |
822 | struct rot_context *rot = dev_get_drvdata(dev); | 799 | struct rot_context *rot = dev_get_drvdata(dev); |
@@ -833,7 +810,8 @@ static int rotator_runtime_resume(struct device *dev) | |||
833 | #endif | 810 | #endif |
834 | 811 | ||
835 | static const struct dev_pm_ops rotator_pm_ops = { | 812 | static const struct dev_pm_ops rotator_pm_ops = { |
836 | SET_SYSTEM_SLEEP_PM_OPS(rotator_suspend, rotator_resume) | 813 | SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, |
814 | pm_runtime_force_resume) | ||
837 | SET_RUNTIME_PM_OPS(rotator_runtime_suspend, rotator_runtime_resume, | 815 | SET_RUNTIME_PM_OPS(rotator_runtime_suspend, rotator_runtime_resume, |
838 | NULL) | 816 | NULL) |
839 | }; | 817 | }; |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c8bd02277b7d..2c8106758922 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -2616,6 +2616,8 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine) | |||
2616 | list_for_each_entry_continue(request, &engine->request_list, link) | 2616 | list_for_each_entry_continue(request, &engine->request_list, link) |
2617 | if (request->ctx == incomplete_ctx) | 2617 | if (request->ctx == incomplete_ctx) |
2618 | reset_request(request); | 2618 | reset_request(request); |
2619 | |||
2620 | engine->i915->gt.active_engines &= ~intel_engine_flag(engine); | ||
2619 | } | 2621 | } |
2620 | 2622 | ||
2621 | void i915_gem_reset(struct drm_i915_private *dev_priv) | 2623 | void i915_gem_reset(struct drm_i915_private *dev_priv) |
@@ -2626,6 +2628,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv) | |||
2626 | 2628 | ||
2627 | for_each_engine(engine, dev_priv) | 2629 | for_each_engine(engine, dev_priv) |
2628 | i915_gem_reset_engine(engine); | 2630 | i915_gem_reset_engine(engine); |
2631 | mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0); | ||
2629 | 2632 | ||
2630 | i915_gem_restore_fences(&dev_priv->drm); | 2633 | i915_gem_restore_fences(&dev_priv->drm); |
2631 | } | 2634 | } |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 497d99b88468..8d4c35d55b1b 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -3601,6 +3601,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) | |||
3601 | 3601 | ||
3602 | dev_priv->modeset_restore_state = NULL; | 3602 | dev_priv->modeset_restore_state = NULL; |
3603 | 3603 | ||
3604 | dev_priv->modeset_restore_state = NULL; | ||
3605 | |||
3604 | /* reset doesn't touch the display */ | 3606 | /* reset doesn't touch the display */ |
3605 | if (!gpu_reset_clobbers_display(dev_priv)) { | 3607 | if (!gpu_reset_clobbers_display(dev_priv)) { |
3606 | if (!state) { | 3608 | if (!state) { |
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index 9df29f1cb16a..4e1ae3fc462d 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c | |||
@@ -79,6 +79,8 @@ static void ipu_crtc_atomic_disable(struct drm_crtc *crtc, | |||
79 | 79 | ||
80 | /* always disable planes on the CRTC */ | 80 | /* always disable planes on the CRTC */ |
81 | drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, true); | 81 | drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, true); |
82 | |||
83 | drm_crtc_vblank_off(crtc); | ||
82 | } | 84 | } |
83 | 85 | ||
84 | static void imx_drm_crtc_reset(struct drm_crtc *crtc) | 86 | static void imx_drm_crtc_reset(struct drm_crtc *crtc) |
@@ -183,6 +185,8 @@ static int ipu_crtc_atomic_check(struct drm_crtc *crtc, | |||
183 | static void ipu_crtc_atomic_begin(struct drm_crtc *crtc, | 185 | static void ipu_crtc_atomic_begin(struct drm_crtc *crtc, |
184 | struct drm_crtc_state *old_crtc_state) | 186 | struct drm_crtc_state *old_crtc_state) |
185 | { | 187 | { |
188 | drm_crtc_vblank_on(crtc); | ||
189 | |||
186 | spin_lock_irq(&crtc->dev->event_lock); | 190 | spin_lock_irq(&crtc->dev->event_lock); |
187 | if (crtc->state->event) { | 191 | if (crtc->state->event) { |
188 | WARN_ON(drm_crtc_vblank_get(crtc)); | 192 | WARN_ON(drm_crtc_vblank_get(crtc)); |
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig index 23ac8041c562..294de4549922 100644 --- a/drivers/gpu/drm/mediatek/Kconfig +++ b/drivers/gpu/drm/mediatek/Kconfig | |||
@@ -2,6 +2,9 @@ config DRM_MEDIATEK | |||
2 | tristate "DRM Support for Mediatek SoCs" | 2 | tristate "DRM Support for Mediatek SoCs" |
3 | depends on DRM | 3 | depends on DRM |
4 | depends on ARCH_MEDIATEK || (ARM && COMPILE_TEST) | 4 | depends on ARCH_MEDIATEK || (ARM && COMPILE_TEST) |
5 | depends on COMMON_CLK | ||
6 | depends on HAVE_ARM_SMCCC | ||
7 | depends on OF | ||
5 | select DRM_GEM_CMA_HELPER | 8 | select DRM_GEM_CMA_HELPER |
6 | select DRM_KMS_HELPER | 9 | select DRM_KMS_HELPER |
7 | select DRM_MIPI_DSI | 10 | select DRM_MIPI_DSI |
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index b4bc7f1ef717..d0da52f2a806 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h | |||
@@ -157,6 +157,12 @@ struct msm_drm_private { | |||
157 | struct shrinker shrinker; | 157 | struct shrinker shrinker; |
158 | 158 | ||
159 | struct msm_vblank_ctrl vblank_ctrl; | 159 | struct msm_vblank_ctrl vblank_ctrl; |
160 | |||
161 | /* task holding struct_mutex.. currently only used in submit path | ||
162 | * to detect and reject faults from copy_from_user() for submit | ||
163 | * ioctl. | ||
164 | */ | ||
165 | struct task_struct *struct_mutex_task; | ||
160 | }; | 166 | }; |
161 | 167 | ||
162 | struct msm_format { | 168 | struct msm_format { |
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 0a9b5580b2e9..b6ac27e31929 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c | |||
@@ -196,11 +196,20 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
196 | { | 196 | { |
197 | struct drm_gem_object *obj = vma->vm_private_data; | 197 | struct drm_gem_object *obj = vma->vm_private_data; |
198 | struct drm_device *dev = obj->dev; | 198 | struct drm_device *dev = obj->dev; |
199 | struct msm_drm_private *priv = dev->dev_private; | ||
199 | struct page **pages; | 200 | struct page **pages; |
200 | unsigned long pfn; | 201 | unsigned long pfn; |
201 | pgoff_t pgoff; | 202 | pgoff_t pgoff; |
202 | int ret; | 203 | int ret; |
203 | 204 | ||
205 | /* This should only happen if userspace tries to pass a mmap'd | ||
206 | * but unfaulted gem bo vaddr into submit ioctl, triggering | ||
207 | * a page fault while struct_mutex is already held. This is | ||
208 | * not a valid use-case so just bail. | ||
209 | */ | ||
210 | if (priv->struct_mutex_task == current) | ||
211 | return VM_FAULT_SIGBUS; | ||
212 | |||
204 | /* Make sure we don't parallel update on a fault, nor move or remove | 213 | /* Make sure we don't parallel update on a fault, nor move or remove |
205 | * something from beneath our feet | 214 | * something from beneath our feet |
206 | */ | 215 | */ |
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 3ac14cd1e5b9..b6a0f37a65f3 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c | |||
@@ -66,6 +66,14 @@ void msm_gem_submit_free(struct msm_gem_submit *submit) | |||
66 | kfree(submit); | 66 | kfree(submit); |
67 | } | 67 | } |
68 | 68 | ||
69 | static inline unsigned long __must_check | ||
70 | copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) | ||
71 | { | ||
72 | if (access_ok(VERIFY_READ, from, n)) | ||
73 | return __copy_from_user_inatomic(to, from, n); | ||
74 | return -EFAULT; | ||
75 | } | ||
76 | |||
69 | static int submit_lookup_objects(struct msm_gem_submit *submit, | 77 | static int submit_lookup_objects(struct msm_gem_submit *submit, |
70 | struct drm_msm_gem_submit *args, struct drm_file *file) | 78 | struct drm_msm_gem_submit *args, struct drm_file *file) |
71 | { | 79 | { |
@@ -73,6 +81,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, | |||
73 | int ret = 0; | 81 | int ret = 0; |
74 | 82 | ||
75 | spin_lock(&file->table_lock); | 83 | spin_lock(&file->table_lock); |
84 | pagefault_disable(); | ||
76 | 85 | ||
77 | for (i = 0; i < args->nr_bos; i++) { | 86 | for (i = 0; i < args->nr_bos; i++) { |
78 | struct drm_msm_gem_submit_bo submit_bo; | 87 | struct drm_msm_gem_submit_bo submit_bo; |
@@ -86,10 +95,15 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, | |||
86 | */ | 95 | */ |
87 | submit->bos[i].flags = 0; | 96 | submit->bos[i].flags = 0; |
88 | 97 | ||
89 | ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo)); | 98 | ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo)); |
90 | if (ret) { | 99 | if (unlikely(ret)) { |
91 | ret = -EFAULT; | 100 | pagefault_enable(); |
92 | goto out_unlock; | 101 | spin_unlock(&file->table_lock); |
102 | ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo)); | ||
103 | if (ret) | ||
104 | goto out; | ||
105 | spin_lock(&file->table_lock); | ||
106 | pagefault_disable(); | ||
93 | } | 107 | } |
94 | 108 | ||
95 | if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) { | 109 | if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) { |
@@ -129,9 +143,12 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, | |||
129 | } | 143 | } |
130 | 144 | ||
131 | out_unlock: | 145 | out_unlock: |
132 | submit->nr_bos = i; | 146 | pagefault_enable(); |
133 | spin_unlock(&file->table_lock); | 147 | spin_unlock(&file->table_lock); |
134 | 148 | ||
149 | out: | ||
150 | submit->nr_bos = i; | ||
151 | |||
135 | return ret; | 152 | return ret; |
136 | } | 153 | } |
137 | 154 | ||
@@ -392,6 +409,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, | |||
392 | goto out_unlock; | 409 | goto out_unlock; |
393 | } | 410 | } |
394 | } | 411 | } |
412 | priv->struct_mutex_task = current; | ||
395 | 413 | ||
396 | submit = submit_create(dev, gpu, args->nr_bos, args->nr_cmds); | 414 | submit = submit_create(dev, gpu, args->nr_bos, args->nr_cmds); |
397 | if (!submit) { | 415 | if (!submit) { |
@@ -531,6 +549,7 @@ out: | |||
531 | out_unlock: | 549 | out_unlock: |
532 | if (ret && (out_fence_fd >= 0)) | 550 | if (ret && (out_fence_fd >= 0)) |
533 | put_unused_fd(out_fence_fd); | 551 | put_unused_fd(out_fence_fd); |
552 | priv->struct_mutex_task = NULL; | ||
534 | mutex_unlock(&dev->struct_mutex); | 553 | mutex_unlock(&dev->struct_mutex); |
535 | return ret; | 554 | return ret; |
536 | } | 555 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index f2ad17aa33f0..dc57b628e074 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c | |||
@@ -225,6 +225,17 @@ static bool nouveau_pr3_present(struct pci_dev *pdev) | |||
225 | if (!parent_pdev) | 225 | if (!parent_pdev) |
226 | return false; | 226 | return false; |
227 | 227 | ||
228 | if (!parent_pdev->bridge_d3) { | ||
229 | /* | ||
230 | * Parent PCI bridge is currently not power managed. | ||
231 | * Since userspace can change these afterwards to be on | ||
232 | * the safe side we stick with _DSM and prevent usage of | ||
233 | * _PR3 from the bridge. | ||
234 | */ | ||
235 | pci_d3cold_disable(pdev); | ||
236 | return false; | ||
237 | } | ||
238 | |||
228 | parent_adev = ACPI_COMPANION(&parent_pdev->dev); | 239 | parent_adev = ACPI_COMPANION(&parent_pdev->dev); |
229 | if (!parent_adev) | 240 | if (!parent_adev) |
230 | return false; | 241 | return false; |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 4824f70b0258..a4e9f35da3a2 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -627,7 +627,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
627 | if (radeon_crtc->ss.refdiv) { | 627 | if (radeon_crtc->ss.refdiv) { |
628 | radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV; | 628 | radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV; |
629 | radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv; | 629 | radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv; |
630 | if (rdev->family >= CHIP_RV770) | 630 | if (ASIC_IS_AVIVO(rdev) && |
631 | rdev->family != CHIP_RS780 && | ||
632 | rdev->family != CHIP_RS880) | ||
631 | radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; | 633 | radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; |
632 | } | 634 | } |
633 | } | 635 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index a1321b2fa454..2fdcd04bc93f 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c | |||
@@ -203,16 +203,7 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx) | |||
203 | atpx->is_hybrid = false; | 203 | atpx->is_hybrid = false; |
204 | if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { | 204 | if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { |
205 | printk("ATPX Hybrid Graphics\n"); | 205 | printk("ATPX Hybrid Graphics\n"); |
206 | #if 1 | ||
207 | /* This is a temporary hack until the D3 cold support | ||
208 | * makes it upstream. The ATPX power_control method seems | ||
209 | * to still work on even if the system should be using | ||
210 | * the new standardized hybrid D3 cold ACPI interface. | ||
211 | */ | ||
212 | atpx->functions.power_cntl = true; | ||
213 | #else | ||
214 | atpx->functions.power_cntl = false; | 206 | atpx->functions.power_cntl = false; |
215 | #endif | ||
216 | atpx->is_hybrid = true; | 207 | atpx->is_hybrid = true; |
217 | } | 208 | } |
218 | 209 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 27ee0ab0e1a7..455268214b89 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -264,8 +264,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, | |||
264 | 264 | ||
265 | rdev = radeon_get_rdev(bo->bdev); | 265 | rdev = radeon_get_rdev(bo->bdev); |
266 | ridx = radeon_copy_ring_index(rdev); | 266 | ridx = radeon_copy_ring_index(rdev); |
267 | old_start = old_mem->start << PAGE_SHIFT; | 267 | old_start = (u64)old_mem->start << PAGE_SHIFT; |
268 | new_start = new_mem->start << PAGE_SHIFT; | 268 | new_start = (u64)new_mem->start << PAGE_SHIFT; |
269 | 269 | ||
270 | switch (old_mem->mem_type) { | 270 | switch (old_mem->mem_type) { |
271 | case TTM_PL_VRAM: | 271 | case TTM_PL_VRAM: |
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c index 3d228ad90e0f..3dea1216bafd 100644 --- a/drivers/gpu/drm/tegra/dsi.c +++ b/drivers/gpu/drm/tegra/dsi.c | |||
@@ -840,6 +840,21 @@ static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = { | |||
840 | .destroy = tegra_output_encoder_destroy, | 840 | .destroy = tegra_output_encoder_destroy, |
841 | }; | 841 | }; |
842 | 842 | ||
843 | static void tegra_dsi_unprepare(struct tegra_dsi *dsi) | ||
844 | { | ||
845 | int err; | ||
846 | |||
847 | if (dsi->slave) | ||
848 | tegra_dsi_unprepare(dsi->slave); | ||
849 | |||
850 | err = tegra_mipi_disable(dsi->mipi); | ||
851 | if (err < 0) | ||
852 | dev_err(dsi->dev, "failed to disable MIPI calibration: %d\n", | ||
853 | err); | ||
854 | |||
855 | pm_runtime_put(dsi->dev); | ||
856 | } | ||
857 | |||
843 | static void tegra_dsi_encoder_disable(struct drm_encoder *encoder) | 858 | static void tegra_dsi_encoder_disable(struct drm_encoder *encoder) |
844 | { | 859 | { |
845 | struct tegra_output *output = encoder_to_output(encoder); | 860 | struct tegra_output *output = encoder_to_output(encoder); |
@@ -876,7 +891,26 @@ static void tegra_dsi_encoder_disable(struct drm_encoder *encoder) | |||
876 | 891 | ||
877 | tegra_dsi_disable(dsi); | 892 | tegra_dsi_disable(dsi); |
878 | 893 | ||
879 | pm_runtime_put(dsi->dev); | 894 | tegra_dsi_unprepare(dsi); |
895 | } | ||
896 | |||
897 | static void tegra_dsi_prepare(struct tegra_dsi *dsi) | ||
898 | { | ||
899 | int err; | ||
900 | |||
901 | pm_runtime_get_sync(dsi->dev); | ||
902 | |||
903 | err = tegra_mipi_enable(dsi->mipi); | ||
904 | if (err < 0) | ||
905 | dev_err(dsi->dev, "failed to enable MIPI calibration: %d\n", | ||
906 | err); | ||
907 | |||
908 | err = tegra_dsi_pad_calibrate(dsi); | ||
909 | if (err < 0) | ||
910 | dev_err(dsi->dev, "MIPI calibration failed: %d\n", err); | ||
911 | |||
912 | if (dsi->slave) | ||
913 | tegra_dsi_prepare(dsi->slave); | ||
880 | } | 914 | } |
881 | 915 | ||
882 | static void tegra_dsi_encoder_enable(struct drm_encoder *encoder) | 916 | static void tegra_dsi_encoder_enable(struct drm_encoder *encoder) |
@@ -887,13 +921,8 @@ static void tegra_dsi_encoder_enable(struct drm_encoder *encoder) | |||
887 | struct tegra_dsi *dsi = to_dsi(output); | 921 | struct tegra_dsi *dsi = to_dsi(output); |
888 | struct tegra_dsi_state *state; | 922 | struct tegra_dsi_state *state; |
889 | u32 value; | 923 | u32 value; |
890 | int err; | ||
891 | |||
892 | pm_runtime_get_sync(dsi->dev); | ||
893 | 924 | ||
894 | err = tegra_dsi_pad_calibrate(dsi); | 925 | tegra_dsi_prepare(dsi); |
895 | if (err < 0) | ||
896 | dev_err(dsi->dev, "MIPI calibration failed: %d\n", err); | ||
897 | 926 | ||
898 | state = tegra_dsi_get_state(dsi); | 927 | state = tegra_dsi_get_state(dsi); |
899 | 928 | ||
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c index 59adcf8532dd..3f6704cf6608 100644 --- a/drivers/gpu/drm/vc4/vc4_bo.c +++ b/drivers/gpu/drm/vc4/vc4_bo.c | |||
@@ -144,7 +144,7 @@ static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev, | |||
144 | return &vc4->bo_cache.size_list[page_index]; | 144 | return &vc4->bo_cache.size_list[page_index]; |
145 | } | 145 | } |
146 | 146 | ||
147 | void vc4_bo_cache_purge(struct drm_device *dev) | 147 | static void vc4_bo_cache_purge(struct drm_device *dev) |
148 | { | 148 | { |
149 | struct vc4_dev *vc4 = to_vc4_dev(dev); | 149 | struct vc4_dev *vc4 = to_vc4_dev(dev); |
150 | 150 | ||
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 3c9e7f64b926..8703f56b7947 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c | |||
@@ -58,21 +58,21 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data, | |||
58 | switch (args->param) { | 58 | switch (args->param) { |
59 | case DRM_VC4_PARAM_V3D_IDENT0: | 59 | case DRM_VC4_PARAM_V3D_IDENT0: |
60 | ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev); | 60 | ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev); |
61 | if (ret) | 61 | if (ret < 0) |
62 | return ret; | 62 | return ret; |
63 | args->value = V3D_READ(V3D_IDENT0); | 63 | args->value = V3D_READ(V3D_IDENT0); |
64 | pm_runtime_put(&vc4->v3d->pdev->dev); | 64 | pm_runtime_put(&vc4->v3d->pdev->dev); |
65 | break; | 65 | break; |
66 | case DRM_VC4_PARAM_V3D_IDENT1: | 66 | case DRM_VC4_PARAM_V3D_IDENT1: |
67 | ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev); | 67 | ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev); |
68 | if (ret) | 68 | if (ret < 0) |
69 | return ret; | 69 | return ret; |
70 | args->value = V3D_READ(V3D_IDENT1); | 70 | args->value = V3D_READ(V3D_IDENT1); |
71 | pm_runtime_put(&vc4->v3d->pdev->dev); | 71 | pm_runtime_put(&vc4->v3d->pdev->dev); |
72 | break; | 72 | break; |
73 | case DRM_VC4_PARAM_V3D_IDENT2: | 73 | case DRM_VC4_PARAM_V3D_IDENT2: |
74 | ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev); | 74 | ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev); |
75 | if (ret) | 75 | if (ret < 0) |
76 | return ret; | 76 | return ret; |
77 | args->value = V3D_READ(V3D_IDENT2); | 77 | args->value = V3D_READ(V3D_IDENT2); |
78 | pm_runtime_put(&vc4->v3d->pdev->dev); | 78 | pm_runtime_put(&vc4->v3d->pdev->dev); |
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index 489e3de0c050..428e24919ef1 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h | |||
@@ -321,6 +321,15 @@ vc4_first_render_job(struct vc4_dev *vc4) | |||
321 | struct vc4_exec_info, head); | 321 | struct vc4_exec_info, head); |
322 | } | 322 | } |
323 | 323 | ||
324 | static inline struct vc4_exec_info * | ||
325 | vc4_last_render_job(struct vc4_dev *vc4) | ||
326 | { | ||
327 | if (list_empty(&vc4->render_job_list)) | ||
328 | return NULL; | ||
329 | return list_last_entry(&vc4->render_job_list, | ||
330 | struct vc4_exec_info, head); | ||
331 | } | ||
332 | |||
324 | /** | 333 | /** |
325 | * struct vc4_texture_sample_info - saves the offsets into the UBO for texture | 334 | * struct vc4_texture_sample_info - saves the offsets into the UBO for texture |
326 | * setup parameters. | 335 | * setup parameters. |
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index 27c52ec35193..77daea6cb866 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c | |||
@@ -530,8 +530,8 @@ vc4_cl_lookup_bos(struct drm_device *dev, | |||
530 | return -EINVAL; | 530 | return -EINVAL; |
531 | } | 531 | } |
532 | 532 | ||
533 | exec->bo = kcalloc(exec->bo_count, sizeof(struct drm_gem_cma_object *), | 533 | exec->bo = drm_calloc_large(exec->bo_count, |
534 | GFP_KERNEL); | 534 | sizeof(struct drm_gem_cma_object *)); |
535 | if (!exec->bo) { | 535 | if (!exec->bo) { |
536 | DRM_ERROR("Failed to allocate validated BO pointers\n"); | 536 | DRM_ERROR("Failed to allocate validated BO pointers\n"); |
537 | return -ENOMEM; | 537 | return -ENOMEM; |
@@ -568,8 +568,8 @@ vc4_cl_lookup_bos(struct drm_device *dev, | |||
568 | spin_unlock(&file_priv->table_lock); | 568 | spin_unlock(&file_priv->table_lock); |
569 | 569 | ||
570 | fail: | 570 | fail: |
571 | kfree(handles); | 571 | drm_free_large(handles); |
572 | return 0; | 572 | return ret; |
573 | } | 573 | } |
574 | 574 | ||
575 | static int | 575 | static int |
@@ -604,7 +604,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec) | |||
604 | * read the contents back for validation, and I think the | 604 | * read the contents back for validation, and I think the |
605 | * bo->vaddr is uncached access. | 605 | * bo->vaddr is uncached access. |
606 | */ | 606 | */ |
607 | temp = kmalloc(temp_size, GFP_KERNEL); | 607 | temp = drm_malloc_ab(temp_size, 1); |
608 | if (!temp) { | 608 | if (!temp) { |
609 | DRM_ERROR("Failed to allocate storage for copying " | 609 | DRM_ERROR("Failed to allocate storage for copying " |
610 | "in bin/render CLs.\n"); | 610 | "in bin/render CLs.\n"); |
@@ -671,7 +671,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec) | |||
671 | ret = vc4_validate_shader_recs(dev, exec); | 671 | ret = vc4_validate_shader_recs(dev, exec); |
672 | 672 | ||
673 | fail: | 673 | fail: |
674 | kfree(temp); | 674 | drm_free_large(temp); |
675 | return ret; | 675 | return ret; |
676 | } | 676 | } |
677 | 677 | ||
@@ -684,7 +684,7 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec) | |||
684 | if (exec->bo) { | 684 | if (exec->bo) { |
685 | for (i = 0; i < exec->bo_count; i++) | 685 | for (i = 0; i < exec->bo_count; i++) |
686 | drm_gem_object_unreference_unlocked(&exec->bo[i]->base); | 686 | drm_gem_object_unreference_unlocked(&exec->bo[i]->base); |
687 | kfree(exec->bo); | 687 | drm_free_large(exec->bo); |
688 | } | 688 | } |
689 | 689 | ||
690 | while (!list_empty(&exec->unref_list)) { | 690 | while (!list_empty(&exec->unref_list)) { |
@@ -938,8 +938,8 @@ vc4_gem_destroy(struct drm_device *dev) | |||
938 | vc4->overflow_mem = NULL; | 938 | vc4->overflow_mem = NULL; |
939 | } | 939 | } |
940 | 940 | ||
941 | vc4_bo_cache_destroy(dev); | ||
942 | |||
943 | if (vc4->hang_state) | 941 | if (vc4->hang_state) |
944 | vc4_free_hang_state(dev, vc4->hang_state); | 942 | vc4_free_hang_state(dev, vc4->hang_state); |
943 | |||
944 | vc4_bo_cache_destroy(dev); | ||
945 | } | 945 | } |
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c index b0104a346a74..094bc6a475c1 100644 --- a/drivers/gpu/drm/vc4/vc4_irq.c +++ b/drivers/gpu/drm/vc4/vc4_irq.c | |||
@@ -83,8 +83,10 @@ vc4_overflow_mem_work(struct work_struct *work) | |||
83 | 83 | ||
84 | spin_lock_irqsave(&vc4->job_lock, irqflags); | 84 | spin_lock_irqsave(&vc4->job_lock, irqflags); |
85 | current_exec = vc4_first_bin_job(vc4); | 85 | current_exec = vc4_first_bin_job(vc4); |
86 | if (!current_exec) | ||
87 | current_exec = vc4_last_render_job(vc4); | ||
86 | if (current_exec) { | 88 | if (current_exec) { |
87 | vc4->overflow_mem->seqno = vc4->finished_seqno + 1; | 89 | vc4->overflow_mem->seqno = current_exec->seqno; |
88 | list_add_tail(&vc4->overflow_mem->unref_head, | 90 | list_add_tail(&vc4->overflow_mem->unref_head, |
89 | ¤t_exec->unref_list); | 91 | ¤t_exec->unref_list); |
90 | vc4->overflow_mem = NULL; | 92 | vc4->overflow_mem = NULL; |
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c index 46527e989ce3..2543cf5b8b51 100644 --- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c +++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c | |||
@@ -309,8 +309,14 @@ validate_uniform_address_write(struct vc4_validated_shader_info *validated_shade | |||
309 | * of uniforms on each side. However, this scheme is easy to | 309 | * of uniforms on each side. However, this scheme is easy to |
310 | * validate so it's all we allow for now. | 310 | * validate so it's all we allow for now. |
311 | */ | 311 | */ |
312 | 312 | switch (QPU_GET_FIELD(inst, QPU_SIG)) { | |
313 | if (QPU_GET_FIELD(inst, QPU_SIG) != QPU_SIG_NONE) { | 313 | case QPU_SIG_NONE: |
314 | case QPU_SIG_SCOREBOARD_UNLOCK: | ||
315 | case QPU_SIG_COLOR_LOAD: | ||
316 | case QPU_SIG_LOAD_TMU0: | ||
317 | case QPU_SIG_LOAD_TMU1: | ||
318 | break; | ||
319 | default: | ||
314 | DRM_ERROR("uniforms address change must be " | 320 | DRM_ERROR("uniforms address change must be " |
315 | "normal math\n"); | 321 | "normal math\n"); |
316 | return false; | 322 | return false; |
diff --git a/drivers/gpu/host1x/mipi.c b/drivers/gpu/host1x/mipi.c index 52a6fd224127..e00809d996a2 100644 --- a/drivers/gpu/host1x/mipi.c +++ b/drivers/gpu/host1x/mipi.c | |||
@@ -242,20 +242,6 @@ struct tegra_mipi_device *tegra_mipi_request(struct device *device) | |||
242 | dev->pads = args.args[0]; | 242 | dev->pads = args.args[0]; |
243 | dev->device = device; | 243 | dev->device = device; |
244 | 244 | ||
245 | mutex_lock(&dev->mipi->lock); | ||
246 | |||
247 | if (dev->mipi->usage_count++ == 0) { | ||
248 | err = tegra_mipi_power_up(dev->mipi); | ||
249 | if (err < 0) { | ||
250 | dev_err(dev->mipi->dev, | ||
251 | "failed to power up MIPI bricks: %d\n", | ||
252 | err); | ||
253 | return ERR_PTR(err); | ||
254 | } | ||
255 | } | ||
256 | |||
257 | mutex_unlock(&dev->mipi->lock); | ||
258 | |||
259 | return dev; | 245 | return dev; |
260 | 246 | ||
261 | put: | 247 | put: |
@@ -270,29 +256,42 @@ EXPORT_SYMBOL(tegra_mipi_request); | |||
270 | 256 | ||
271 | void tegra_mipi_free(struct tegra_mipi_device *device) | 257 | void tegra_mipi_free(struct tegra_mipi_device *device) |
272 | { | 258 | { |
273 | int err; | 259 | platform_device_put(device->pdev); |
260 | kfree(device); | ||
261 | } | ||
262 | EXPORT_SYMBOL(tegra_mipi_free); | ||
274 | 263 | ||
275 | mutex_lock(&device->mipi->lock); | 264 | int tegra_mipi_enable(struct tegra_mipi_device *dev) |
265 | { | ||
266 | int err = 0; | ||
276 | 267 | ||
277 | if (--device->mipi->usage_count == 0) { | 268 | mutex_lock(&dev->mipi->lock); |
278 | err = tegra_mipi_power_down(device->mipi); | ||
279 | if (err < 0) { | ||
280 | /* | ||
281 | * Not much that can be done here, so an error message | ||
282 | * will have to do. | ||
283 | */ | ||
284 | dev_err(device->mipi->dev, | ||
285 | "failed to power down MIPI bricks: %d\n", | ||
286 | err); | ||
287 | } | ||
288 | } | ||
289 | 269 | ||
290 | mutex_unlock(&device->mipi->lock); | 270 | if (dev->mipi->usage_count++ == 0) |
271 | err = tegra_mipi_power_up(dev->mipi); | ||
272 | |||
273 | mutex_unlock(&dev->mipi->lock); | ||
274 | |||
275 | return err; | ||
291 | 276 | ||
292 | platform_device_put(device->pdev); | ||
293 | kfree(device); | ||
294 | } | 277 | } |
295 | EXPORT_SYMBOL(tegra_mipi_free); | 278 | EXPORT_SYMBOL(tegra_mipi_enable); |
279 | |||
280 | int tegra_mipi_disable(struct tegra_mipi_device *dev) | ||
281 | { | ||
282 | int err = 0; | ||
283 | |||
284 | mutex_lock(&dev->mipi->lock); | ||
285 | |||
286 | if (--dev->mipi->usage_count == 0) | ||
287 | err = tegra_mipi_power_down(dev->mipi); | ||
288 | |||
289 | mutex_unlock(&dev->mipi->lock); | ||
290 | |||
291 | return err; | ||
292 | |||
293 | } | ||
294 | EXPORT_SYMBOL(tegra_mipi_disable); | ||
296 | 295 | ||
297 | static int tegra_mipi_wait(struct tegra_mipi *mipi) | 296 | static int tegra_mipi_wait(struct tegra_mipi *mipi) |
298 | { | 297 | { |
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c index 730d84028260..4667012b46b7 100644 --- a/drivers/hwmon/it87.c +++ b/drivers/hwmon/it87.c | |||
@@ -491,7 +491,7 @@ struct it87_sio_data { | |||
491 | struct it87_data { | 491 | struct it87_data { |
492 | const struct attribute_group *groups[7]; | 492 | const struct attribute_group *groups[7]; |
493 | enum chips type; | 493 | enum chips type; |
494 | u16 features; | 494 | u32 features; |
495 | u8 peci_mask; | 495 | u8 peci_mask; |
496 | u8 old_peci_mask; | 496 | u8 old_peci_mask; |
497 | 497 | ||
@@ -2015,6 +2015,7 @@ static struct attribute *it87_attributes_in[] = { | |||
2015 | &sensor_dev_attr_in10_input.dev_attr.attr, /* 41 */ | 2015 | &sensor_dev_attr_in10_input.dev_attr.attr, /* 41 */ |
2016 | &sensor_dev_attr_in11_input.dev_attr.attr, /* 41 */ | 2016 | &sensor_dev_attr_in11_input.dev_attr.attr, /* 41 */ |
2017 | &sensor_dev_attr_in12_input.dev_attr.attr, /* 41 */ | 2017 | &sensor_dev_attr_in12_input.dev_attr.attr, /* 41 */ |
2018 | NULL | ||
2018 | }; | 2019 | }; |
2019 | 2020 | ||
2020 | static const struct attribute_group it87_group_in = { | 2021 | static const struct attribute_group it87_group_in = { |
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c index f23372669f77..1bb97f658b47 100644 --- a/drivers/i2c/busses/i2c-at91.c +++ b/drivers/i2c/busses/i2c-at91.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #define AT91_I2C_TIMEOUT msecs_to_jiffies(100) /* transfer timeout */ | 38 | #define AT91_I2C_TIMEOUT msecs_to_jiffies(100) /* transfer timeout */ |
39 | #define AT91_I2C_DMA_THRESHOLD 8 /* enable DMA if transfer size is bigger than this threshold */ | 39 | #define AT91_I2C_DMA_THRESHOLD 8 /* enable DMA if transfer size is bigger than this threshold */ |
40 | #define AUTOSUSPEND_TIMEOUT 2000 | 40 | #define AUTOSUSPEND_TIMEOUT 2000 |
41 | #define AT91_I2C_MAX_ALT_CMD_DATA_SIZE 256 | ||
41 | 42 | ||
42 | /* AT91 TWI register definitions */ | 43 | /* AT91 TWI register definitions */ |
43 | #define AT91_TWI_CR 0x0000 /* Control Register */ | 44 | #define AT91_TWI_CR 0x0000 /* Control Register */ |
@@ -141,6 +142,7 @@ struct at91_twi_dev { | |||
141 | unsigned twi_cwgr_reg; | 142 | unsigned twi_cwgr_reg; |
142 | struct at91_twi_pdata *pdata; | 143 | struct at91_twi_pdata *pdata; |
143 | bool use_dma; | 144 | bool use_dma; |
145 | bool use_alt_cmd; | ||
144 | bool recv_len_abort; | 146 | bool recv_len_abort; |
145 | u32 fifo_size; | 147 | u32 fifo_size; |
146 | struct at91_twi_dma dma; | 148 | struct at91_twi_dma dma; |
@@ -269,7 +271,7 @@ static void at91_twi_write_next_byte(struct at91_twi_dev *dev) | |||
269 | 271 | ||
270 | /* send stop when last byte has been written */ | 272 | /* send stop when last byte has been written */ |
271 | if (--dev->buf_len == 0) | 273 | if (--dev->buf_len == 0) |
272 | if (!dev->pdata->has_alt_cmd) | 274 | if (!dev->use_alt_cmd) |
273 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); | 275 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); |
274 | 276 | ||
275 | dev_dbg(dev->dev, "wrote 0x%x, to go %d\n", *dev->buf, dev->buf_len); | 277 | dev_dbg(dev->dev, "wrote 0x%x, to go %d\n", *dev->buf, dev->buf_len); |
@@ -292,7 +294,7 @@ static void at91_twi_write_data_dma_callback(void *data) | |||
292 | * we just have to enable TXCOMP one. | 294 | * we just have to enable TXCOMP one. |
293 | */ | 295 | */ |
294 | at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP); | 296 | at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP); |
295 | if (!dev->pdata->has_alt_cmd) | 297 | if (!dev->use_alt_cmd) |
296 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); | 298 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); |
297 | } | 299 | } |
298 | 300 | ||
@@ -410,7 +412,7 @@ static void at91_twi_read_next_byte(struct at91_twi_dev *dev) | |||
410 | } | 412 | } |
411 | 413 | ||
412 | /* send stop if second but last byte has been read */ | 414 | /* send stop if second but last byte has been read */ |
413 | if (!dev->pdata->has_alt_cmd && dev->buf_len == 1) | 415 | if (!dev->use_alt_cmd && dev->buf_len == 1) |
414 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); | 416 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); |
415 | 417 | ||
416 | dev_dbg(dev->dev, "read 0x%x, to go %d\n", *dev->buf, dev->buf_len); | 418 | dev_dbg(dev->dev, "read 0x%x, to go %d\n", *dev->buf, dev->buf_len); |
@@ -426,7 +428,7 @@ static void at91_twi_read_data_dma_callback(void *data) | |||
426 | dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]), | 428 | dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]), |
427 | dev->buf_len, DMA_FROM_DEVICE); | 429 | dev->buf_len, DMA_FROM_DEVICE); |
428 | 430 | ||
429 | if (!dev->pdata->has_alt_cmd) { | 431 | if (!dev->use_alt_cmd) { |
430 | /* The last two bytes have to be read without using dma */ | 432 | /* The last two bytes have to be read without using dma */ |
431 | dev->buf += dev->buf_len - 2; | 433 | dev->buf += dev->buf_len - 2; |
432 | dev->buf_len = 2; | 434 | dev->buf_len = 2; |
@@ -443,7 +445,7 @@ static void at91_twi_read_data_dma(struct at91_twi_dev *dev) | |||
443 | struct dma_chan *chan_rx = dma->chan_rx; | 445 | struct dma_chan *chan_rx = dma->chan_rx; |
444 | size_t buf_len; | 446 | size_t buf_len; |
445 | 447 | ||
446 | buf_len = (dev->pdata->has_alt_cmd) ? dev->buf_len : dev->buf_len - 2; | 448 | buf_len = (dev->use_alt_cmd) ? dev->buf_len : dev->buf_len - 2; |
447 | dma->direction = DMA_FROM_DEVICE; | 449 | dma->direction = DMA_FROM_DEVICE; |
448 | 450 | ||
449 | /* Keep in mind that we won't use dma to read the last two bytes */ | 451 | /* Keep in mind that we won't use dma to read the last two bytes */ |
@@ -651,7 +653,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) | |||
651 | unsigned start_flags = AT91_TWI_START; | 653 | unsigned start_flags = AT91_TWI_START; |
652 | 654 | ||
653 | /* if only one byte is to be read, immediately stop transfer */ | 655 | /* if only one byte is to be read, immediately stop transfer */ |
654 | if (!has_alt_cmd && dev->buf_len <= 1 && | 656 | if (!dev->use_alt_cmd && dev->buf_len <= 1 && |
655 | !(dev->msg->flags & I2C_M_RECV_LEN)) | 657 | !(dev->msg->flags & I2C_M_RECV_LEN)) |
656 | start_flags |= AT91_TWI_STOP; | 658 | start_flags |= AT91_TWI_STOP; |
657 | at91_twi_write(dev, AT91_TWI_CR, start_flags); | 659 | at91_twi_write(dev, AT91_TWI_CR, start_flags); |
@@ -745,7 +747,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) | |||
745 | int ret; | 747 | int ret; |
746 | unsigned int_addr_flag = 0; | 748 | unsigned int_addr_flag = 0; |
747 | struct i2c_msg *m_start = msg; | 749 | struct i2c_msg *m_start = msg; |
748 | bool is_read, use_alt_cmd = false; | 750 | bool is_read; |
749 | 751 | ||
750 | dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num); | 752 | dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num); |
751 | 753 | ||
@@ -768,14 +770,16 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) | |||
768 | at91_twi_write(dev, AT91_TWI_IADR, internal_address); | 770 | at91_twi_write(dev, AT91_TWI_IADR, internal_address); |
769 | } | 771 | } |
770 | 772 | ||
773 | dev->use_alt_cmd = false; | ||
771 | is_read = (m_start->flags & I2C_M_RD); | 774 | is_read = (m_start->flags & I2C_M_RD); |
772 | if (dev->pdata->has_alt_cmd) { | 775 | if (dev->pdata->has_alt_cmd) { |
773 | if (m_start->len > 0) { | 776 | if (m_start->len > 0 && |
777 | m_start->len < AT91_I2C_MAX_ALT_CMD_DATA_SIZE) { | ||
774 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMEN); | 778 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMEN); |
775 | at91_twi_write(dev, AT91_TWI_ACR, | 779 | at91_twi_write(dev, AT91_TWI_ACR, |
776 | AT91_TWI_ACR_DATAL(m_start->len) | | 780 | AT91_TWI_ACR_DATAL(m_start->len) | |
777 | ((is_read) ? AT91_TWI_ACR_DIR : 0)); | 781 | ((is_read) ? AT91_TWI_ACR_DIR : 0)); |
778 | use_alt_cmd = true; | 782 | dev->use_alt_cmd = true; |
779 | } else { | 783 | } else { |
780 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMDIS); | 784 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMDIS); |
781 | } | 785 | } |
@@ -784,7 +788,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) | |||
784 | at91_twi_write(dev, AT91_TWI_MMR, | 788 | at91_twi_write(dev, AT91_TWI_MMR, |
785 | (m_start->addr << 16) | | 789 | (m_start->addr << 16) | |
786 | int_addr_flag | | 790 | int_addr_flag | |
787 | ((!use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0)); | 791 | ((!dev->use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0)); |
788 | 792 | ||
789 | dev->buf_len = m_start->len; | 793 | dev->buf_len = m_start->len; |
790 | dev->buf = m_start->buf; | 794 | dev->buf = m_start->buf; |
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c index 19c843828fe2..95f7cac76f89 100644 --- a/drivers/i2c/busses/i2c-bcm-iproc.c +++ b/drivers/i2c/busses/i2c-bcm-iproc.c | |||
@@ -158,7 +158,7 @@ static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data) | |||
158 | 158 | ||
159 | if (status & BIT(IS_M_START_BUSY_SHIFT)) { | 159 | if (status & BIT(IS_M_START_BUSY_SHIFT)) { |
160 | iproc_i2c->xfer_is_done = 1; | 160 | iproc_i2c->xfer_is_done = 1; |
161 | complete_all(&iproc_i2c->done); | 161 | complete(&iproc_i2c->done); |
162 | } | 162 | } |
163 | 163 | ||
164 | writel(status, iproc_i2c->base + IS_OFFSET); | 164 | writel(status, iproc_i2c->base + IS_OFFSET); |
diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c index ac9f47679c3a..258cb9a40ab3 100644 --- a/drivers/i2c/busses/i2c-bcm-kona.c +++ b/drivers/i2c/busses/i2c-bcm-kona.c | |||
@@ -229,7 +229,7 @@ static irqreturn_t bcm_kona_i2c_isr(int irq, void *devid) | |||
229 | dev->base + TXFCR_OFFSET); | 229 | dev->base + TXFCR_OFFSET); |
230 | 230 | ||
231 | writel(status & ~ISR_RESERVED_MASK, dev->base + ISR_OFFSET); | 231 | writel(status & ~ISR_RESERVED_MASK, dev->base + ISR_OFFSET); |
232 | complete_all(&dev->done); | 232 | complete(&dev->done); |
233 | 233 | ||
234 | return IRQ_HANDLED; | 234 | return IRQ_HANDLED; |
235 | } | 235 | } |
@@ -643,7 +643,7 @@ static int bcm_kona_i2c_xfer(struct i2c_adapter *adapter, | |||
643 | if (rc < 0) { | 643 | if (rc < 0) { |
644 | dev_err(dev->device, | 644 | dev_err(dev->device, |
645 | "restart cmd failed rc = %d\n", rc); | 645 | "restart cmd failed rc = %d\n", rc); |
646 | goto xfer_send_stop; | 646 | goto xfer_send_stop; |
647 | } | 647 | } |
648 | } | 648 | } |
649 | 649 | ||
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c index 3f5a4d71d3bf..385b57bfcb38 100644 --- a/drivers/i2c/busses/i2c-brcmstb.c +++ b/drivers/i2c/busses/i2c-brcmstb.c | |||
@@ -228,7 +228,7 @@ static irqreturn_t brcmstb_i2c_isr(int irq, void *devid) | |||
228 | return IRQ_NONE; | 228 | return IRQ_NONE; |
229 | 229 | ||
230 | brcmstb_i2c_enable_disable_irq(dev, INT_DISABLE); | 230 | brcmstb_i2c_enable_disable_irq(dev, INT_DISABLE); |
231 | complete_all(&dev->done); | 231 | complete(&dev->done); |
232 | 232 | ||
233 | dev_dbg(dev->device, "isr handled"); | 233 | dev_dbg(dev->device, "isr handled"); |
234 | return IRQ_HANDLED; | 234 | return IRQ_HANDLED; |
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index 90bbd9f9dd8f..3c16a2f7c673 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c | |||
@@ -767,7 +767,7 @@ static int cdns_i2c_setclk(unsigned long clk_in, struct cdns_i2c *id) | |||
767 | * depending on the scaling direction. | 767 | * depending on the scaling direction. |
768 | * | 768 | * |
769 | * Return: NOTIFY_STOP if the rate change should be aborted, NOTIFY_OK | 769 | * Return: NOTIFY_STOP if the rate change should be aborted, NOTIFY_OK |
770 | * to acknowedge the change, NOTIFY_DONE if the notification is | 770 | * to acknowledge the change, NOTIFY_DONE if the notification is |
771 | * considered irrelevant. | 771 | * considered irrelevant. |
772 | */ | 772 | */ |
773 | static int cdns_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long | 773 | static int cdns_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long |
diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c index a0d95ff682ae..2d5ff86398d0 100644 --- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c +++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c | |||
@@ -215,7 +215,7 @@ static int ec_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg i2c_msgs[], | |||
215 | msg->outsize = request_len; | 215 | msg->outsize = request_len; |
216 | msg->insize = response_len; | 216 | msg->insize = response_len; |
217 | 217 | ||
218 | result = cros_ec_cmd_xfer(bus->ec, msg); | 218 | result = cros_ec_cmd_xfer_status(bus->ec, msg); |
219 | if (result < 0) { | 219 | if (result < 0) { |
220 | dev_err(dev, "Error transferring EC i2c message %d\n", result); | 220 | dev_err(dev, "Error transferring EC i2c message %d\n", result); |
221 | goto exit; | 221 | goto exit; |
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c index c6922b806fb7..fcd973d5131e 100644 --- a/drivers/i2c/busses/i2c-designware-core.c +++ b/drivers/i2c/busses/i2c-designware-core.c | |||
@@ -367,13 +367,17 @@ int i2c_dw_init(struct dw_i2c_dev *dev) | |||
367 | dev_dbg(dev->dev, "Fast-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt); | 367 | dev_dbg(dev->dev, "Fast-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt); |
368 | 368 | ||
369 | /* Configure SDA Hold Time if required */ | 369 | /* Configure SDA Hold Time if required */ |
370 | if (dev->sda_hold_time) { | 370 | reg = dw_readl(dev, DW_IC_COMP_VERSION); |
371 | reg = dw_readl(dev, DW_IC_COMP_VERSION); | 371 | if (reg >= DW_IC_SDA_HOLD_MIN_VERS) { |
372 | if (reg >= DW_IC_SDA_HOLD_MIN_VERS) | 372 | if (dev->sda_hold_time) { |
373 | dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD); | 373 | dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD); |
374 | else | 374 | } else { |
375 | dev_warn(dev->dev, | 375 | /* Keep previous hold time setting if no one set it */ |
376 | "Hardware too old to adjust SDA hold time."); | 376 | dev->sda_hold_time = dw_readl(dev, DW_IC_SDA_HOLD); |
377 | } | ||
378 | } else { | ||
379 | dev_warn(dev->dev, | ||
380 | "Hardware too old to adjust SDA hold time.\n"); | ||
377 | } | 381 | } |
378 | 382 | ||
379 | /* Configure Tx/Rx FIFO threshold levels */ | 383 | /* Configure Tx/Rx FIFO threshold levels */ |
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c index 137125b5eae7..5ce71ce7b6c4 100644 --- a/drivers/i2c/busses/i2c-eg20t.c +++ b/drivers/i2c/busses/i2c-eg20t.c | |||
@@ -773,13 +773,6 @@ static int pch_i2c_probe(struct pci_dev *pdev, | |||
773 | /* Set the number of I2C channel instance */ | 773 | /* Set the number of I2C channel instance */ |
774 | adap_info->ch_num = id->driver_data; | 774 | adap_info->ch_num = id->driver_data; |
775 | 775 | ||
776 | ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED, | ||
777 | KBUILD_MODNAME, adap_info); | ||
778 | if (ret) { | ||
779 | pch_pci_err(pdev, "request_irq FAILED\n"); | ||
780 | goto err_request_irq; | ||
781 | } | ||
782 | |||
783 | for (i = 0; i < adap_info->ch_num; i++) { | 776 | for (i = 0; i < adap_info->ch_num; i++) { |
784 | pch_adap = &adap_info->pch_data[i].pch_adapter; | 777 | pch_adap = &adap_info->pch_data[i].pch_adapter; |
785 | adap_info->pch_i2c_suspended = false; | 778 | adap_info->pch_i2c_suspended = false; |
@@ -797,6 +790,17 @@ static int pch_i2c_probe(struct pci_dev *pdev, | |||
797 | 790 | ||
798 | pch_adap->dev.of_node = pdev->dev.of_node; | 791 | pch_adap->dev.of_node = pdev->dev.of_node; |
799 | pch_adap->dev.parent = &pdev->dev; | 792 | pch_adap->dev.parent = &pdev->dev; |
793 | } | ||
794 | |||
795 | ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED, | ||
796 | KBUILD_MODNAME, adap_info); | ||
797 | if (ret) { | ||
798 | pch_pci_err(pdev, "request_irq FAILED\n"); | ||
799 | goto err_request_irq; | ||
800 | } | ||
801 | |||
802 | for (i = 0; i < adap_info->ch_num; i++) { | ||
803 | pch_adap = &adap_info->pch_data[i].pch_adapter; | ||
800 | 804 | ||
801 | pch_i2c_init(&adap_info->pch_data[i]); | 805 | pch_i2c_init(&adap_info->pch_data[i]); |
802 | 806 | ||
diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c index 71d3929adf54..76e28980904f 100644 --- a/drivers/i2c/busses/i2c-meson.c +++ b/drivers/i2c/busses/i2c-meson.c | |||
@@ -211,7 +211,7 @@ static void meson_i2c_stop(struct meson_i2c *i2c) | |||
211 | meson_i2c_add_token(i2c, TOKEN_STOP); | 211 | meson_i2c_add_token(i2c, TOKEN_STOP); |
212 | } else { | 212 | } else { |
213 | i2c->state = STATE_IDLE; | 213 | i2c->state = STATE_IDLE; |
214 | complete_all(&i2c->done); | 214 | complete(&i2c->done); |
215 | } | 215 | } |
216 | } | 216 | } |
217 | 217 | ||
@@ -238,7 +238,7 @@ static irqreturn_t meson_i2c_irq(int irqno, void *dev_id) | |||
238 | dev_dbg(i2c->dev, "error bit set\n"); | 238 | dev_dbg(i2c->dev, "error bit set\n"); |
239 | i2c->error = -ENXIO; | 239 | i2c->error = -ENXIO; |
240 | i2c->state = STATE_IDLE; | 240 | i2c->state = STATE_IDLE; |
241 | complete_all(&i2c->done); | 241 | complete(&i2c->done); |
242 | goto out; | 242 | goto out; |
243 | } | 243 | } |
244 | 244 | ||
@@ -269,7 +269,7 @@ static irqreturn_t meson_i2c_irq(int irqno, void *dev_id) | |||
269 | break; | 269 | break; |
270 | case STATE_STOP: | 270 | case STATE_STOP: |
271 | i2c->state = STATE_IDLE; | 271 | i2c->state = STATE_IDLE; |
272 | complete_all(&i2c->done); | 272 | complete(&i2c->done); |
273 | break; | 273 | break; |
274 | case STATE_IDLE: | 274 | case STATE_IDLE: |
275 | break; | 275 | break; |
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c index dfa7a4b4a91d..ac88a524143e 100644 --- a/drivers/i2c/busses/i2c-ocores.c +++ b/drivers/i2c/busses/i2c-ocores.c | |||
@@ -379,6 +379,7 @@ static int ocores_i2c_of_probe(struct platform_device *pdev, | |||
379 | if (!clock_frequency_present) { | 379 | if (!clock_frequency_present) { |
380 | dev_err(&pdev->dev, | 380 | dev_err(&pdev->dev, |
381 | "Missing required parameter 'opencores,ip-clock-frequency'\n"); | 381 | "Missing required parameter 'opencores,ip-clock-frequency'\n"); |
382 | clk_disable_unprepare(i2c->clk); | ||
382 | return -ENODEV; | 383 | return -ENODEV; |
383 | } | 384 | } |
384 | i2c->ip_clock_khz = clock_frequency / 1000; | 385 | i2c->ip_clock_khz = clock_frequency / 1000; |
@@ -467,20 +468,21 @@ static int ocores_i2c_probe(struct platform_device *pdev) | |||
467 | default: | 468 | default: |
468 | dev_err(&pdev->dev, "Unsupported I/O width (%d)\n", | 469 | dev_err(&pdev->dev, "Unsupported I/O width (%d)\n", |
469 | i2c->reg_io_width); | 470 | i2c->reg_io_width); |
470 | return -EINVAL; | 471 | ret = -EINVAL; |
472 | goto err_clk; | ||
471 | } | 473 | } |
472 | } | 474 | } |
473 | 475 | ||
474 | ret = ocores_init(&pdev->dev, i2c); | 476 | ret = ocores_init(&pdev->dev, i2c); |
475 | if (ret) | 477 | if (ret) |
476 | return ret; | 478 | goto err_clk; |
477 | 479 | ||
478 | init_waitqueue_head(&i2c->wait); | 480 | init_waitqueue_head(&i2c->wait); |
479 | ret = devm_request_irq(&pdev->dev, irq, ocores_isr, 0, | 481 | ret = devm_request_irq(&pdev->dev, irq, ocores_isr, 0, |
480 | pdev->name, i2c); | 482 | pdev->name, i2c); |
481 | if (ret) { | 483 | if (ret) { |
482 | dev_err(&pdev->dev, "Cannot claim IRQ\n"); | 484 | dev_err(&pdev->dev, "Cannot claim IRQ\n"); |
483 | return ret; | 485 | goto err_clk; |
484 | } | 486 | } |
485 | 487 | ||
486 | /* hook up driver to tree */ | 488 | /* hook up driver to tree */ |
@@ -494,7 +496,7 @@ static int ocores_i2c_probe(struct platform_device *pdev) | |||
494 | ret = i2c_add_adapter(&i2c->adap); | 496 | ret = i2c_add_adapter(&i2c->adap); |
495 | if (ret) { | 497 | if (ret) { |
496 | dev_err(&pdev->dev, "Failed to add adapter\n"); | 498 | dev_err(&pdev->dev, "Failed to add adapter\n"); |
497 | return ret; | 499 | goto err_clk; |
498 | } | 500 | } |
499 | 501 | ||
500 | /* add in known devices to the bus */ | 502 | /* add in known devices to the bus */ |
@@ -504,6 +506,10 @@ static int ocores_i2c_probe(struct platform_device *pdev) | |||
504 | } | 506 | } |
505 | 507 | ||
506 | return 0; | 508 | return 0; |
509 | |||
510 | err_clk: | ||
511 | clk_disable_unprepare(i2c->clk); | ||
512 | return ret; | ||
507 | } | 513 | } |
508 | 514 | ||
509 | static int ocores_i2c_remove(struct platform_device *pdev) | 515 | static int ocores_i2c_remove(struct platform_device *pdev) |
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c index 501bd15cb78e..a8497cfdae6f 100644 --- a/drivers/i2c/busses/i2c-qup.c +++ b/drivers/i2c/busses/i2c-qup.c | |||
@@ -1599,7 +1599,8 @@ static int qup_i2c_pm_resume_runtime(struct device *device) | |||
1599 | #ifdef CONFIG_PM_SLEEP | 1599 | #ifdef CONFIG_PM_SLEEP |
1600 | static int qup_i2c_suspend(struct device *device) | 1600 | static int qup_i2c_suspend(struct device *device) |
1601 | { | 1601 | { |
1602 | qup_i2c_pm_suspend_runtime(device); | 1602 | if (!pm_runtime_suspended(device)) |
1603 | return qup_i2c_pm_suspend_runtime(device); | ||
1603 | return 0; | 1604 | return 0; |
1604 | } | 1605 | } |
1605 | 1606 | ||
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index 52407f3c9e1c..9bd849dacee8 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c | |||
@@ -378,7 +378,7 @@ static void rcar_i2c_dma(struct rcar_i2c_priv *priv) | |||
378 | } | 378 | } |
379 | 379 | ||
380 | dma_addr = dma_map_single(chan->device->dev, buf, len, dir); | 380 | dma_addr = dma_map_single(chan->device->dev, buf, len, dir); |
381 | if (dma_mapping_error(dev, dma_addr)) { | 381 | if (dma_mapping_error(chan->device->dev, dma_addr)) { |
382 | dev_dbg(dev, "dma map failed, using PIO\n"); | 382 | dev_dbg(dev, "dma map failed, using PIO\n"); |
383 | return; | 383 | return; |
384 | } | 384 | } |
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c index 2bc8b01153d6..5c5b7cada8be 100644 --- a/drivers/i2c/busses/i2c-rk3x.c +++ b/drivers/i2c/busses/i2c-rk3x.c | |||
@@ -918,7 +918,7 @@ static void rk3x_i2c_adapt_div(struct rk3x_i2c *i2c, unsigned long clk_rate) | |||
918 | * Code adapted from i2c-cadence.c. | 918 | * Code adapted from i2c-cadence.c. |
919 | * | 919 | * |
920 | * Return: NOTIFY_STOP if the rate change should be aborted, NOTIFY_OK | 920 | * Return: NOTIFY_STOP if the rate change should be aborted, NOTIFY_OK |
921 | * to acknowedge the change, NOTIFY_DONE if the notification is | 921 | * to acknowledge the change, NOTIFY_DONE if the notification is |
922 | * considered irrelevant. | 922 | * considered irrelevant. |
923 | */ | 923 | */ |
924 | static int rk3x_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long | 924 | static int rk3x_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long |
@@ -1111,6 +1111,15 @@ static int rk3x_i2c_xfer(struct i2c_adapter *adap, | |||
1111 | return ret < 0 ? ret : num; | 1111 | return ret < 0 ? ret : num; |
1112 | } | 1112 | } |
1113 | 1113 | ||
1114 | static __maybe_unused int rk3x_i2c_resume(struct device *dev) | ||
1115 | { | ||
1116 | struct rk3x_i2c *i2c = dev_get_drvdata(dev); | ||
1117 | |||
1118 | rk3x_i2c_adapt_div(i2c, clk_get_rate(i2c->clk)); | ||
1119 | |||
1120 | return 0; | ||
1121 | } | ||
1122 | |||
1114 | static u32 rk3x_i2c_func(struct i2c_adapter *adap) | 1123 | static u32 rk3x_i2c_func(struct i2c_adapter *adap) |
1115 | { | 1124 | { |
1116 | return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING; | 1125 | return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING; |
@@ -1334,12 +1343,15 @@ static int rk3x_i2c_remove(struct platform_device *pdev) | |||
1334 | return 0; | 1343 | return 0; |
1335 | } | 1344 | } |
1336 | 1345 | ||
1346 | static SIMPLE_DEV_PM_OPS(rk3x_i2c_pm_ops, NULL, rk3x_i2c_resume); | ||
1347 | |||
1337 | static struct platform_driver rk3x_i2c_driver = { | 1348 | static struct platform_driver rk3x_i2c_driver = { |
1338 | .probe = rk3x_i2c_probe, | 1349 | .probe = rk3x_i2c_probe, |
1339 | .remove = rk3x_i2c_remove, | 1350 | .remove = rk3x_i2c_remove, |
1340 | .driver = { | 1351 | .driver = { |
1341 | .name = "rk3x-i2c", | 1352 | .name = "rk3x-i2c", |
1342 | .of_match_table = rk3x_i2c_match, | 1353 | .of_match_table = rk3x_i2c_match, |
1354 | .pm = &rk3x_i2c_pm_ops, | ||
1343 | }, | 1355 | }, |
1344 | }; | 1356 | }; |
1345 | 1357 | ||
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index 6fb3e2645992..05b1eeab9cf5 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c | |||
@@ -610,7 +610,7 @@ static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd) | |||
610 | return; | 610 | return; |
611 | 611 | ||
612 | dma_addr = dma_map_single(chan->device->dev, pd->msg->buf, pd->msg->len, dir); | 612 | dma_addr = dma_map_single(chan->device->dev, pd->msg->buf, pd->msg->len, dir); |
613 | if (dma_mapping_error(pd->dev, dma_addr)) { | 613 | if (dma_mapping_error(chan->device->dev, dma_addr)) { |
614 | dev_dbg(pd->dev, "dma map failed, using PIO\n"); | 614 | dev_dbg(pd->dev, "dma map failed, using PIO\n"); |
615 | return; | 615 | return; |
616 | } | 616 | } |
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c index 8de073aed001..b3893f6282ba 100644 --- a/drivers/i2c/muxes/i2c-demux-pinctrl.c +++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c | |||
@@ -37,8 +37,6 @@ struct i2c_demux_pinctrl_priv { | |||
37 | struct i2c_demux_pinctrl_chan chan[]; | 37 | struct i2c_demux_pinctrl_chan chan[]; |
38 | }; | 38 | }; |
39 | 39 | ||
40 | static struct property status_okay = { .name = "status", .length = 3, .value = "ok" }; | ||
41 | |||
42 | static int i2c_demux_master_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) | 40 | static int i2c_demux_master_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) |
43 | { | 41 | { |
44 | struct i2c_demux_pinctrl_priv *priv = adap->algo_data; | 42 | struct i2c_demux_pinctrl_priv *priv = adap->algo_data; |
@@ -68,7 +66,7 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne | |||
68 | adap = of_find_i2c_adapter_by_node(priv->chan[new_chan].parent_np); | 66 | adap = of_find_i2c_adapter_by_node(priv->chan[new_chan].parent_np); |
69 | if (!adap) { | 67 | if (!adap) { |
70 | ret = -ENODEV; | 68 | ret = -ENODEV; |
71 | goto err; | 69 | goto err_with_revert; |
72 | } | 70 | } |
73 | 71 | ||
74 | p = devm_pinctrl_get_select(adap->dev.parent, priv->bus_name); | 72 | p = devm_pinctrl_get_select(adap->dev.parent, priv->bus_name); |
@@ -103,8 +101,11 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne | |||
103 | 101 | ||
104 | err_with_put: | 102 | err_with_put: |
105 | i2c_put_adapter(adap); | 103 | i2c_put_adapter(adap); |
104 | err_with_revert: | ||
105 | of_changeset_revert(&priv->chan[new_chan].chgset); | ||
106 | err: | 106 | err: |
107 | dev_err(priv->dev, "failed to setup demux-adapter %d (%d)\n", new_chan, ret); | 107 | dev_err(priv->dev, "failed to setup demux-adapter %d (%d)\n", new_chan, ret); |
108 | priv->cur_chan = -EINVAL; | ||
108 | return ret; | 109 | return ret; |
109 | } | 110 | } |
110 | 111 | ||
@@ -190,6 +191,7 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev) | |||
190 | { | 191 | { |
191 | struct device_node *np = pdev->dev.of_node; | 192 | struct device_node *np = pdev->dev.of_node; |
192 | struct i2c_demux_pinctrl_priv *priv; | 193 | struct i2c_demux_pinctrl_priv *priv; |
194 | struct property *props; | ||
193 | int num_chan, i, j, err; | 195 | int num_chan, i, j, err; |
194 | 196 | ||
195 | num_chan = of_count_phandle_with_args(np, "i2c-parent", NULL); | 197 | num_chan = of_count_phandle_with_args(np, "i2c-parent", NULL); |
@@ -200,7 +202,10 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev) | |||
200 | 202 | ||
201 | priv = devm_kzalloc(&pdev->dev, sizeof(*priv) | 203 | priv = devm_kzalloc(&pdev->dev, sizeof(*priv) |
202 | + num_chan * sizeof(struct i2c_demux_pinctrl_chan), GFP_KERNEL); | 204 | + num_chan * sizeof(struct i2c_demux_pinctrl_chan), GFP_KERNEL); |
203 | if (!priv) | 205 | |
206 | props = devm_kcalloc(&pdev->dev, num_chan, sizeof(*props), GFP_KERNEL); | ||
207 | |||
208 | if (!priv || !props) | ||
204 | return -ENOMEM; | 209 | return -ENOMEM; |
205 | 210 | ||
206 | err = of_property_read_string(np, "i2c-bus-name", &priv->bus_name); | 211 | err = of_property_read_string(np, "i2c-bus-name", &priv->bus_name); |
@@ -218,8 +223,12 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev) | |||
218 | } | 223 | } |
219 | priv->chan[i].parent_np = adap_np; | 224 | priv->chan[i].parent_np = adap_np; |
220 | 225 | ||
226 | props[i].name = devm_kstrdup(&pdev->dev, "status", GFP_KERNEL); | ||
227 | props[i].value = devm_kstrdup(&pdev->dev, "ok", GFP_KERNEL); | ||
228 | props[i].length = 3; | ||
229 | |||
221 | of_changeset_init(&priv->chan[i].chgset); | 230 | of_changeset_init(&priv->chan[i].chgset); |
222 | of_changeset_update_property(&priv->chan[i].chgset, adap_np, &status_okay); | 231 | of_changeset_update_property(&priv->chan[i].chgset, adap_np, &props[i]); |
223 | } | 232 | } |
224 | 233 | ||
225 | priv->num_chan = num_chan; | 234 | priv->num_chan = num_chan; |
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c index 528e755c468f..3278ebf1cc5c 100644 --- a/drivers/i2c/muxes/i2c-mux-pca954x.c +++ b/drivers/i2c/muxes/i2c-mux-pca954x.c | |||
@@ -164,7 +164,7 @@ static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan) | |||
164 | /* Only select the channel if its different from the last channel */ | 164 | /* Only select the channel if its different from the last channel */ |
165 | if (data->last_chan != regval) { | 165 | if (data->last_chan != regval) { |
166 | ret = pca954x_reg_write(muxc->parent, client, regval); | 166 | ret = pca954x_reg_write(muxc->parent, client, regval); |
167 | data->last_chan = regval; | 167 | data->last_chan = ret ? 0 : regval; |
168 | } | 168 | } |
169 | 169 | ||
170 | return ret; | 170 | return ret; |
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig index 89d78208de3f..78f148ea9d9f 100644 --- a/drivers/iio/accel/Kconfig +++ b/drivers/iio/accel/Kconfig | |||
@@ -20,6 +20,8 @@ config BMA180 | |||
20 | config BMA220 | 20 | config BMA220 |
21 | tristate "Bosch BMA220 3-Axis Accelerometer Driver" | 21 | tristate "Bosch BMA220 3-Axis Accelerometer Driver" |
22 | depends on SPI | 22 | depends on SPI |
23 | select IIO_BUFFER | ||
24 | select IIO_TRIGGERED_BUFFER | ||
23 | help | 25 | help |
24 | Say yes here to add support for the Bosch BMA220 triaxial | 26 | Say yes here to add support for the Bosch BMA220 triaxial |
25 | acceleration sensor. | 27 | acceleration sensor. |
@@ -234,7 +236,8 @@ config STK8312 | |||
234 | config STK8BA50 | 236 | config STK8BA50 |
235 | tristate "Sensortek STK8BA50 3-Axis Accelerometer Driver" | 237 | tristate "Sensortek STK8BA50 3-Axis Accelerometer Driver" |
236 | depends on I2C | 238 | depends on I2C |
237 | depends on IIO_TRIGGER | 239 | select IIO_BUFFER |
240 | select IIO_TRIGGERED_BUFFER | ||
238 | help | 241 | help |
239 | Say yes here to get support for the Sensortek STK8BA50 3-axis | 242 | Say yes here to get support for the Sensortek STK8BA50 3-axis |
240 | accelerometer. | 243 | accelerometer. |
diff --git a/drivers/iio/accel/bma220_spi.c b/drivers/iio/accel/bma220_spi.c index 1098d10df8e8..5099f295dd37 100644 --- a/drivers/iio/accel/bma220_spi.c +++ b/drivers/iio/accel/bma220_spi.c | |||
@@ -253,7 +253,7 @@ static int bma220_probe(struct spi_device *spi) | |||
253 | if (ret < 0) | 253 | if (ret < 0) |
254 | return ret; | 254 | return ret; |
255 | 255 | ||
256 | ret = iio_triggered_buffer_setup(indio_dev, NULL, | 256 | ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time, |
257 | bma220_trigger_handler, NULL); | 257 | bma220_trigger_handler, NULL); |
258 | if (ret < 0) { | 258 | if (ret < 0) { |
259 | dev_err(&spi->dev, "iio triggered buffer setup failed\n"); | 259 | dev_err(&spi->dev, "iio triggered buffer setup failed\n"); |
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c index bf17aae66145..59b380dbf27f 100644 --- a/drivers/iio/accel/bmc150-accel-core.c +++ b/drivers/iio/accel/bmc150-accel-core.c | |||
@@ -67,6 +67,9 @@ | |||
67 | #define BMC150_ACCEL_REG_PMU_BW 0x10 | 67 | #define BMC150_ACCEL_REG_PMU_BW 0x10 |
68 | #define BMC150_ACCEL_DEF_BW 125 | 68 | #define BMC150_ACCEL_DEF_BW 125 |
69 | 69 | ||
70 | #define BMC150_ACCEL_REG_RESET 0x14 | ||
71 | #define BMC150_ACCEL_RESET_VAL 0xB6 | ||
72 | |||
70 | #define BMC150_ACCEL_REG_INT_MAP_0 0x19 | 73 | #define BMC150_ACCEL_REG_INT_MAP_0 0x19 |
71 | #define BMC150_ACCEL_INT_MAP_0_BIT_SLOPE BIT(2) | 74 | #define BMC150_ACCEL_INT_MAP_0_BIT_SLOPE BIT(2) |
72 | 75 | ||
@@ -1497,6 +1500,14 @@ static int bmc150_accel_chip_init(struct bmc150_accel_data *data) | |||
1497 | int ret, i; | 1500 | int ret, i; |
1498 | unsigned int val; | 1501 | unsigned int val; |
1499 | 1502 | ||
1503 | /* | ||
1504 | * Reset chip to get it in a known good state. A delay of 1.8ms after | ||
1505 | * reset is required according to the data sheets of supported chips. | ||
1506 | */ | ||
1507 | regmap_write(data->regmap, BMC150_ACCEL_REG_RESET, | ||
1508 | BMC150_ACCEL_RESET_VAL); | ||
1509 | usleep_range(1800, 2500); | ||
1510 | |||
1500 | ret = regmap_read(data->regmap, BMC150_ACCEL_REG_CHIP_ID, &val); | 1511 | ret = regmap_read(data->regmap, BMC150_ACCEL_REG_CHIP_ID, &val); |
1501 | if (ret < 0) { | 1512 | if (ret < 0) { |
1502 | dev_err(dev, "Error: Reading chip id\n"); | 1513 | dev_err(dev, "Error: Reading chip id\n"); |
diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c index 3a9f106787d2..9d72d4bcf5e9 100644 --- a/drivers/iio/accel/kxsd9.c +++ b/drivers/iio/accel/kxsd9.c | |||
@@ -160,11 +160,13 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev, | |||
160 | if (ret < 0) | 160 | if (ret < 0) |
161 | goto error_ret; | 161 | goto error_ret; |
162 | *val = ret; | 162 | *val = ret; |
163 | ret = IIO_VAL_INT; | ||
163 | break; | 164 | break; |
164 | case IIO_CHAN_INFO_SCALE: | 165 | case IIO_CHAN_INFO_SCALE: |
165 | ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C)); | 166 | ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C)); |
166 | if (ret < 0) | 167 | if (ret < 0) |
167 | goto error_ret; | 168 | goto error_ret; |
169 | *val = 0; | ||
168 | *val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK]; | 170 | *val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK]; |
169 | ret = IIO_VAL_INT_PLUS_MICRO; | 171 | ret = IIO_VAL_INT_PLUS_MICRO; |
170 | break; | 172 | break; |
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index 1de31bdd4ce4..767577298ee3 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig | |||
@@ -389,6 +389,7 @@ config QCOM_SPMI_VADC | |||
389 | config ROCKCHIP_SARADC | 389 | config ROCKCHIP_SARADC |
390 | tristate "Rockchip SARADC driver" | 390 | tristate "Rockchip SARADC driver" |
391 | depends on ARCH_ROCKCHIP || (ARM && COMPILE_TEST) | 391 | depends on ARCH_ROCKCHIP || (ARM && COMPILE_TEST) |
392 | depends on RESET_CONTROLLER | ||
392 | help | 393 | help |
393 | Say yes here to build support for the SARADC found in SoCs from | 394 | Say yes here to build support for the SARADC found in SoCs from |
394 | Rockchip. | 395 | Rockchip. |
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c index b6163764489c..9704090b7908 100644 --- a/drivers/iio/adc/ad799x.c +++ b/drivers/iio/adc/ad799x.c | |||
@@ -527,6 +527,7 @@ static struct attribute_group ad799x_event_attrs_group = { | |||
527 | static const struct iio_info ad7991_info = { | 527 | static const struct iio_info ad7991_info = { |
528 | .read_raw = &ad799x_read_raw, | 528 | .read_raw = &ad799x_read_raw, |
529 | .driver_module = THIS_MODULE, | 529 | .driver_module = THIS_MODULE, |
530 | .update_scan_mode = ad799x_update_scan_mode, | ||
530 | }; | 531 | }; |
531 | 532 | ||
532 | static const struct iio_info ad7993_4_7_8_noirq_info = { | 533 | static const struct iio_info ad7993_4_7_8_noirq_info = { |
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c index 52430ba171f3..0438c68015e8 100644 --- a/drivers/iio/adc/at91_adc.c +++ b/drivers/iio/adc/at91_adc.c | |||
@@ -381,8 +381,8 @@ static irqreturn_t at91_adc_rl_interrupt(int irq, void *private) | |||
381 | st->ts_bufferedmeasure = false; | 381 | st->ts_bufferedmeasure = false; |
382 | input_report_key(st->ts_input, BTN_TOUCH, 0); | 382 | input_report_key(st->ts_input, BTN_TOUCH, 0); |
383 | input_sync(st->ts_input); | 383 | input_sync(st->ts_input); |
384 | } else if (status & AT91_ADC_EOC(3)) { | 384 | } else if (status & AT91_ADC_EOC(3) && st->ts_input) { |
385 | /* Conversion finished */ | 385 | /* Conversion finished and we've a touchscreen */ |
386 | if (st->ts_bufferedmeasure) { | 386 | if (st->ts_bufferedmeasure) { |
387 | /* | 387 | /* |
388 | * Last measurement is always discarded, since it can | 388 | * Last measurement is always discarded, since it can |
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c index f9ad6c2d6821..85d701291654 100644 --- a/drivers/iio/adc/rockchip_saradc.c +++ b/drivers/iio/adc/rockchip_saradc.c | |||
@@ -21,6 +21,8 @@ | |||
21 | #include <linux/of_device.h> | 21 | #include <linux/of_device.h> |
22 | #include <linux/clk.h> | 22 | #include <linux/clk.h> |
23 | #include <linux/completion.h> | 23 | #include <linux/completion.h> |
24 | #include <linux/delay.h> | ||
25 | #include <linux/reset.h> | ||
24 | #include <linux/regulator/consumer.h> | 26 | #include <linux/regulator/consumer.h> |
25 | #include <linux/iio/iio.h> | 27 | #include <linux/iio/iio.h> |
26 | 28 | ||
@@ -53,6 +55,7 @@ struct rockchip_saradc { | |||
53 | struct clk *clk; | 55 | struct clk *clk; |
54 | struct completion completion; | 56 | struct completion completion; |
55 | struct regulator *vref; | 57 | struct regulator *vref; |
58 | struct reset_control *reset; | ||
56 | const struct rockchip_saradc_data *data; | 59 | const struct rockchip_saradc_data *data; |
57 | u16 last_val; | 60 | u16 last_val; |
58 | }; | 61 | }; |
@@ -190,6 +193,16 @@ static const struct of_device_id rockchip_saradc_match[] = { | |||
190 | }; | 193 | }; |
191 | MODULE_DEVICE_TABLE(of, rockchip_saradc_match); | 194 | MODULE_DEVICE_TABLE(of, rockchip_saradc_match); |
192 | 195 | ||
196 | /** | ||
197 | * Reset SARADC Controller. | ||
198 | */ | ||
199 | static void rockchip_saradc_reset_controller(struct reset_control *reset) | ||
200 | { | ||
201 | reset_control_assert(reset); | ||
202 | usleep_range(10, 20); | ||
203 | reset_control_deassert(reset); | ||
204 | } | ||
205 | |||
193 | static int rockchip_saradc_probe(struct platform_device *pdev) | 206 | static int rockchip_saradc_probe(struct platform_device *pdev) |
194 | { | 207 | { |
195 | struct rockchip_saradc *info = NULL; | 208 | struct rockchip_saradc *info = NULL; |
@@ -218,6 +231,20 @@ static int rockchip_saradc_probe(struct platform_device *pdev) | |||
218 | if (IS_ERR(info->regs)) | 231 | if (IS_ERR(info->regs)) |
219 | return PTR_ERR(info->regs); | 232 | return PTR_ERR(info->regs); |
220 | 233 | ||
234 | /* | ||
235 | * The reset should be an optional property, as it should work | ||
236 | * with old devicetrees as well | ||
237 | */ | ||
238 | info->reset = devm_reset_control_get(&pdev->dev, "saradc-apb"); | ||
239 | if (IS_ERR(info->reset)) { | ||
240 | ret = PTR_ERR(info->reset); | ||
241 | if (ret != -ENOENT) | ||
242 | return ret; | ||
243 | |||
244 | dev_dbg(&pdev->dev, "no reset control found\n"); | ||
245 | info->reset = NULL; | ||
246 | } | ||
247 | |||
221 | init_completion(&info->completion); | 248 | init_completion(&info->completion); |
222 | 249 | ||
223 | irq = platform_get_irq(pdev, 0); | 250 | irq = platform_get_irq(pdev, 0); |
@@ -252,6 +279,9 @@ static int rockchip_saradc_probe(struct platform_device *pdev) | |||
252 | return PTR_ERR(info->vref); | 279 | return PTR_ERR(info->vref); |
253 | } | 280 | } |
254 | 281 | ||
282 | if (info->reset) | ||
283 | rockchip_saradc_reset_controller(info->reset); | ||
284 | |||
255 | /* | 285 | /* |
256 | * Use a default value for the converter clock. | 286 | * Use a default value for the converter clock. |
257 | * This may become user-configurable in the future. | 287 | * This may become user-configurable in the future. |
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c index 1ef398770a1f..066abaf80201 100644 --- a/drivers/iio/adc/ti-ads1015.c +++ b/drivers/iio/adc/ti-ads1015.c | |||
@@ -489,7 +489,8 @@ static struct iio_info ads1115_info = { | |||
489 | #ifdef CONFIG_OF | 489 | #ifdef CONFIG_OF |
490 | static int ads1015_get_channels_config_of(struct i2c_client *client) | 490 | static int ads1015_get_channels_config_of(struct i2c_client *client) |
491 | { | 491 | { |
492 | struct ads1015_data *data = i2c_get_clientdata(client); | 492 | struct iio_dev *indio_dev = i2c_get_clientdata(client); |
493 | struct ads1015_data *data = iio_priv(indio_dev); | ||
493 | struct device_node *node; | 494 | struct device_node *node; |
494 | 495 | ||
495 | if (!client->dev.of_node || | 496 | if (!client->dev.of_node || |
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c index 8a368756881b..c3cfacca2541 100644 --- a/drivers/iio/adc/ti_am335x_adc.c +++ b/drivers/iio/adc/ti_am335x_adc.c | |||
@@ -32,6 +32,7 @@ | |||
32 | 32 | ||
33 | struct tiadc_device { | 33 | struct tiadc_device { |
34 | struct ti_tscadc_dev *mfd_tscadc; | 34 | struct ti_tscadc_dev *mfd_tscadc; |
35 | struct mutex fifo1_lock; /* to protect fifo access */ | ||
35 | int channels; | 36 | int channels; |
36 | u8 channel_line[8]; | 37 | u8 channel_line[8]; |
37 | u8 channel_step[8]; | 38 | u8 channel_step[8]; |
@@ -359,6 +360,7 @@ static int tiadc_read_raw(struct iio_dev *indio_dev, | |||
359 | int *val, int *val2, long mask) | 360 | int *val, int *val2, long mask) |
360 | { | 361 | { |
361 | struct tiadc_device *adc_dev = iio_priv(indio_dev); | 362 | struct tiadc_device *adc_dev = iio_priv(indio_dev); |
363 | int ret = IIO_VAL_INT; | ||
362 | int i, map_val; | 364 | int i, map_val; |
363 | unsigned int fifo1count, read, stepid; | 365 | unsigned int fifo1count, read, stepid; |
364 | bool found = false; | 366 | bool found = false; |
@@ -372,13 +374,14 @@ static int tiadc_read_raw(struct iio_dev *indio_dev, | |||
372 | if (!step_en) | 374 | if (!step_en) |
373 | return -EINVAL; | 375 | return -EINVAL; |
374 | 376 | ||
377 | mutex_lock(&adc_dev->fifo1_lock); | ||
375 | fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT); | 378 | fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT); |
376 | while (fifo1count--) | 379 | while (fifo1count--) |
377 | tiadc_readl(adc_dev, REG_FIFO1); | 380 | tiadc_readl(adc_dev, REG_FIFO1); |
378 | 381 | ||
379 | am335x_tsc_se_set_once(adc_dev->mfd_tscadc, step_en); | 382 | am335x_tsc_se_set_once(adc_dev->mfd_tscadc, step_en); |
380 | 383 | ||
381 | timeout = jiffies + usecs_to_jiffies | 384 | timeout = jiffies + msecs_to_jiffies |
382 | (IDLE_TIMEOUT * adc_dev->channels); | 385 | (IDLE_TIMEOUT * adc_dev->channels); |
383 | /* Wait for Fifo threshold interrupt */ | 386 | /* Wait for Fifo threshold interrupt */ |
384 | while (1) { | 387 | while (1) { |
@@ -388,7 +391,8 @@ static int tiadc_read_raw(struct iio_dev *indio_dev, | |||
388 | 391 | ||
389 | if (time_after(jiffies, timeout)) { | 392 | if (time_after(jiffies, timeout)) { |
390 | am335x_tsc_se_adc_done(adc_dev->mfd_tscadc); | 393 | am335x_tsc_se_adc_done(adc_dev->mfd_tscadc); |
391 | return -EAGAIN; | 394 | ret = -EAGAIN; |
395 | goto err_unlock; | ||
392 | } | 396 | } |
393 | } | 397 | } |
394 | map_val = adc_dev->channel_step[chan->scan_index]; | 398 | map_val = adc_dev->channel_step[chan->scan_index]; |
@@ -414,8 +418,11 @@ static int tiadc_read_raw(struct iio_dev *indio_dev, | |||
414 | am335x_tsc_se_adc_done(adc_dev->mfd_tscadc); | 418 | am335x_tsc_se_adc_done(adc_dev->mfd_tscadc); |
415 | 419 | ||
416 | if (found == false) | 420 | if (found == false) |
417 | return -EBUSY; | 421 | ret = -EBUSY; |
418 | return IIO_VAL_INT; | 422 | |
423 | err_unlock: | ||
424 | mutex_unlock(&adc_dev->fifo1_lock); | ||
425 | return ret; | ||
419 | } | 426 | } |
420 | 427 | ||
421 | static const struct iio_info tiadc_info = { | 428 | static const struct iio_info tiadc_info = { |
@@ -483,6 +490,7 @@ static int tiadc_probe(struct platform_device *pdev) | |||
483 | 490 | ||
484 | tiadc_step_config(indio_dev); | 491 | tiadc_step_config(indio_dev); |
485 | tiadc_writel(adc_dev, REG_FIFO1THR, FIFO1_THRESHOLD); | 492 | tiadc_writel(adc_dev, REG_FIFO1THR, FIFO1_THRESHOLD); |
493 | mutex_init(&adc_dev->fifo1_lock); | ||
486 | 494 | ||
487 | err = tiadc_channel_init(indio_dev, adc_dev->channels); | 495 | err = tiadc_channel_init(indio_dev, adc_dev->channels); |
488 | if (err < 0) | 496 | if (err < 0) |
diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c index ae038a59d256..407f141a1eee 100644 --- a/drivers/iio/chemical/atlas-ph-sensor.c +++ b/drivers/iio/chemical/atlas-ph-sensor.c | |||
@@ -434,7 +434,7 @@ static int atlas_read_raw(struct iio_dev *indio_dev, | |||
434 | break; | 434 | break; |
435 | case IIO_ELECTRICALCONDUCTIVITY: | 435 | case IIO_ELECTRICALCONDUCTIVITY: |
436 | *val = 1; /* 0.00001 */ | 436 | *val = 1; /* 0.00001 */ |
437 | *val = 100000; | 437 | *val2 = 100000; |
438 | break; | 438 | break; |
439 | case IIO_CONCENTRATION: | 439 | case IIO_CONCENTRATION: |
440 | *val = 0; /* 0.000000001 */ | 440 | *val = 0; /* 0.000000001 */ |
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c index e81f434760f4..dc33c1dd5191 100644 --- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c +++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c | |||
@@ -56,8 +56,8 @@ static struct { | |||
56 | {HID_USAGE_SENSOR_ALS, 0, 1, 0}, | 56 | {HID_USAGE_SENSOR_ALS, 0, 1, 0}, |
57 | {HID_USAGE_SENSOR_ALS, HID_USAGE_SENSOR_UNITS_LUX, 1, 0}, | 57 | {HID_USAGE_SENSOR_ALS, HID_USAGE_SENSOR_UNITS_LUX, 1, 0}, |
58 | 58 | ||
59 | {HID_USAGE_SENSOR_PRESSURE, 0, 100000, 0}, | 59 | {HID_USAGE_SENSOR_PRESSURE, 0, 100, 0}, |
60 | {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 1, 0}, | 60 | {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 0, 1000}, |
61 | }; | 61 | }; |
62 | 62 | ||
63 | static int pow_10(unsigned power) | 63 | static int pow_10(unsigned power) |
diff --git a/drivers/iio/dac/stx104.c b/drivers/iio/dac/stx104.c index 792a97164cb2..bebbd00304ce 100644 --- a/drivers/iio/dac/stx104.c +++ b/drivers/iio/dac/stx104.c | |||
@@ -65,6 +65,16 @@ struct stx104_gpio { | |||
65 | unsigned int out_state; | 65 | unsigned int out_state; |
66 | }; | 66 | }; |
67 | 67 | ||
68 | /** | ||
69 | * struct stx104_dev - STX104 device private data structure | ||
70 | * @indio_dev: IIO device | ||
71 | * @chip: instance of the gpio_chip | ||
72 | */ | ||
73 | struct stx104_dev { | ||
74 | struct iio_dev *indio_dev; | ||
75 | struct gpio_chip *chip; | ||
76 | }; | ||
77 | |||
68 | static int stx104_read_raw(struct iio_dev *indio_dev, | 78 | static int stx104_read_raw(struct iio_dev *indio_dev, |
69 | struct iio_chan_spec const *chan, int *val, int *val2, long mask) | 79 | struct iio_chan_spec const *chan, int *val, int *val2, long mask) |
70 | { | 80 | { |
@@ -107,6 +117,7 @@ static const struct iio_chan_spec stx104_channels[STX104_NUM_CHAN] = { | |||
107 | static int stx104_gpio_get_direction(struct gpio_chip *chip, | 117 | static int stx104_gpio_get_direction(struct gpio_chip *chip, |
108 | unsigned int offset) | 118 | unsigned int offset) |
109 | { | 119 | { |
120 | /* GPIO 0-3 are input only, while the rest are output only */ | ||
110 | if (offset < 4) | 121 | if (offset < 4) |
111 | return 1; | 122 | return 1; |
112 | 123 | ||
@@ -169,6 +180,7 @@ static int stx104_probe(struct device *dev, unsigned int id) | |||
169 | struct iio_dev *indio_dev; | 180 | struct iio_dev *indio_dev; |
170 | struct stx104_iio *priv; | 181 | struct stx104_iio *priv; |
171 | struct stx104_gpio *stx104gpio; | 182 | struct stx104_gpio *stx104gpio; |
183 | struct stx104_dev *stx104dev; | ||
172 | int err; | 184 | int err; |
173 | 185 | ||
174 | indio_dev = devm_iio_device_alloc(dev, sizeof(*priv)); | 186 | indio_dev = devm_iio_device_alloc(dev, sizeof(*priv)); |
@@ -179,6 +191,10 @@ static int stx104_probe(struct device *dev, unsigned int id) | |||
179 | if (!stx104gpio) | 191 | if (!stx104gpio) |
180 | return -ENOMEM; | 192 | return -ENOMEM; |
181 | 193 | ||
194 | stx104dev = devm_kzalloc(dev, sizeof(*stx104dev), GFP_KERNEL); | ||
195 | if (!stx104dev) | ||
196 | return -ENOMEM; | ||
197 | |||
182 | if (!devm_request_region(dev, base[id], STX104_EXTENT, | 198 | if (!devm_request_region(dev, base[id], STX104_EXTENT, |
183 | dev_name(dev))) { | 199 | dev_name(dev))) { |
184 | dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n", | 200 | dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n", |
@@ -199,12 +215,6 @@ static int stx104_probe(struct device *dev, unsigned int id) | |||
199 | outw(0, base[id] + 4); | 215 | outw(0, base[id] + 4); |
200 | outw(0, base[id] + 6); | 216 | outw(0, base[id] + 6); |
201 | 217 | ||
202 | err = devm_iio_device_register(dev, indio_dev); | ||
203 | if (err) { | ||
204 | dev_err(dev, "IIO device registering failed (%d)\n", err); | ||
205 | return err; | ||
206 | } | ||
207 | |||
208 | stx104gpio->chip.label = dev_name(dev); | 218 | stx104gpio->chip.label = dev_name(dev); |
209 | stx104gpio->chip.parent = dev; | 219 | stx104gpio->chip.parent = dev; |
210 | stx104gpio->chip.owner = THIS_MODULE; | 220 | stx104gpio->chip.owner = THIS_MODULE; |
@@ -220,7 +230,9 @@ static int stx104_probe(struct device *dev, unsigned int id) | |||
220 | 230 | ||
221 | spin_lock_init(&stx104gpio->lock); | 231 | spin_lock_init(&stx104gpio->lock); |
222 | 232 | ||
223 | dev_set_drvdata(dev, stx104gpio); | 233 | stx104dev->indio_dev = indio_dev; |
234 | stx104dev->chip = &stx104gpio->chip; | ||
235 | dev_set_drvdata(dev, stx104dev); | ||
224 | 236 | ||
225 | err = gpiochip_add_data(&stx104gpio->chip, stx104gpio); | 237 | err = gpiochip_add_data(&stx104gpio->chip, stx104gpio); |
226 | if (err) { | 238 | if (err) { |
@@ -228,14 +240,22 @@ static int stx104_probe(struct device *dev, unsigned int id) | |||
228 | return err; | 240 | return err; |
229 | } | 241 | } |
230 | 242 | ||
243 | err = iio_device_register(indio_dev); | ||
244 | if (err) { | ||
245 | dev_err(dev, "IIO device registering failed (%d)\n", err); | ||
246 | gpiochip_remove(&stx104gpio->chip); | ||
247 | return err; | ||
248 | } | ||
249 | |||
231 | return 0; | 250 | return 0; |
232 | } | 251 | } |
233 | 252 | ||
234 | static int stx104_remove(struct device *dev, unsigned int id) | 253 | static int stx104_remove(struct device *dev, unsigned int id) |
235 | { | 254 | { |
236 | struct stx104_gpio *const stx104gpio = dev_get_drvdata(dev); | 255 | struct stx104_dev *const stx104dev = dev_get_drvdata(dev); |
237 | 256 | ||
238 | gpiochip_remove(&stx104gpio->chip); | 257 | iio_device_unregister(stx104dev->indio_dev); |
258 | gpiochip_remove(stx104dev->chip); | ||
239 | 259 | ||
240 | return 0; | 260 | return 0; |
241 | } | 261 | } |
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig index 738a86d9e4a9..d04124345992 100644 --- a/drivers/iio/humidity/Kconfig +++ b/drivers/iio/humidity/Kconfig | |||
@@ -6,6 +6,8 @@ menu "Humidity sensors" | |||
6 | config AM2315 | 6 | config AM2315 |
7 | tristate "Aosong AM2315 relative humidity and temperature sensor" | 7 | tristate "Aosong AM2315 relative humidity and temperature sensor" |
8 | depends on I2C | 8 | depends on I2C |
9 | select IIO_BUFFER | ||
10 | select IIO_TRIGGERED_BUFFER | ||
9 | help | 11 | help |
10 | If you say yes here you get support for the Aosong AM2315 | 12 | If you say yes here you get support for the Aosong AM2315 |
11 | relative humidity and ambient temperature sensor. | 13 | relative humidity and ambient temperature sensor. |
diff --git a/drivers/iio/humidity/am2315.c b/drivers/iio/humidity/am2315.c index 3e200f69e886..ff96b6d0fdae 100644 --- a/drivers/iio/humidity/am2315.c +++ b/drivers/iio/humidity/am2315.c | |||
@@ -244,7 +244,7 @@ static int am2315_probe(struct i2c_client *client, | |||
244 | indio_dev->channels = am2315_channels; | 244 | indio_dev->channels = am2315_channels; |
245 | indio_dev->num_channels = ARRAY_SIZE(am2315_channels); | 245 | indio_dev->num_channels = ARRAY_SIZE(am2315_channels); |
246 | 246 | ||
247 | ret = iio_triggered_buffer_setup(indio_dev, NULL, | 247 | ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time, |
248 | am2315_trigger_handler, NULL); | 248 | am2315_trigger_handler, NULL); |
249 | if (ret < 0) { | 249 | if (ret < 0) { |
250 | dev_err(&client->dev, "iio triggered buffer setup failed\n"); | 250 | dev_err(&client->dev, "iio triggered buffer setup failed\n"); |
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c index a03832a5fc95..e0c9c70c2a4a 100644 --- a/drivers/iio/humidity/hdc100x.c +++ b/drivers/iio/humidity/hdc100x.c | |||
@@ -142,7 +142,7 @@ static int hdc100x_get_measurement(struct hdc100x_data *data, | |||
142 | struct i2c_client *client = data->client; | 142 | struct i2c_client *client = data->client; |
143 | int delay = data->adc_int_us[chan->address]; | 143 | int delay = data->adc_int_us[chan->address]; |
144 | int ret; | 144 | int ret; |
145 | int val; | 145 | __be16 val; |
146 | 146 | ||
147 | /* start measurement */ | 147 | /* start measurement */ |
148 | ret = i2c_smbus_write_byte(client, chan->address); | 148 | ret = i2c_smbus_write_byte(client, chan->address); |
@@ -154,26 +154,13 @@ static int hdc100x_get_measurement(struct hdc100x_data *data, | |||
154 | /* wait for integration time to pass */ | 154 | /* wait for integration time to pass */ |
155 | usleep_range(delay, delay + 1000); | 155 | usleep_range(delay, delay + 1000); |
156 | 156 | ||
157 | /* | 157 | /* read measurement */ |
158 | * i2c_smbus_read_word_data cannot() be used here due to the command | 158 | ret = i2c_master_recv(data->client, (char *)&val, sizeof(val)); |
159 | * value not being understood and causes NAKs preventing any reading | ||
160 | * from being accessed. | ||
161 | */ | ||
162 | ret = i2c_smbus_read_byte(client); | ||
163 | if (ret < 0) { | 159 | if (ret < 0) { |
164 | dev_err(&client->dev, "cannot read high byte measurement"); | 160 | dev_err(&client->dev, "cannot read sensor data\n"); |
165 | return ret; | 161 | return ret; |
166 | } | 162 | } |
167 | val = ret << 8; | 163 | return be16_to_cpu(val); |
168 | |||
169 | ret = i2c_smbus_read_byte(client); | ||
170 | if (ret < 0) { | ||
171 | dev_err(&client->dev, "cannot read low byte measurement"); | ||
172 | return ret; | ||
173 | } | ||
174 | val |= ret; | ||
175 | |||
176 | return val; | ||
177 | } | 164 | } |
178 | 165 | ||
179 | static int hdc100x_get_heater_status(struct hdc100x_data *data) | 166 | static int hdc100x_get_heater_status(struct hdc100x_data *data) |
@@ -272,8 +259,8 @@ static int hdc100x_probe(struct i2c_client *client, | |||
272 | struct iio_dev *indio_dev; | 259 | struct iio_dev *indio_dev; |
273 | struct hdc100x_data *data; | 260 | struct hdc100x_data *data; |
274 | 261 | ||
275 | if (!i2c_check_functionality(client->adapter, | 262 | if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA | |
276 | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BYTE)) | 263 | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_I2C)) |
277 | return -EOPNOTSUPP; | 264 | return -EOPNOTSUPP; |
278 | 265 | ||
279 | indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); | 266 | indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); |
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index 90462fcf5436..158aaf44dd95 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c | |||
@@ -107,9 +107,10 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf, | |||
107 | { | 107 | { |
108 | struct iio_dev *indio_dev = filp->private_data; | 108 | struct iio_dev *indio_dev = filp->private_data; |
109 | struct iio_buffer *rb = indio_dev->buffer; | 109 | struct iio_buffer *rb = indio_dev->buffer; |
110 | DEFINE_WAIT_FUNC(wait, woken_wake_function); | ||
110 | size_t datum_size; | 111 | size_t datum_size; |
111 | size_t to_wait; | 112 | size_t to_wait; |
112 | int ret; | 113 | int ret = 0; |
113 | 114 | ||
114 | if (!indio_dev->info) | 115 | if (!indio_dev->info) |
115 | return -ENODEV; | 116 | return -ENODEV; |
@@ -131,19 +132,29 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf, | |||
131 | else | 132 | else |
132 | to_wait = min_t(size_t, n / datum_size, rb->watermark); | 133 | to_wait = min_t(size_t, n / datum_size, rb->watermark); |
133 | 134 | ||
135 | add_wait_queue(&rb->pollq, &wait); | ||
134 | do { | 136 | do { |
135 | ret = wait_event_interruptible(rb->pollq, | 137 | if (!indio_dev->info) { |
136 | iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)); | 138 | ret = -ENODEV; |
137 | if (ret) | 139 | break; |
138 | return ret; | 140 | } |
139 | 141 | ||
140 | if (!indio_dev->info) | 142 | if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) { |
141 | return -ENODEV; | 143 | if (signal_pending(current)) { |
144 | ret = -ERESTARTSYS; | ||
145 | break; | ||
146 | } | ||
147 | |||
148 | wait_woken(&wait, TASK_INTERRUPTIBLE, | ||
149 | MAX_SCHEDULE_TIMEOUT); | ||
150 | continue; | ||
151 | } | ||
142 | 152 | ||
143 | ret = rb->access->read_first_n(rb, n, buf); | 153 | ret = rb->access->read_first_n(rb, n, buf); |
144 | if (ret == 0 && (filp->f_flags & O_NONBLOCK)) | 154 | if (ret == 0 && (filp->f_flags & O_NONBLOCK)) |
145 | ret = -EAGAIN; | 155 | ret = -EAGAIN; |
146 | } while (ret == 0); | 156 | } while (ret == 0); |
157 | remove_wait_queue(&rb->pollq, &wait); | ||
147 | 158 | ||
148 | return ret; | 159 | return ret; |
149 | } | 160 | } |
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index f914d5d140e4..d2b889918c3e 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c | |||
@@ -613,9 +613,8 @@ ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals) | |||
613 | return sprintf(buf, "%d.%09u\n", vals[0], vals[1]); | 613 | return sprintf(buf, "%d.%09u\n", vals[0], vals[1]); |
614 | case IIO_VAL_FRACTIONAL: | 614 | case IIO_VAL_FRACTIONAL: |
615 | tmp = div_s64((s64)vals[0] * 1000000000LL, vals[1]); | 615 | tmp = div_s64((s64)vals[0] * 1000000000LL, vals[1]); |
616 | vals[1] = do_div(tmp, 1000000000LL); | 616 | vals[0] = (int)div_s64_rem(tmp, 1000000000, &vals[1]); |
617 | vals[0] = tmp; | 617 | return sprintf(buf, "%d.%09u\n", vals[0], abs(vals[1])); |
618 | return sprintf(buf, "%d.%09u\n", vals[0], vals[1]); | ||
619 | case IIO_VAL_FRACTIONAL_LOG2: | 618 | case IIO_VAL_FRACTIONAL_LOG2: |
620 | tmp = (s64)vals[0] * 1000000000LL >> vals[1]; | 619 | tmp = (s64)vals[0] * 1000000000LL >> vals[1]; |
621 | vals[1] = do_div(tmp, 1000000000LL); | 620 | vals[1] = do_div(tmp, 1000000000LL); |
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig index 7c566f516572..3574945183fe 100644 --- a/drivers/iio/light/Kconfig +++ b/drivers/iio/light/Kconfig | |||
@@ -76,7 +76,6 @@ config BH1750 | |||
76 | config BH1780 | 76 | config BH1780 |
77 | tristate "ROHM BH1780 ambient light sensor" | 77 | tristate "ROHM BH1780 ambient light sensor" |
78 | depends on I2C | 78 | depends on I2C |
79 | depends on !SENSORS_BH1780 | ||
80 | help | 79 | help |
81 | Say Y here to build support for the ROHM BH1780GLI ambient | 80 | Say Y here to build support for the ROHM BH1780GLI ambient |
82 | light sensor. | 81 | light sensor. |
@@ -238,6 +237,8 @@ config MAX44000 | |||
238 | tristate "MAX44000 Ambient and Infrared Proximity Sensor" | 237 | tristate "MAX44000 Ambient and Infrared Proximity Sensor" |
239 | depends on I2C | 238 | depends on I2C |
240 | select REGMAP_I2C | 239 | select REGMAP_I2C |
240 | select IIO_BUFFER | ||
241 | select IIO_TRIGGERED_BUFFER | ||
241 | help | 242 | help |
242 | Say Y here if you want to build support for Maxim Integrated's | 243 | Say Y here if you want to build support for Maxim Integrated's |
243 | MAX44000 ambient and infrared proximity sensor device. | 244 | MAX44000 ambient and infrared proximity sensor device. |
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c index 6943688e66df..e5a533cbd53f 100644 --- a/drivers/iio/pressure/bmp280-core.c +++ b/drivers/iio/pressure/bmp280-core.c | |||
@@ -970,7 +970,7 @@ int bmp280_common_probe(struct device *dev, | |||
970 | data->vdda = devm_regulator_get(dev, "vdda"); | 970 | data->vdda = devm_regulator_get(dev, "vdda"); |
971 | if (IS_ERR(data->vdda)) { | 971 | if (IS_ERR(data->vdda)) { |
972 | dev_err(dev, "failed to get VDDA regulator\n"); | 972 | dev_err(dev, "failed to get VDDA regulator\n"); |
973 | ret = PTR_ERR(data->vddd); | 973 | ret = PTR_ERR(data->vdda); |
974 | goto out_disable_vddd; | 974 | goto out_disable_vddd; |
975 | } | 975 | } |
976 | ret = regulator_enable(data->vdda); | 976 | ret = regulator_enable(data->vdda); |
@@ -1079,7 +1079,8 @@ EXPORT_SYMBOL(bmp280_common_remove); | |||
1079 | #ifdef CONFIG_PM | 1079 | #ifdef CONFIG_PM |
1080 | static int bmp280_runtime_suspend(struct device *dev) | 1080 | static int bmp280_runtime_suspend(struct device *dev) |
1081 | { | 1081 | { |
1082 | struct bmp280_data *data = dev_get_drvdata(dev); | 1082 | struct iio_dev *indio_dev = dev_get_drvdata(dev); |
1083 | struct bmp280_data *data = iio_priv(indio_dev); | ||
1083 | int ret; | 1084 | int ret; |
1084 | 1085 | ||
1085 | ret = regulator_disable(data->vdda); | 1086 | ret = regulator_disable(data->vdda); |
@@ -1090,7 +1091,8 @@ static int bmp280_runtime_suspend(struct device *dev) | |||
1090 | 1091 | ||
1091 | static int bmp280_runtime_resume(struct device *dev) | 1092 | static int bmp280_runtime_resume(struct device *dev) |
1092 | { | 1093 | { |
1093 | struct bmp280_data *data = dev_get_drvdata(dev); | 1094 | struct iio_dev *indio_dev = dev_get_drvdata(dev); |
1095 | struct bmp280_data *data = iio_priv(indio_dev); | ||
1094 | int ret; | 1096 | int ret; |
1095 | 1097 | ||
1096 | ret = regulator_enable(data->vddd); | 1098 | ret = regulator_enable(data->vddd); |
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c index 2e3a70e1b245..5656deb17261 100644 --- a/drivers/iio/proximity/as3935.c +++ b/drivers/iio/proximity/as3935.c | |||
@@ -397,7 +397,7 @@ static int as3935_probe(struct spi_device *spi) | |||
397 | return ret; | 397 | return ret; |
398 | } | 398 | } |
399 | 399 | ||
400 | ret = iio_triggered_buffer_setup(indio_dev, NULL, | 400 | ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time, |
401 | &as3935_trigger_handler, NULL); | 401 | &as3935_trigger_handler, NULL); |
402 | 402 | ||
403 | if (ret) { | 403 | if (ret) { |
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index e6dfa1bd3def..5f65a78b27c9 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
@@ -2462,18 +2462,24 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) | |||
2462 | 2462 | ||
2463 | if (addr->dev_addr.bound_dev_if) { | 2463 | if (addr->dev_addr.bound_dev_if) { |
2464 | ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if); | 2464 | ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if); |
2465 | if (!ndev) | 2465 | if (!ndev) { |
2466 | return -ENODEV; | 2466 | ret = -ENODEV; |
2467 | goto err2; | ||
2468 | } | ||
2467 | 2469 | ||
2468 | if (ndev->flags & IFF_LOOPBACK) { | 2470 | if (ndev->flags & IFF_LOOPBACK) { |
2469 | dev_put(ndev); | 2471 | dev_put(ndev); |
2470 | if (!id_priv->id.device->get_netdev) | 2472 | if (!id_priv->id.device->get_netdev) { |
2471 | return -EOPNOTSUPP; | 2473 | ret = -EOPNOTSUPP; |
2474 | goto err2; | ||
2475 | } | ||
2472 | 2476 | ||
2473 | ndev = id_priv->id.device->get_netdev(id_priv->id.device, | 2477 | ndev = id_priv->id.device->get_netdev(id_priv->id.device, |
2474 | id_priv->id.port_num); | 2478 | id_priv->id.port_num); |
2475 | if (!ndev) | 2479 | if (!ndev) { |
2476 | return -ENODEV; | 2480 | ret = -ENODEV; |
2481 | goto err2; | ||
2482 | } | ||
2477 | } | 2483 | } |
2478 | 2484 | ||
2479 | route->path_rec->net = &init_net; | 2485 | route->path_rec->net = &init_net; |
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c index 3a3c5d73bbfc..51c79b2fb0b8 100644 --- a/drivers/infiniband/core/multicast.c +++ b/drivers/infiniband/core/multicast.c | |||
@@ -106,7 +106,6 @@ struct mcast_group { | |||
106 | atomic_t refcount; | 106 | atomic_t refcount; |
107 | enum mcast_group_state state; | 107 | enum mcast_group_state state; |
108 | struct ib_sa_query *query; | 108 | struct ib_sa_query *query; |
109 | int query_id; | ||
110 | u16 pkey_index; | 109 | u16 pkey_index; |
111 | u8 leave_state; | 110 | u8 leave_state; |
112 | int retries; | 111 | int retries; |
@@ -340,11 +339,7 @@ static int send_join(struct mcast_group *group, struct mcast_member *member) | |||
340 | member->multicast.comp_mask, | 339 | member->multicast.comp_mask, |
341 | 3000, GFP_KERNEL, join_handler, group, | 340 | 3000, GFP_KERNEL, join_handler, group, |
342 | &group->query); | 341 | &group->query); |
343 | if (ret >= 0) { | 342 | return (ret > 0) ? 0 : ret; |
344 | group->query_id = ret; | ||
345 | ret = 0; | ||
346 | } | ||
347 | return ret; | ||
348 | } | 343 | } |
349 | 344 | ||
350 | static int send_leave(struct mcast_group *group, u8 leave_state) | 345 | static int send_leave(struct mcast_group *group, u8 leave_state) |
@@ -364,11 +359,7 @@ static int send_leave(struct mcast_group *group, u8 leave_state) | |||
364 | IB_SA_MCMEMBER_REC_JOIN_STATE, | 359 | IB_SA_MCMEMBER_REC_JOIN_STATE, |
365 | 3000, GFP_KERNEL, leave_handler, | 360 | 3000, GFP_KERNEL, leave_handler, |
366 | group, &group->query); | 361 | group, &group->query); |
367 | if (ret >= 0) { | 362 | return (ret > 0) ? 0 : ret; |
368 | group->query_id = ret; | ||
369 | ret = 0; | ||
370 | } | ||
371 | return ret; | ||
372 | } | 363 | } |
373 | 364 | ||
374 | static void join_group(struct mcast_group *group, struct mcast_member *member, | 365 | static void join_group(struct mcast_group *group, struct mcast_member *member, |
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 3aca7f6171b4..80f988984f44 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c | |||
@@ -333,6 +333,8 @@ static void remove_ep_tid(struct c4iw_ep *ep) | |||
333 | 333 | ||
334 | spin_lock_irqsave(&ep->com.dev->lock, flags); | 334 | spin_lock_irqsave(&ep->com.dev->lock, flags); |
335 | _remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid, 0); | 335 | _remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid, 0); |
336 | if (idr_is_empty(&ep->com.dev->hwtid_idr)) | ||
337 | wake_up(&ep->com.dev->wait); | ||
336 | spin_unlock_irqrestore(&ep->com.dev->lock, flags); | 338 | spin_unlock_irqrestore(&ep->com.dev->lock, flags); |
337 | } | 339 | } |
338 | 340 | ||
@@ -1827,8 +1829,12 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) | |||
1827 | (ep->mpa_pkt + sizeof(*mpa)); | 1829 | (ep->mpa_pkt + sizeof(*mpa)); |
1828 | ep->ird = ntohs(mpa_v2_params->ird) & | 1830 | ep->ird = ntohs(mpa_v2_params->ird) & |
1829 | MPA_V2_IRD_ORD_MASK; | 1831 | MPA_V2_IRD_ORD_MASK; |
1832 | ep->ird = min_t(u32, ep->ird, | ||
1833 | cur_max_read_depth(ep->com.dev)); | ||
1830 | ep->ord = ntohs(mpa_v2_params->ord) & | 1834 | ep->ord = ntohs(mpa_v2_params->ord) & |
1831 | MPA_V2_IRD_ORD_MASK; | 1835 | MPA_V2_IRD_ORD_MASK; |
1836 | ep->ord = min_t(u32, ep->ord, | ||
1837 | cur_max_read_depth(ep->com.dev)); | ||
1832 | PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird, | 1838 | PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird, |
1833 | ep->ord); | 1839 | ep->ord); |
1834 | if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL) | 1840 | if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL) |
@@ -2113,8 +2119,10 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip, | |||
2113 | } | 2119 | } |
2114 | ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, | 2120 | ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, |
2115 | n, pdev, rt_tos2priority(tos)); | 2121 | n, pdev, rt_tos2priority(tos)); |
2116 | if (!ep->l2t) | 2122 | if (!ep->l2t) { |
2123 | dev_put(pdev); | ||
2117 | goto out; | 2124 | goto out; |
2125 | } | ||
2118 | ep->mtu = pdev->mtu; | 2126 | ep->mtu = pdev->mtu; |
2119 | ep->tx_chan = cxgb4_port_chan(pdev); | 2127 | ep->tx_chan = cxgb4_port_chan(pdev); |
2120 | ep->smac_idx = cxgb4_tp_smt_idx(adapter_type, | 2128 | ep->smac_idx = cxgb4_tp_smt_idx(adapter_type, |
@@ -3136,7 +3144,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
3136 | if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { | 3144 | if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { |
3137 | if (conn_param->ord > ep->ird) { | 3145 | if (conn_param->ord > ep->ird) { |
3138 | if (RELAXED_IRD_NEGOTIATION) { | 3146 | if (RELAXED_IRD_NEGOTIATION) { |
3139 | ep->ord = ep->ird; | 3147 | conn_param->ord = ep->ird; |
3140 | } else { | 3148 | } else { |
3141 | ep->ird = conn_param->ird; | 3149 | ep->ird = conn_param->ird; |
3142 | ep->ord = conn_param->ord; | 3150 | ep->ord = conn_param->ord; |
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index 812ab7278b8e..ac926c942fee 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c | |||
@@ -1016,15 +1016,15 @@ int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata) | |||
1016 | int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) | 1016 | int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) |
1017 | { | 1017 | { |
1018 | struct c4iw_cq *chp; | 1018 | struct c4iw_cq *chp; |
1019 | int ret; | 1019 | int ret = 0; |
1020 | unsigned long flag; | 1020 | unsigned long flag; |
1021 | 1021 | ||
1022 | chp = to_c4iw_cq(ibcq); | 1022 | chp = to_c4iw_cq(ibcq); |
1023 | spin_lock_irqsave(&chp->lock, flag); | 1023 | spin_lock_irqsave(&chp->lock, flag); |
1024 | ret = t4_arm_cq(&chp->cq, | 1024 | t4_arm_cq(&chp->cq, |
1025 | (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED); | 1025 | (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED); |
1026 | if (flags & IB_CQ_REPORT_MISSED_EVENTS) | ||
1027 | ret = t4_cq_notempty(&chp->cq); | ||
1026 | spin_unlock_irqrestore(&chp->lock, flag); | 1028 | spin_unlock_irqrestore(&chp->lock, flag); |
1027 | if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS)) | ||
1028 | ret = 0; | ||
1029 | return ret; | 1029 | return ret; |
1030 | } | 1030 | } |
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index 071d7332ec06..3c4b2126e0d1 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c | |||
@@ -872,9 +872,13 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev) | |||
872 | static void c4iw_dealloc(struct uld_ctx *ctx) | 872 | static void c4iw_dealloc(struct uld_ctx *ctx) |
873 | { | 873 | { |
874 | c4iw_rdev_close(&ctx->dev->rdev); | 874 | c4iw_rdev_close(&ctx->dev->rdev); |
875 | WARN_ON_ONCE(!idr_is_empty(&ctx->dev->cqidr)); | ||
875 | idr_destroy(&ctx->dev->cqidr); | 876 | idr_destroy(&ctx->dev->cqidr); |
877 | WARN_ON_ONCE(!idr_is_empty(&ctx->dev->qpidr)); | ||
876 | idr_destroy(&ctx->dev->qpidr); | 878 | idr_destroy(&ctx->dev->qpidr); |
879 | WARN_ON_ONCE(!idr_is_empty(&ctx->dev->mmidr)); | ||
877 | idr_destroy(&ctx->dev->mmidr); | 880 | idr_destroy(&ctx->dev->mmidr); |
881 | wait_event(ctx->dev->wait, idr_is_empty(&ctx->dev->hwtid_idr)); | ||
878 | idr_destroy(&ctx->dev->hwtid_idr); | 882 | idr_destroy(&ctx->dev->hwtid_idr); |
879 | idr_destroy(&ctx->dev->stid_idr); | 883 | idr_destroy(&ctx->dev->stid_idr); |
880 | idr_destroy(&ctx->dev->atid_idr); | 884 | idr_destroy(&ctx->dev->atid_idr); |
@@ -992,6 +996,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) | |||
992 | mutex_init(&devp->rdev.stats.lock); | 996 | mutex_init(&devp->rdev.stats.lock); |
993 | mutex_init(&devp->db_mutex); | 997 | mutex_init(&devp->db_mutex); |
994 | INIT_LIST_HEAD(&devp->db_fc_list); | 998 | INIT_LIST_HEAD(&devp->db_fc_list); |
999 | init_waitqueue_head(&devp->wait); | ||
995 | devp->avail_ird = devp->rdev.lldi.max_ird_adapter; | 1000 | devp->avail_ird = devp->rdev.lldi.max_ird_adapter; |
996 | 1001 | ||
997 | if (c4iw_debugfs_root) { | 1002 | if (c4iw_debugfs_root) { |
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index aa47e0ae80bc..4b83b84f7ddf 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h | |||
@@ -263,6 +263,7 @@ struct c4iw_dev { | |||
263 | struct idr stid_idr; | 263 | struct idr stid_idr; |
264 | struct list_head db_fc_list; | 264 | struct list_head db_fc_list; |
265 | u32 avail_ird; | 265 | u32 avail_ird; |
266 | wait_queue_head_t wait; | ||
266 | }; | 267 | }; |
267 | 268 | ||
268 | static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev) | 269 | static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev) |
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index edb1172b6f54..690435229be7 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c | |||
@@ -683,7 +683,7 @@ static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, | |||
683 | return 0; | 683 | return 0; |
684 | } | 684 | } |
685 | 685 | ||
686 | void _free_qp(struct kref *kref) | 686 | static void _free_qp(struct kref *kref) |
687 | { | 687 | { |
688 | struct c4iw_qp *qhp; | 688 | struct c4iw_qp *qhp; |
689 | 689 | ||
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h index 6126bbe36095..02173f4315fa 100644 --- a/drivers/infiniband/hw/cxgb4/t4.h +++ b/drivers/infiniband/hw/cxgb4/t4.h | |||
@@ -634,6 +634,11 @@ static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe) | |||
634 | return (CQE_GENBIT(cqe) == cq->gen); | 634 | return (CQE_GENBIT(cqe) == cq->gen); |
635 | } | 635 | } |
636 | 636 | ||
637 | static inline int t4_cq_notempty(struct t4_cq *cq) | ||
638 | { | ||
639 | return cq->sw_in_use || t4_valid_cqe(cq, &cq->queue[cq->cidx]); | ||
640 | } | ||
641 | |||
637 | static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe) | 642 | static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe) |
638 | { | 643 | { |
639 | int ret; | 644 | int ret; |
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c index 79575ee873f2..0566393e5aba 100644 --- a/drivers/infiniband/hw/hfi1/affinity.c +++ b/drivers/infiniband/hw/hfi1/affinity.c | |||
@@ -47,7 +47,6 @@ | |||
47 | #include <linux/topology.h> | 47 | #include <linux/topology.h> |
48 | #include <linux/cpumask.h> | 48 | #include <linux/cpumask.h> |
49 | #include <linux/module.h> | 49 | #include <linux/module.h> |
50 | #include <linux/cpumask.h> | ||
51 | 50 | ||
52 | #include "hfi.h" | 51 | #include "hfi.h" |
53 | #include "affinity.h" | 52 | #include "affinity.h" |
@@ -682,7 +681,7 @@ int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf, | |||
682 | size_t count) | 681 | size_t count) |
683 | { | 682 | { |
684 | struct hfi1_affinity_node *entry; | 683 | struct hfi1_affinity_node *entry; |
685 | struct cpumask mask; | 684 | cpumask_var_t mask; |
686 | int ret, i; | 685 | int ret, i; |
687 | 686 | ||
688 | spin_lock(&node_affinity.lock); | 687 | spin_lock(&node_affinity.lock); |
@@ -692,19 +691,24 @@ int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf, | |||
692 | if (!entry) | 691 | if (!entry) |
693 | return -EINVAL; | 692 | return -EINVAL; |
694 | 693 | ||
695 | ret = cpulist_parse(buf, &mask); | 694 | ret = zalloc_cpumask_var(&mask, GFP_KERNEL); |
695 | if (!ret) | ||
696 | return -ENOMEM; | ||
697 | |||
698 | ret = cpulist_parse(buf, mask); | ||
696 | if (ret) | 699 | if (ret) |
697 | return ret; | 700 | goto out; |
698 | 701 | ||
699 | if (!cpumask_subset(&mask, cpu_online_mask) || cpumask_empty(&mask)) { | 702 | if (!cpumask_subset(mask, cpu_online_mask) || cpumask_empty(mask)) { |
700 | dd_dev_warn(dd, "Invalid CPU mask\n"); | 703 | dd_dev_warn(dd, "Invalid CPU mask\n"); |
701 | return -EINVAL; | 704 | ret = -EINVAL; |
705 | goto out; | ||
702 | } | 706 | } |
703 | 707 | ||
704 | mutex_lock(&sdma_affinity_mutex); | 708 | mutex_lock(&sdma_affinity_mutex); |
705 | /* reset the SDMA interrupt affinity details */ | 709 | /* reset the SDMA interrupt affinity details */ |
706 | init_cpu_mask_set(&entry->def_intr); | 710 | init_cpu_mask_set(&entry->def_intr); |
707 | cpumask_copy(&entry->def_intr.mask, &mask); | 711 | cpumask_copy(&entry->def_intr.mask, mask); |
708 | /* | 712 | /* |
709 | * Reassign the affinity for each SDMA interrupt. | 713 | * Reassign the affinity for each SDMA interrupt. |
710 | */ | 714 | */ |
@@ -720,8 +724,9 @@ int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf, | |||
720 | if (ret) | 724 | if (ret) |
721 | break; | 725 | break; |
722 | } | 726 | } |
723 | |||
724 | mutex_unlock(&sdma_affinity_mutex); | 727 | mutex_unlock(&sdma_affinity_mutex); |
728 | out: | ||
729 | free_cpumask_var(mask); | ||
725 | return ret ? ret : strnlen(buf, PAGE_SIZE); | 730 | return ret ? ret : strnlen(buf, PAGE_SIZE); |
726 | } | 731 | } |
727 | 732 | ||
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index b32638d58ae8..cc38004cea42 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c | |||
@@ -9490,6 +9490,78 @@ static void init_lcb(struct hfi1_devdata *dd) | |||
9490 | write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00); | 9490 | write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00); |
9491 | } | 9491 | } |
9492 | 9492 | ||
9493 | /* | ||
9494 | * Perform a test read on the QSFP. Return 0 on success, -ERRNO | ||
9495 | * on error. | ||
9496 | */ | ||
9497 | static int test_qsfp_read(struct hfi1_pportdata *ppd) | ||
9498 | { | ||
9499 | int ret; | ||
9500 | u8 status; | ||
9501 | |||
9502 | /* report success if not a QSFP */ | ||
9503 | if (ppd->port_type != PORT_TYPE_QSFP) | ||
9504 | return 0; | ||
9505 | |||
9506 | /* read byte 2, the status byte */ | ||
9507 | ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1); | ||
9508 | if (ret < 0) | ||
9509 | return ret; | ||
9510 | if (ret != 1) | ||
9511 | return -EIO; | ||
9512 | |||
9513 | return 0; /* success */ | ||
9514 | } | ||
9515 | |||
9516 | /* | ||
9517 | * Values for QSFP retry. | ||
9518 | * | ||
9519 | * Give up after 10s (20 x 500ms). The overall timeout was empirically | ||
9520 | * arrived at from experience on a large cluster. | ||
9521 | */ | ||
9522 | #define MAX_QSFP_RETRIES 20 | ||
9523 | #define QSFP_RETRY_WAIT 500 /* msec */ | ||
9524 | |||
9525 | /* | ||
9526 | * Try a QSFP read. If it fails, schedule a retry for later. | ||
9527 | * Called on first link activation after driver load. | ||
9528 | */ | ||
9529 | static void try_start_link(struct hfi1_pportdata *ppd) | ||
9530 | { | ||
9531 | if (test_qsfp_read(ppd)) { | ||
9532 | /* read failed */ | ||
9533 | if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) { | ||
9534 | dd_dev_err(ppd->dd, "QSFP not responding, giving up\n"); | ||
9535 | return; | ||
9536 | } | ||
9537 | dd_dev_info(ppd->dd, | ||
9538 | "QSFP not responding, waiting and retrying %d\n", | ||
9539 | (int)ppd->qsfp_retry_count); | ||
9540 | ppd->qsfp_retry_count++; | ||
9541 | queue_delayed_work(ppd->hfi1_wq, &ppd->start_link_work, | ||
9542 | msecs_to_jiffies(QSFP_RETRY_WAIT)); | ||
9543 | return; | ||
9544 | } | ||
9545 | ppd->qsfp_retry_count = 0; | ||
9546 | |||
9547 | /* | ||
9548 | * Tune the SerDes to a ballpark setting for optimal signal and bit | ||
9549 | * error rate. Needs to be done before starting the link. | ||
9550 | */ | ||
9551 | tune_serdes(ppd); | ||
9552 | start_link(ppd); | ||
9553 | } | ||
9554 | |||
9555 | /* | ||
9556 | * Workqueue function to start the link after a delay. | ||
9557 | */ | ||
9558 | void handle_start_link(struct work_struct *work) | ||
9559 | { | ||
9560 | struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, | ||
9561 | start_link_work.work); | ||
9562 | try_start_link(ppd); | ||
9563 | } | ||
9564 | |||
9493 | int bringup_serdes(struct hfi1_pportdata *ppd) | 9565 | int bringup_serdes(struct hfi1_pportdata *ppd) |
9494 | { | 9566 | { |
9495 | struct hfi1_devdata *dd = ppd->dd; | 9567 | struct hfi1_devdata *dd = ppd->dd; |
@@ -9525,14 +9597,8 @@ int bringup_serdes(struct hfi1_pportdata *ppd) | |||
9525 | set_qsfp_int_n(ppd, 1); | 9597 | set_qsfp_int_n(ppd, 1); |
9526 | } | 9598 | } |
9527 | 9599 | ||
9528 | /* | 9600 | try_start_link(ppd); |
9529 | * Tune the SerDes to a ballpark setting for | 9601 | return 0; |
9530 | * optimal signal and bit error rate | ||
9531 | * Needs to be done before starting the link | ||
9532 | */ | ||
9533 | tune_serdes(ppd); | ||
9534 | |||
9535 | return start_link(ppd); | ||
9536 | } | 9602 | } |
9537 | 9603 | ||
9538 | void hfi1_quiet_serdes(struct hfi1_pportdata *ppd) | 9604 | void hfi1_quiet_serdes(struct hfi1_pportdata *ppd) |
@@ -9549,6 +9615,10 @@ void hfi1_quiet_serdes(struct hfi1_pportdata *ppd) | |||
9549 | ppd->driver_link_ready = 0; | 9615 | ppd->driver_link_ready = 0; |
9550 | ppd->link_enabled = 0; | 9616 | ppd->link_enabled = 0; |
9551 | 9617 | ||
9618 | ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */ | ||
9619 | flush_delayed_work(&ppd->start_link_work); | ||
9620 | cancel_delayed_work_sync(&ppd->start_link_work); | ||
9621 | |||
9552 | ppd->offline_disabled_reason = | 9622 | ppd->offline_disabled_reason = |
9553 | HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED); | 9623 | HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED); |
9554 | set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0, | 9624 | set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0, |
@@ -12865,7 +12935,7 @@ fail: | |||
12865 | */ | 12935 | */ |
12866 | static int set_up_context_variables(struct hfi1_devdata *dd) | 12936 | static int set_up_context_variables(struct hfi1_devdata *dd) |
12867 | { | 12937 | { |
12868 | int num_kernel_contexts; | 12938 | unsigned long num_kernel_contexts; |
12869 | int total_contexts; | 12939 | int total_contexts; |
12870 | int ret; | 12940 | int ret; |
12871 | unsigned ngroups; | 12941 | unsigned ngroups; |
@@ -12894,9 +12964,9 @@ static int set_up_context_variables(struct hfi1_devdata *dd) | |||
12894 | */ | 12964 | */ |
12895 | if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) { | 12965 | if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) { |
12896 | dd_dev_err(dd, | 12966 | dd_dev_err(dd, |
12897 | "Reducing # kernel rcv contexts to: %d, from %d\n", | 12967 | "Reducing # kernel rcv contexts to: %d, from %lu\n", |
12898 | (int)(dd->chip_send_contexts - num_vls - 1), | 12968 | (int)(dd->chip_send_contexts - num_vls - 1), |
12899 | (int)num_kernel_contexts); | 12969 | num_kernel_contexts); |
12900 | num_kernel_contexts = dd->chip_send_contexts - num_vls - 1; | 12970 | num_kernel_contexts = dd->chip_send_contexts - num_vls - 1; |
12901 | } | 12971 | } |
12902 | /* | 12972 | /* |
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h index ed11107c50fe..e29573769efc 100644 --- a/drivers/infiniband/hw/hfi1/chip.h +++ b/drivers/infiniband/hw/hfi1/chip.h | |||
@@ -706,6 +706,7 @@ void handle_link_up(struct work_struct *work); | |||
706 | void handle_link_down(struct work_struct *work); | 706 | void handle_link_down(struct work_struct *work); |
707 | void handle_link_downgrade(struct work_struct *work); | 707 | void handle_link_downgrade(struct work_struct *work); |
708 | void handle_link_bounce(struct work_struct *work); | 708 | void handle_link_bounce(struct work_struct *work); |
709 | void handle_start_link(struct work_struct *work); | ||
709 | void handle_sma_message(struct work_struct *work); | 710 | void handle_sma_message(struct work_struct *work); |
710 | void reset_qsfp(struct hfi1_pportdata *ppd); | 711 | void reset_qsfp(struct hfi1_pportdata *ppd); |
711 | void qsfp_event(struct work_struct *work); | 712 | void qsfp_event(struct work_struct *work); |
diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c index dbab9d9cc288..5e9be16f6cd3 100644 --- a/drivers/infiniband/hw/hfi1/debugfs.c +++ b/drivers/infiniband/hw/hfi1/debugfs.c | |||
@@ -59,6 +59,40 @@ | |||
59 | 59 | ||
60 | static struct dentry *hfi1_dbg_root; | 60 | static struct dentry *hfi1_dbg_root; |
61 | 61 | ||
62 | /* wrappers to enforce srcu in seq file */ | ||
63 | static ssize_t hfi1_seq_read( | ||
64 | struct file *file, | ||
65 | char __user *buf, | ||
66 | size_t size, | ||
67 | loff_t *ppos) | ||
68 | { | ||
69 | struct dentry *d = file->f_path.dentry; | ||
70 | int srcu_idx; | ||
71 | ssize_t r; | ||
72 | |||
73 | r = debugfs_use_file_start(d, &srcu_idx); | ||
74 | if (likely(!r)) | ||
75 | r = seq_read(file, buf, size, ppos); | ||
76 | debugfs_use_file_finish(srcu_idx); | ||
77 | return r; | ||
78 | } | ||
79 | |||
80 | static loff_t hfi1_seq_lseek( | ||
81 | struct file *file, | ||
82 | loff_t offset, | ||
83 | int whence) | ||
84 | { | ||
85 | struct dentry *d = file->f_path.dentry; | ||
86 | int srcu_idx; | ||
87 | loff_t r; | ||
88 | |||
89 | r = debugfs_use_file_start(d, &srcu_idx); | ||
90 | if (likely(!r)) | ||
91 | r = seq_lseek(file, offset, whence); | ||
92 | debugfs_use_file_finish(srcu_idx); | ||
93 | return r; | ||
94 | } | ||
95 | |||
62 | #define private2dd(file) (file_inode(file)->i_private) | 96 | #define private2dd(file) (file_inode(file)->i_private) |
63 | #define private2ppd(file) (file_inode(file)->i_private) | 97 | #define private2ppd(file) (file_inode(file)->i_private) |
64 | 98 | ||
@@ -87,8 +121,8 @@ static int _##name##_open(struct inode *inode, struct file *s) \ | |||
87 | static const struct file_operations _##name##_file_ops = { \ | 121 | static const struct file_operations _##name##_file_ops = { \ |
88 | .owner = THIS_MODULE, \ | 122 | .owner = THIS_MODULE, \ |
89 | .open = _##name##_open, \ | 123 | .open = _##name##_open, \ |
90 | .read = seq_read, \ | 124 | .read = hfi1_seq_read, \ |
91 | .llseek = seq_lseek, \ | 125 | .llseek = hfi1_seq_lseek, \ |
92 | .release = seq_release \ | 126 | .release = seq_release \ |
93 | } | 127 | } |
94 | 128 | ||
@@ -105,11 +139,9 @@ do { \ | |||
105 | DEBUGFS_FILE_CREATE(#name, parent, data, &_##name##_file_ops, S_IRUGO) | 139 | DEBUGFS_FILE_CREATE(#name, parent, data, &_##name##_file_ops, S_IRUGO) |
106 | 140 | ||
107 | static void *_opcode_stats_seq_start(struct seq_file *s, loff_t *pos) | 141 | static void *_opcode_stats_seq_start(struct seq_file *s, loff_t *pos) |
108 | __acquires(RCU) | ||
109 | { | 142 | { |
110 | struct hfi1_opcode_stats_perctx *opstats; | 143 | struct hfi1_opcode_stats_perctx *opstats; |
111 | 144 | ||
112 | rcu_read_lock(); | ||
113 | if (*pos >= ARRAY_SIZE(opstats->stats)) | 145 | if (*pos >= ARRAY_SIZE(opstats->stats)) |
114 | return NULL; | 146 | return NULL; |
115 | return pos; | 147 | return pos; |
@@ -126,9 +158,7 @@ static void *_opcode_stats_seq_next(struct seq_file *s, void *v, loff_t *pos) | |||
126 | } | 158 | } |
127 | 159 | ||
128 | static void _opcode_stats_seq_stop(struct seq_file *s, void *v) | 160 | static void _opcode_stats_seq_stop(struct seq_file *s, void *v) |
129 | __releases(RCU) | ||
130 | { | 161 | { |
131 | rcu_read_unlock(); | ||
132 | } | 162 | } |
133 | 163 | ||
134 | static int _opcode_stats_seq_show(struct seq_file *s, void *v) | 164 | static int _opcode_stats_seq_show(struct seq_file *s, void *v) |
@@ -223,28 +253,32 @@ DEBUGFS_SEQ_FILE_OPEN(ctx_stats) | |||
223 | DEBUGFS_FILE_OPS(ctx_stats); | 253 | DEBUGFS_FILE_OPS(ctx_stats); |
224 | 254 | ||
225 | static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos) | 255 | static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos) |
226 | __acquires(RCU) | 256 | __acquires(RCU) |
227 | { | 257 | { |
228 | struct qp_iter *iter; | 258 | struct qp_iter *iter; |
229 | loff_t n = *pos; | 259 | loff_t n = *pos; |
230 | 260 | ||
231 | rcu_read_lock(); | ||
232 | iter = qp_iter_init(s->private); | 261 | iter = qp_iter_init(s->private); |
262 | |||
263 | /* stop calls rcu_read_unlock */ | ||
264 | rcu_read_lock(); | ||
265 | |||
233 | if (!iter) | 266 | if (!iter) |
234 | return NULL; | 267 | return NULL; |
235 | 268 | ||
236 | while (n--) { | 269 | do { |
237 | if (qp_iter_next(iter)) { | 270 | if (qp_iter_next(iter)) { |
238 | kfree(iter); | 271 | kfree(iter); |
239 | return NULL; | 272 | return NULL; |
240 | } | 273 | } |
241 | } | 274 | } while (n--); |
242 | 275 | ||
243 | return iter; | 276 | return iter; |
244 | } | 277 | } |
245 | 278 | ||
246 | static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr, | 279 | static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr, |
247 | loff_t *pos) | 280 | loff_t *pos) |
281 | __must_hold(RCU) | ||
248 | { | 282 | { |
249 | struct qp_iter *iter = iter_ptr; | 283 | struct qp_iter *iter = iter_ptr; |
250 | 284 | ||
@@ -259,7 +293,7 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr, | |||
259 | } | 293 | } |
260 | 294 | ||
261 | static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr) | 295 | static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr) |
262 | __releases(RCU) | 296 | __releases(RCU) |
263 | { | 297 | { |
264 | rcu_read_unlock(); | 298 | rcu_read_unlock(); |
265 | } | 299 | } |
@@ -281,12 +315,10 @@ DEBUGFS_SEQ_FILE_OPEN(qp_stats) | |||
281 | DEBUGFS_FILE_OPS(qp_stats); | 315 | DEBUGFS_FILE_OPS(qp_stats); |
282 | 316 | ||
283 | static void *_sdes_seq_start(struct seq_file *s, loff_t *pos) | 317 | static void *_sdes_seq_start(struct seq_file *s, loff_t *pos) |
284 | __acquires(RCU) | ||
285 | { | 318 | { |
286 | struct hfi1_ibdev *ibd; | 319 | struct hfi1_ibdev *ibd; |
287 | struct hfi1_devdata *dd; | 320 | struct hfi1_devdata *dd; |
288 | 321 | ||
289 | rcu_read_lock(); | ||
290 | ibd = (struct hfi1_ibdev *)s->private; | 322 | ibd = (struct hfi1_ibdev *)s->private; |
291 | dd = dd_from_dev(ibd); | 323 | dd = dd_from_dev(ibd); |
292 | if (!dd->per_sdma || *pos >= dd->num_sdma) | 324 | if (!dd->per_sdma || *pos >= dd->num_sdma) |
@@ -306,9 +338,7 @@ static void *_sdes_seq_next(struct seq_file *s, void *v, loff_t *pos) | |||
306 | } | 338 | } |
307 | 339 | ||
308 | static void _sdes_seq_stop(struct seq_file *s, void *v) | 340 | static void _sdes_seq_stop(struct seq_file *s, void *v) |
309 | __releases(RCU) | ||
310 | { | 341 | { |
311 | rcu_read_unlock(); | ||
312 | } | 342 | } |
313 | 343 | ||
314 | static int _sdes_seq_show(struct seq_file *s, void *v) | 344 | static int _sdes_seq_show(struct seq_file *s, void *v) |
@@ -335,11 +365,9 @@ static ssize_t dev_counters_read(struct file *file, char __user *buf, | |||
335 | struct hfi1_devdata *dd; | 365 | struct hfi1_devdata *dd; |
336 | ssize_t rval; | 366 | ssize_t rval; |
337 | 367 | ||
338 | rcu_read_lock(); | ||
339 | dd = private2dd(file); | 368 | dd = private2dd(file); |
340 | avail = hfi1_read_cntrs(dd, NULL, &counters); | 369 | avail = hfi1_read_cntrs(dd, NULL, &counters); |
341 | rval = simple_read_from_buffer(buf, count, ppos, counters, avail); | 370 | rval = simple_read_from_buffer(buf, count, ppos, counters, avail); |
342 | rcu_read_unlock(); | ||
343 | return rval; | 371 | return rval; |
344 | } | 372 | } |
345 | 373 | ||
@@ -352,11 +380,9 @@ static ssize_t dev_names_read(struct file *file, char __user *buf, | |||
352 | struct hfi1_devdata *dd; | 380 | struct hfi1_devdata *dd; |
353 | ssize_t rval; | 381 | ssize_t rval; |
354 | 382 | ||
355 | rcu_read_lock(); | ||
356 | dd = private2dd(file); | 383 | dd = private2dd(file); |
357 | avail = hfi1_read_cntrs(dd, &names, NULL); | 384 | avail = hfi1_read_cntrs(dd, &names, NULL); |
358 | rval = simple_read_from_buffer(buf, count, ppos, names, avail); | 385 | rval = simple_read_from_buffer(buf, count, ppos, names, avail); |
359 | rcu_read_unlock(); | ||
360 | return rval; | 386 | return rval; |
361 | } | 387 | } |
362 | 388 | ||
@@ -379,11 +405,9 @@ static ssize_t portnames_read(struct file *file, char __user *buf, | |||
379 | struct hfi1_devdata *dd; | 405 | struct hfi1_devdata *dd; |
380 | ssize_t rval; | 406 | ssize_t rval; |
381 | 407 | ||
382 | rcu_read_lock(); | ||
383 | dd = private2dd(file); | 408 | dd = private2dd(file); |
384 | avail = hfi1_read_portcntrs(dd->pport, &names, NULL); | 409 | avail = hfi1_read_portcntrs(dd->pport, &names, NULL); |
385 | rval = simple_read_from_buffer(buf, count, ppos, names, avail); | 410 | rval = simple_read_from_buffer(buf, count, ppos, names, avail); |
386 | rcu_read_unlock(); | ||
387 | return rval; | 411 | return rval; |
388 | } | 412 | } |
389 | 413 | ||
@@ -396,11 +420,9 @@ static ssize_t portcntrs_debugfs_read(struct file *file, char __user *buf, | |||
396 | struct hfi1_pportdata *ppd; | 420 | struct hfi1_pportdata *ppd; |
397 | ssize_t rval; | 421 | ssize_t rval; |
398 | 422 | ||
399 | rcu_read_lock(); | ||
400 | ppd = private2ppd(file); | 423 | ppd = private2ppd(file); |
401 | avail = hfi1_read_portcntrs(ppd, NULL, &counters); | 424 | avail = hfi1_read_portcntrs(ppd, NULL, &counters); |
402 | rval = simple_read_from_buffer(buf, count, ppos, counters, avail); | 425 | rval = simple_read_from_buffer(buf, count, ppos, counters, avail); |
403 | rcu_read_unlock(); | ||
404 | return rval; | 426 | return rval; |
405 | } | 427 | } |
406 | 428 | ||
@@ -430,16 +452,13 @@ static ssize_t asic_flags_read(struct file *file, char __user *buf, | |||
430 | int used; | 452 | int used; |
431 | int i; | 453 | int i; |
432 | 454 | ||
433 | rcu_read_lock(); | ||
434 | ppd = private2ppd(file); | 455 | ppd = private2ppd(file); |
435 | dd = ppd->dd; | 456 | dd = ppd->dd; |
436 | size = PAGE_SIZE; | 457 | size = PAGE_SIZE; |
437 | used = 0; | 458 | used = 0; |
438 | tmp = kmalloc(size, GFP_KERNEL); | 459 | tmp = kmalloc(size, GFP_KERNEL); |
439 | if (!tmp) { | 460 | if (!tmp) |
440 | rcu_read_unlock(); | ||
441 | return -ENOMEM; | 461 | return -ENOMEM; |
442 | } | ||
443 | 462 | ||
444 | scratch0 = read_csr(dd, ASIC_CFG_SCRATCH); | 463 | scratch0 = read_csr(dd, ASIC_CFG_SCRATCH); |
445 | used += scnprintf(tmp + used, size - used, | 464 | used += scnprintf(tmp + used, size - used, |
@@ -466,7 +485,6 @@ static ssize_t asic_flags_read(struct file *file, char __user *buf, | |||
466 | used += scnprintf(tmp + used, size - used, "Write bits to clear\n"); | 485 | used += scnprintf(tmp + used, size - used, "Write bits to clear\n"); |
467 | 486 | ||
468 | ret = simple_read_from_buffer(buf, count, ppos, tmp, used); | 487 | ret = simple_read_from_buffer(buf, count, ppos, tmp, used); |
469 | rcu_read_unlock(); | ||
470 | kfree(tmp); | 488 | kfree(tmp); |
471 | return ret; | 489 | return ret; |
472 | } | 490 | } |
@@ -482,15 +500,12 @@ static ssize_t asic_flags_write(struct file *file, const char __user *buf, | |||
482 | u64 scratch0; | 500 | u64 scratch0; |
483 | u64 clear; | 501 | u64 clear; |
484 | 502 | ||
485 | rcu_read_lock(); | ||
486 | ppd = private2ppd(file); | 503 | ppd = private2ppd(file); |
487 | dd = ppd->dd; | 504 | dd = ppd->dd; |
488 | 505 | ||
489 | buff = kmalloc(count + 1, GFP_KERNEL); | 506 | buff = kmalloc(count + 1, GFP_KERNEL); |
490 | if (!buff) { | 507 | if (!buff) |
491 | ret = -ENOMEM; | 508 | return -ENOMEM; |
492 | goto do_return; | ||
493 | } | ||
494 | 509 | ||
495 | ret = copy_from_user(buff, buf, count); | 510 | ret = copy_from_user(buff, buf, count); |
496 | if (ret > 0) { | 511 | if (ret > 0) { |
@@ -523,8 +538,6 @@ static ssize_t asic_flags_write(struct file *file, const char __user *buf, | |||
523 | 538 | ||
524 | do_free: | 539 | do_free: |
525 | kfree(buff); | 540 | kfree(buff); |
526 | do_return: | ||
527 | rcu_read_unlock(); | ||
528 | return ret; | 541 | return ret; |
529 | } | 542 | } |
530 | 543 | ||
@@ -538,18 +551,14 @@ static ssize_t qsfp_debugfs_dump(struct file *file, char __user *buf, | |||
538 | char *tmp; | 551 | char *tmp; |
539 | int ret; | 552 | int ret; |
540 | 553 | ||
541 | rcu_read_lock(); | ||
542 | ppd = private2ppd(file); | 554 | ppd = private2ppd(file); |
543 | tmp = kmalloc(PAGE_SIZE, GFP_KERNEL); | 555 | tmp = kmalloc(PAGE_SIZE, GFP_KERNEL); |
544 | if (!tmp) { | 556 | if (!tmp) |
545 | rcu_read_unlock(); | ||
546 | return -ENOMEM; | 557 | return -ENOMEM; |
547 | } | ||
548 | 558 | ||
549 | ret = qsfp_dump(ppd, tmp, PAGE_SIZE); | 559 | ret = qsfp_dump(ppd, tmp, PAGE_SIZE); |
550 | if (ret > 0) | 560 | if (ret > 0) |
551 | ret = simple_read_from_buffer(buf, count, ppos, tmp, ret); | 561 | ret = simple_read_from_buffer(buf, count, ppos, tmp, ret); |
552 | rcu_read_unlock(); | ||
553 | kfree(tmp); | 562 | kfree(tmp); |
554 | return ret; | 563 | return ret; |
555 | } | 564 | } |
@@ -565,7 +574,6 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf, | |||
565 | int offset; | 574 | int offset; |
566 | int total_written; | 575 | int total_written; |
567 | 576 | ||
568 | rcu_read_lock(); | ||
569 | ppd = private2ppd(file); | 577 | ppd = private2ppd(file); |
570 | 578 | ||
571 | /* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */ | 579 | /* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */ |
@@ -573,16 +581,12 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf, | |||
573 | offset = *ppos & 0xffff; | 581 | offset = *ppos & 0xffff; |
574 | 582 | ||
575 | /* explicitly reject invalid address 0 to catch cp and cat */ | 583 | /* explicitly reject invalid address 0 to catch cp and cat */ |
576 | if (i2c_addr == 0) { | 584 | if (i2c_addr == 0) |
577 | ret = -EINVAL; | 585 | return -EINVAL; |
578 | goto _return; | ||
579 | } | ||
580 | 586 | ||
581 | buff = kmalloc(count, GFP_KERNEL); | 587 | buff = kmalloc(count, GFP_KERNEL); |
582 | if (!buff) { | 588 | if (!buff) |
583 | ret = -ENOMEM; | 589 | return -ENOMEM; |
584 | goto _return; | ||
585 | } | ||
586 | 590 | ||
587 | ret = copy_from_user(buff, buf, count); | 591 | ret = copy_from_user(buff, buf, count); |
588 | if (ret > 0) { | 592 | if (ret > 0) { |
@@ -602,8 +606,6 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf, | |||
602 | 606 | ||
603 | _free: | 607 | _free: |
604 | kfree(buff); | 608 | kfree(buff); |
605 | _return: | ||
606 | rcu_read_unlock(); | ||
607 | return ret; | 609 | return ret; |
608 | } | 610 | } |
609 | 611 | ||
@@ -632,7 +634,6 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf, | |||
632 | int offset; | 634 | int offset; |
633 | int total_read; | 635 | int total_read; |
634 | 636 | ||
635 | rcu_read_lock(); | ||
636 | ppd = private2ppd(file); | 637 | ppd = private2ppd(file); |
637 | 638 | ||
638 | /* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */ | 639 | /* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */ |
@@ -640,16 +641,12 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf, | |||
640 | offset = *ppos & 0xffff; | 641 | offset = *ppos & 0xffff; |
641 | 642 | ||
642 | /* explicitly reject invalid address 0 to catch cp and cat */ | 643 | /* explicitly reject invalid address 0 to catch cp and cat */ |
643 | if (i2c_addr == 0) { | 644 | if (i2c_addr == 0) |
644 | ret = -EINVAL; | 645 | return -EINVAL; |
645 | goto _return; | ||
646 | } | ||
647 | 646 | ||
648 | buff = kmalloc(count, GFP_KERNEL); | 647 | buff = kmalloc(count, GFP_KERNEL); |
649 | if (!buff) { | 648 | if (!buff) |
650 | ret = -ENOMEM; | 649 | return -ENOMEM; |
651 | goto _return; | ||
652 | } | ||
653 | 650 | ||
654 | total_read = i2c_read(ppd, target, i2c_addr, offset, buff, count); | 651 | total_read = i2c_read(ppd, target, i2c_addr, offset, buff, count); |
655 | if (total_read < 0) { | 652 | if (total_read < 0) { |
@@ -669,8 +666,6 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf, | |||
669 | 666 | ||
670 | _free: | 667 | _free: |
671 | kfree(buff); | 668 | kfree(buff); |
672 | _return: | ||
673 | rcu_read_unlock(); | ||
674 | return ret; | 669 | return ret; |
675 | } | 670 | } |
676 | 671 | ||
@@ -697,26 +692,20 @@ static ssize_t __qsfp_debugfs_write(struct file *file, const char __user *buf, | |||
697 | int ret; | 692 | int ret; |
698 | int total_written; | 693 | int total_written; |
699 | 694 | ||
700 | rcu_read_lock(); | 695 | if (*ppos + count > QSFP_PAGESIZE * 4) /* base page + page00-page03 */ |
701 | if (*ppos + count > QSFP_PAGESIZE * 4) { /* base page + page00-page03 */ | 696 | return -EINVAL; |
702 | ret = -EINVAL; | ||
703 | goto _return; | ||
704 | } | ||
705 | 697 | ||
706 | ppd = private2ppd(file); | 698 | ppd = private2ppd(file); |
707 | 699 | ||
708 | buff = kmalloc(count, GFP_KERNEL); | 700 | buff = kmalloc(count, GFP_KERNEL); |
709 | if (!buff) { | 701 | if (!buff) |
710 | ret = -ENOMEM; | 702 | return -ENOMEM; |
711 | goto _return; | ||
712 | } | ||
713 | 703 | ||
714 | ret = copy_from_user(buff, buf, count); | 704 | ret = copy_from_user(buff, buf, count); |
715 | if (ret > 0) { | 705 | if (ret > 0) { |
716 | ret = -EFAULT; | 706 | ret = -EFAULT; |
717 | goto _free; | 707 | goto _free; |
718 | } | 708 | } |
719 | |||
720 | total_written = qsfp_write(ppd, target, *ppos, buff, count); | 709 | total_written = qsfp_write(ppd, target, *ppos, buff, count); |
721 | if (total_written < 0) { | 710 | if (total_written < 0) { |
722 | ret = total_written; | 711 | ret = total_written; |
@@ -729,8 +718,6 @@ static ssize_t __qsfp_debugfs_write(struct file *file, const char __user *buf, | |||
729 | 718 | ||
730 | _free: | 719 | _free: |
731 | kfree(buff); | 720 | kfree(buff); |
732 | _return: | ||
733 | rcu_read_unlock(); | ||
734 | return ret; | 721 | return ret; |
735 | } | 722 | } |
736 | 723 | ||
@@ -757,7 +744,6 @@ static ssize_t __qsfp_debugfs_read(struct file *file, char __user *buf, | |||
757 | int ret; | 744 | int ret; |
758 | int total_read; | 745 | int total_read; |
759 | 746 | ||
760 | rcu_read_lock(); | ||
761 | if (*ppos + count > QSFP_PAGESIZE * 4) { /* base page + page00-page03 */ | 747 | if (*ppos + count > QSFP_PAGESIZE * 4) { /* base page + page00-page03 */ |
762 | ret = -EINVAL; | 748 | ret = -EINVAL; |
763 | goto _return; | 749 | goto _return; |
@@ -790,7 +776,6 @@ static ssize_t __qsfp_debugfs_read(struct file *file, char __user *buf, | |||
790 | _free: | 776 | _free: |
791 | kfree(buff); | 777 | kfree(buff); |
792 | _return: | 778 | _return: |
793 | rcu_read_unlock(); | ||
794 | return ret; | 779 | return ret; |
795 | } | 780 | } |
796 | 781 | ||
@@ -1006,7 +991,6 @@ void hfi1_dbg_ibdev_exit(struct hfi1_ibdev *ibd) | |||
1006 | debugfs_remove_recursive(ibd->hfi1_ibdev_dbg); | 991 | debugfs_remove_recursive(ibd->hfi1_ibdev_dbg); |
1007 | out: | 992 | out: |
1008 | ibd->hfi1_ibdev_dbg = NULL; | 993 | ibd->hfi1_ibdev_dbg = NULL; |
1009 | synchronize_rcu(); | ||
1010 | } | 994 | } |
1011 | 995 | ||
1012 | /* | 996 | /* |
@@ -1031,9 +1015,7 @@ static const char * const hfi1_statnames[] = { | |||
1031 | }; | 1015 | }; |
1032 | 1016 | ||
1033 | static void *_driver_stats_names_seq_start(struct seq_file *s, loff_t *pos) | 1017 | static void *_driver_stats_names_seq_start(struct seq_file *s, loff_t *pos) |
1034 | __acquires(RCU) | ||
1035 | { | 1018 | { |
1036 | rcu_read_lock(); | ||
1037 | if (*pos >= ARRAY_SIZE(hfi1_statnames)) | 1019 | if (*pos >= ARRAY_SIZE(hfi1_statnames)) |
1038 | return NULL; | 1020 | return NULL; |
1039 | return pos; | 1021 | return pos; |
@@ -1051,9 +1033,7 @@ static void *_driver_stats_names_seq_next( | |||
1051 | } | 1033 | } |
1052 | 1034 | ||
1053 | static void _driver_stats_names_seq_stop(struct seq_file *s, void *v) | 1035 | static void _driver_stats_names_seq_stop(struct seq_file *s, void *v) |
1054 | __releases(RCU) | ||
1055 | { | 1036 | { |
1056 | rcu_read_unlock(); | ||
1057 | } | 1037 | } |
1058 | 1038 | ||
1059 | static int _driver_stats_names_seq_show(struct seq_file *s, void *v) | 1039 | static int _driver_stats_names_seq_show(struct seq_file *s, void *v) |
@@ -1069,9 +1049,7 @@ DEBUGFS_SEQ_FILE_OPEN(driver_stats_names) | |||
1069 | DEBUGFS_FILE_OPS(driver_stats_names); | 1049 | DEBUGFS_FILE_OPS(driver_stats_names); |
1070 | 1050 | ||
1071 | static void *_driver_stats_seq_start(struct seq_file *s, loff_t *pos) | 1051 | static void *_driver_stats_seq_start(struct seq_file *s, loff_t *pos) |
1072 | __acquires(RCU) | ||
1073 | { | 1052 | { |
1074 | rcu_read_lock(); | ||
1075 | if (*pos >= ARRAY_SIZE(hfi1_statnames)) | 1053 | if (*pos >= ARRAY_SIZE(hfi1_statnames)) |
1076 | return NULL; | 1054 | return NULL; |
1077 | return pos; | 1055 | return pos; |
@@ -1086,9 +1064,7 @@ static void *_driver_stats_seq_next(struct seq_file *s, void *v, loff_t *pos) | |||
1086 | } | 1064 | } |
1087 | 1065 | ||
1088 | static void _driver_stats_seq_stop(struct seq_file *s, void *v) | 1066 | static void _driver_stats_seq_stop(struct seq_file *s, void *v) |
1089 | __releases(RCU) | ||
1090 | { | 1067 | { |
1091 | rcu_read_unlock(); | ||
1092 | } | 1068 | } |
1093 | 1069 | ||
1094 | static u64 hfi1_sps_ints(void) | 1070 | static u64 hfi1_sps_ints(void) |
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c index 8246dc7d0573..303f10555729 100644 --- a/drivers/infiniband/hw/hfi1/driver.c +++ b/drivers/infiniband/hw/hfi1/driver.c | |||
@@ -888,14 +888,15 @@ void set_all_slowpath(struct hfi1_devdata *dd) | |||
888 | } | 888 | } |
889 | 889 | ||
890 | static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd, | 890 | static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd, |
891 | struct hfi1_packet packet, | 891 | struct hfi1_packet *packet, |
892 | struct hfi1_devdata *dd) | 892 | struct hfi1_devdata *dd) |
893 | { | 893 | { |
894 | struct work_struct *lsaw = &rcd->ppd->linkstate_active_work; | 894 | struct work_struct *lsaw = &rcd->ppd->linkstate_active_work; |
895 | struct hfi1_message_header *hdr = hfi1_get_msgheader(packet.rcd->dd, | 895 | struct hfi1_message_header *hdr = hfi1_get_msgheader(packet->rcd->dd, |
896 | packet.rhf_addr); | 896 | packet->rhf_addr); |
897 | u8 etype = rhf_rcv_type(packet->rhf); | ||
897 | 898 | ||
898 | if (hdr2sc(hdr, packet.rhf) != 0xf) { | 899 | if (etype == RHF_RCV_TYPE_IB && hdr2sc(hdr, packet->rhf) != 0xf) { |
899 | int hwstate = read_logical_state(dd); | 900 | int hwstate = read_logical_state(dd); |
900 | 901 | ||
901 | if (hwstate != LSTATE_ACTIVE) { | 902 | if (hwstate != LSTATE_ACTIVE) { |
@@ -979,7 +980,7 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread) | |||
979 | /* Auto activate link on non-SC15 packet receive */ | 980 | /* Auto activate link on non-SC15 packet receive */ |
980 | if (unlikely(rcd->ppd->host_link_state == | 981 | if (unlikely(rcd->ppd->host_link_state == |
981 | HLS_UP_ARMED) && | 982 | HLS_UP_ARMED) && |
982 | set_armed_to_active(rcd, packet, dd)) | 983 | set_armed_to_active(rcd, &packet, dd)) |
983 | goto bail; | 984 | goto bail; |
984 | last = process_rcv_packet(&packet, thread); | 985 | last = process_rcv_packet(&packet, thread); |
985 | } | 986 | } |
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index 1ecbec192358..7e03ccd2554d 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c | |||
@@ -183,6 +183,7 @@ static int hfi1_file_open(struct inode *inode, struct file *fp) | |||
183 | if (fd) { | 183 | if (fd) { |
184 | fd->rec_cpu_num = -1; /* no cpu affinity by default */ | 184 | fd->rec_cpu_num = -1; /* no cpu affinity by default */ |
185 | fd->mm = current->mm; | 185 | fd->mm = current->mm; |
186 | atomic_inc(&fd->mm->mm_count); | ||
186 | } | 187 | } |
187 | 188 | ||
188 | fp->private_data = fd; | 189 | fp->private_data = fd; |
@@ -222,7 +223,7 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd, | |||
222 | ret = assign_ctxt(fp, &uinfo); | 223 | ret = assign_ctxt(fp, &uinfo); |
223 | if (ret < 0) | 224 | if (ret < 0) |
224 | return ret; | 225 | return ret; |
225 | setup_ctxt(fp); | 226 | ret = setup_ctxt(fp); |
226 | if (ret) | 227 | if (ret) |
227 | return ret; | 228 | return ret; |
228 | ret = user_init(fp); | 229 | ret = user_init(fp); |
@@ -779,6 +780,7 @@ static int hfi1_file_close(struct inode *inode, struct file *fp) | |||
779 | mutex_unlock(&hfi1_mutex); | 780 | mutex_unlock(&hfi1_mutex); |
780 | hfi1_free_ctxtdata(dd, uctxt); | 781 | hfi1_free_ctxtdata(dd, uctxt); |
781 | done: | 782 | done: |
783 | mmdrop(fdata->mm); | ||
782 | kobject_put(&dd->kobj); | 784 | kobject_put(&dd->kobj); |
783 | kfree(fdata); | 785 | kfree(fdata); |
784 | return 0; | 786 | return 0; |
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index 1000e0fd96d9..325ec211370f 100644 --- a/drivers/infiniband/hw/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h | |||
@@ -605,6 +605,7 @@ struct hfi1_pportdata { | |||
605 | struct work_struct freeze_work; | 605 | struct work_struct freeze_work; |
606 | struct work_struct link_downgrade_work; | 606 | struct work_struct link_downgrade_work; |
607 | struct work_struct link_bounce_work; | 607 | struct work_struct link_bounce_work; |
608 | struct delayed_work start_link_work; | ||
608 | /* host link state variables */ | 609 | /* host link state variables */ |
609 | struct mutex hls_lock; | 610 | struct mutex hls_lock; |
610 | u32 host_link_state; | 611 | u32 host_link_state; |
@@ -659,6 +660,7 @@ struct hfi1_pportdata { | |||
659 | u8 linkinit_reason; | 660 | u8 linkinit_reason; |
660 | u8 local_tx_rate; /* rate given to 8051 firmware */ | 661 | u8 local_tx_rate; /* rate given to 8051 firmware */ |
661 | u8 last_pstate; /* info only */ | 662 | u8 last_pstate; /* info only */ |
663 | u8 qsfp_retry_count; | ||
662 | 664 | ||
663 | /* placeholders for IB MAD packet settings */ | 665 | /* placeholders for IB MAD packet settings */ |
664 | u8 overrun_threshold; | 666 | u8 overrun_threshold; |
@@ -1272,9 +1274,26 @@ static inline int hdr2sc(struct hfi1_message_header *hdr, u64 rhf) | |||
1272 | ((!!(rhf_dc_info(rhf))) << 4); | 1274 | ((!!(rhf_dc_info(rhf))) << 4); |
1273 | } | 1275 | } |
1274 | 1276 | ||
1277 | #define HFI1_JKEY_WIDTH 16 | ||
1278 | #define HFI1_JKEY_MASK (BIT(16) - 1) | ||
1279 | #define HFI1_ADMIN_JKEY_RANGE 32 | ||
1280 | |||
1281 | /* | ||
1282 | * J_KEYs are split and allocated in the following groups: | ||
1283 | * 0 - 31 - users with administrator privileges | ||
1284 | * 32 - 63 - kernel protocols using KDETH packets | ||
1285 | * 64 - 65535 - all other users using KDETH packets | ||
1286 | */ | ||
1275 | static inline u16 generate_jkey(kuid_t uid) | 1287 | static inline u16 generate_jkey(kuid_t uid) |
1276 | { | 1288 | { |
1277 | return from_kuid(current_user_ns(), uid) & 0xffff; | 1289 | u16 jkey = from_kuid(current_user_ns(), uid) & HFI1_JKEY_MASK; |
1290 | |||
1291 | if (capable(CAP_SYS_ADMIN)) | ||
1292 | jkey &= HFI1_ADMIN_JKEY_RANGE - 1; | ||
1293 | else if (jkey < 64) | ||
1294 | jkey |= BIT(HFI1_JKEY_WIDTH - 1); | ||
1295 | |||
1296 | return jkey; | ||
1278 | } | 1297 | } |
1279 | 1298 | ||
1280 | /* | 1299 | /* |
@@ -1656,7 +1675,6 @@ struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd) | |||
1656 | struct hfi1_devdata *hfi1_init_dd(struct pci_dev *, | 1675 | struct hfi1_devdata *hfi1_init_dd(struct pci_dev *, |
1657 | const struct pci_device_id *); | 1676 | const struct pci_device_id *); |
1658 | void hfi1_free_devdata(struct hfi1_devdata *); | 1677 | void hfi1_free_devdata(struct hfi1_devdata *); |
1659 | void cc_state_reclaim(struct rcu_head *rcu); | ||
1660 | struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra); | 1678 | struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra); |
1661 | 1679 | ||
1662 | /* LED beaconing functions */ | 1680 | /* LED beaconing functions */ |
@@ -1788,7 +1806,7 @@ extern unsigned int hfi1_max_mtu; | |||
1788 | extern unsigned int hfi1_cu; | 1806 | extern unsigned int hfi1_cu; |
1789 | extern unsigned int user_credit_return_threshold; | 1807 | extern unsigned int user_credit_return_threshold; |
1790 | extern int num_user_contexts; | 1808 | extern int num_user_contexts; |
1791 | extern unsigned n_krcvqs; | 1809 | extern unsigned long n_krcvqs; |
1792 | extern uint krcvqs[]; | 1810 | extern uint krcvqs[]; |
1793 | extern int krcvqsset; | 1811 | extern int krcvqsset; |
1794 | extern uint kdeth_qp; | 1812 | extern uint kdeth_qp; |
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index a358d23ecd54..384b43d2fd49 100644 --- a/drivers/infiniband/hw/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c | |||
@@ -94,7 +94,7 @@ module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO); | |||
94 | MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL"); | 94 | MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL"); |
95 | 95 | ||
96 | /* computed based on above array */ | 96 | /* computed based on above array */ |
97 | unsigned n_krcvqs; | 97 | unsigned long n_krcvqs; |
98 | 98 | ||
99 | static unsigned hfi1_rcvarr_split = 25; | 99 | static unsigned hfi1_rcvarr_split = 25; |
100 | module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO); | 100 | module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO); |
@@ -500,6 +500,7 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd, | |||
500 | INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade); | 500 | INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade); |
501 | INIT_WORK(&ppd->sma_message_work, handle_sma_message); | 501 | INIT_WORK(&ppd->sma_message_work, handle_sma_message); |
502 | INIT_WORK(&ppd->link_bounce_work, handle_link_bounce); | 502 | INIT_WORK(&ppd->link_bounce_work, handle_link_bounce); |
503 | INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link); | ||
503 | INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work); | 504 | INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work); |
504 | INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event); | 505 | INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event); |
505 | 506 | ||
@@ -1333,7 +1334,7 @@ static void cleanup_device_data(struct hfi1_devdata *dd) | |||
1333 | spin_unlock(&ppd->cc_state_lock); | 1334 | spin_unlock(&ppd->cc_state_lock); |
1334 | 1335 | ||
1335 | if (cc_state) | 1336 | if (cc_state) |
1336 | call_rcu(&cc_state->rcu, cc_state_reclaim); | 1337 | kfree_rcu(cc_state, rcu); |
1337 | } | 1338 | } |
1338 | 1339 | ||
1339 | free_credit_return(dd); | 1340 | free_credit_return(dd); |
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c index 1263abe01999..7ffc14f21523 100644 --- a/drivers/infiniband/hw/hfi1/mad.c +++ b/drivers/infiniband/hw/hfi1/mad.c | |||
@@ -1819,6 +1819,11 @@ static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data, | |||
1819 | u32 len = OPA_AM_CI_LEN(am) + 1; | 1819 | u32 len = OPA_AM_CI_LEN(am) + 1; |
1820 | int ret; | 1820 | int ret; |
1821 | 1821 | ||
1822 | if (dd->pport->port_type != PORT_TYPE_QSFP) { | ||
1823 | smp->status |= IB_SMP_INVALID_FIELD; | ||
1824 | return reply((struct ib_mad_hdr *)smp); | ||
1825 | } | ||
1826 | |||
1822 | #define __CI_PAGE_SIZE BIT(7) /* 128 bytes */ | 1827 | #define __CI_PAGE_SIZE BIT(7) /* 128 bytes */ |
1823 | #define __CI_PAGE_MASK ~(__CI_PAGE_SIZE - 1) | 1828 | #define __CI_PAGE_MASK ~(__CI_PAGE_SIZE - 1) |
1824 | #define __CI_PAGE_NUM(a) ((a) & __CI_PAGE_MASK) | 1829 | #define __CI_PAGE_NUM(a) ((a) & __CI_PAGE_MASK) |
@@ -2599,7 +2604,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp, | |||
2599 | u8 lq, num_vls; | 2604 | u8 lq, num_vls; |
2600 | u8 res_lli, res_ler; | 2605 | u8 res_lli, res_ler; |
2601 | u64 port_mask; | 2606 | u64 port_mask; |
2602 | unsigned long port_num; | 2607 | u8 port_num; |
2603 | unsigned long vl; | 2608 | unsigned long vl; |
2604 | u32 vl_select_mask; | 2609 | u32 vl_select_mask; |
2605 | int vfi; | 2610 | int vfi; |
@@ -2633,9 +2638,9 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp, | |||
2633 | */ | 2638 | */ |
2634 | port_mask = be64_to_cpu(req->port_select_mask[3]); | 2639 | port_mask = be64_to_cpu(req->port_select_mask[3]); |
2635 | port_num = find_first_bit((unsigned long *)&port_mask, | 2640 | port_num = find_first_bit((unsigned long *)&port_mask, |
2636 | sizeof(port_mask)); | 2641 | sizeof(port_mask) * 8); |
2637 | 2642 | ||
2638 | if ((u8)port_num != port) { | 2643 | if (port_num != port) { |
2639 | pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; | 2644 | pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; |
2640 | return reply((struct ib_mad_hdr *)pmp); | 2645 | return reply((struct ib_mad_hdr *)pmp); |
2641 | } | 2646 | } |
@@ -2837,7 +2842,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp, | |||
2837 | */ | 2842 | */ |
2838 | port_mask = be64_to_cpu(req->port_select_mask[3]); | 2843 | port_mask = be64_to_cpu(req->port_select_mask[3]); |
2839 | port_num = find_first_bit((unsigned long *)&port_mask, | 2844 | port_num = find_first_bit((unsigned long *)&port_mask, |
2840 | sizeof(port_mask)); | 2845 | sizeof(port_mask) * 8); |
2841 | 2846 | ||
2842 | if (port_num != port) { | 2847 | if (port_num != port) { |
2843 | pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; | 2848 | pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; |
@@ -3010,7 +3015,7 @@ static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp, | |||
3010 | */ | 3015 | */ |
3011 | port_mask = be64_to_cpu(req->port_select_mask[3]); | 3016 | port_mask = be64_to_cpu(req->port_select_mask[3]); |
3012 | port_num = find_first_bit((unsigned long *)&port_mask, | 3017 | port_num = find_first_bit((unsigned long *)&port_mask, |
3013 | sizeof(port_mask)); | 3018 | sizeof(port_mask) * 8); |
3014 | 3019 | ||
3015 | if (port_num != port) { | 3020 | if (port_num != port) { |
3016 | pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; | 3021 | pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; |
@@ -3247,7 +3252,7 @@ static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp, | |||
3247 | */ | 3252 | */ |
3248 | port_mask = be64_to_cpu(req->port_select_mask[3]); | 3253 | port_mask = be64_to_cpu(req->port_select_mask[3]); |
3249 | port_num = find_first_bit((unsigned long *)&port_mask, | 3254 | port_num = find_first_bit((unsigned long *)&port_mask, |
3250 | sizeof(port_mask)); | 3255 | sizeof(port_mask) * 8); |
3251 | 3256 | ||
3252 | if (port_num != port) { | 3257 | if (port_num != port) { |
3253 | pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; | 3258 | pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; |
@@ -3398,7 +3403,7 @@ static void apply_cc_state(struct hfi1_pportdata *ppd) | |||
3398 | 3403 | ||
3399 | spin_unlock(&ppd->cc_state_lock); | 3404 | spin_unlock(&ppd->cc_state_lock); |
3400 | 3405 | ||
3401 | call_rcu(&old_cc_state->rcu, cc_state_reclaim); | 3406 | kfree_rcu(old_cc_state, rcu); |
3402 | } | 3407 | } |
3403 | 3408 | ||
3404 | static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data, | 3409 | static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data, |
@@ -3553,13 +3558,6 @@ static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data, | |||
3553 | return reply((struct ib_mad_hdr *)smp); | 3558 | return reply((struct ib_mad_hdr *)smp); |
3554 | } | 3559 | } |
3555 | 3560 | ||
3556 | void cc_state_reclaim(struct rcu_head *rcu) | ||
3557 | { | ||
3558 | struct cc_state *cc_state = container_of(rcu, struct cc_state, rcu); | ||
3559 | |||
3560 | kfree(cc_state); | ||
3561 | } | ||
3562 | |||
3563 | static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data, | 3561 | static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data, |
3564 | struct ib_device *ibdev, u8 port, | 3562 | struct ib_device *ibdev, u8 port, |
3565 | u32 *resp_len) | 3563 | u32 *resp_len) |
diff --git a/drivers/infiniband/hw/hfi1/pio_copy.c b/drivers/infiniband/hw/hfi1/pio_copy.c index 8c25e1b58849..3a1ef3056282 100644 --- a/drivers/infiniband/hw/hfi1/pio_copy.c +++ b/drivers/infiniband/hw/hfi1/pio_copy.c | |||
@@ -771,6 +771,9 @@ void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes) | |||
771 | read_extra_bytes(pbuf, from, to_fill); | 771 | read_extra_bytes(pbuf, from, to_fill); |
772 | from += to_fill; | 772 | from += to_fill; |
773 | nbytes -= to_fill; | 773 | nbytes -= to_fill; |
774 | /* may not be enough valid bytes left to align */ | ||
775 | if (extra > nbytes) | ||
776 | extra = nbytes; | ||
774 | 777 | ||
775 | /* ...now write carry */ | 778 | /* ...now write carry */ |
776 | dest = pbuf->start + (pbuf->qw_written * sizeof(u64)); | 779 | dest = pbuf->start + (pbuf->qw_written * sizeof(u64)); |
@@ -798,6 +801,15 @@ void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes) | |||
798 | read_low_bytes(pbuf, from, extra); | 801 | read_low_bytes(pbuf, from, extra); |
799 | from += extra; | 802 | from += extra; |
800 | nbytes -= extra; | 803 | nbytes -= extra; |
804 | /* | ||
805 | * If no bytes are left, return early - we are done. | ||
806 | * NOTE: This short-circuit is *required* because | ||
807 | * "extra" may have been reduced in size and "from" | ||
808 | * is not aligned, as required when leaving this | ||
809 | * if block. | ||
810 | */ | ||
811 | if (nbytes == 0) | ||
812 | return; | ||
801 | } | 813 | } |
802 | 814 | ||
803 | /* at this point, from is QW aligned */ | 815 | /* at this point, from is QW aligned */ |
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c index a5aa3517e7d5..4e4d8317c281 100644 --- a/drivers/infiniband/hw/hfi1/qp.c +++ b/drivers/infiniband/hw/hfi1/qp.c | |||
@@ -656,10 +656,6 @@ struct qp_iter *qp_iter_init(struct hfi1_ibdev *dev) | |||
656 | 656 | ||
657 | iter->dev = dev; | 657 | iter->dev = dev; |
658 | iter->specials = dev->rdi.ibdev.phys_port_cnt * 2; | 658 | iter->specials = dev->rdi.ibdev.phys_port_cnt * 2; |
659 | if (qp_iter_next(iter)) { | ||
660 | kfree(iter); | ||
661 | return NULL; | ||
662 | } | ||
663 | 659 | ||
664 | return iter; | 660 | return iter; |
665 | } | 661 | } |
diff --git a/drivers/infiniband/hw/hfi1/qsfp.c b/drivers/infiniband/hw/hfi1/qsfp.c index a207717ade2a..4e95ad810847 100644 --- a/drivers/infiniband/hw/hfi1/qsfp.c +++ b/drivers/infiniband/hw/hfi1/qsfp.c | |||
@@ -706,8 +706,8 @@ int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr, u32 len, | |||
706 | u8 *data) | 706 | u8 *data) |
707 | { | 707 | { |
708 | struct hfi1_pportdata *ppd; | 708 | struct hfi1_pportdata *ppd; |
709 | u32 excess_len = 0; | 709 | u32 excess_len = len; |
710 | int ret = 0; | 710 | int ret = 0, offset = 0; |
711 | 711 | ||
712 | if (port_num > dd->num_pports || port_num < 1) { | 712 | if (port_num > dd->num_pports || port_num < 1) { |
713 | dd_dev_info(dd, "%s: Invalid port number %d\n", | 713 | dd_dev_info(dd, "%s: Invalid port number %d\n", |
@@ -740,6 +740,34 @@ int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr, u32 len, | |||
740 | } | 740 | } |
741 | 741 | ||
742 | memcpy(data, &ppd->qsfp_info.cache[addr], len); | 742 | memcpy(data, &ppd->qsfp_info.cache[addr], len); |
743 | |||
744 | if (addr <= QSFP_MONITOR_VAL_END && | ||
745 | (addr + len) >= QSFP_MONITOR_VAL_START) { | ||
746 | /* Overlap with the dynamic channel monitor range */ | ||
747 | if (addr < QSFP_MONITOR_VAL_START) { | ||
748 | if (addr + len <= QSFP_MONITOR_VAL_END) | ||
749 | len = addr + len - QSFP_MONITOR_VAL_START; | ||
750 | else | ||
751 | len = QSFP_MONITOR_RANGE; | ||
752 | offset = QSFP_MONITOR_VAL_START - addr; | ||
753 | addr = QSFP_MONITOR_VAL_START; | ||
754 | } else if (addr == QSFP_MONITOR_VAL_START) { | ||
755 | offset = 0; | ||
756 | if (addr + len > QSFP_MONITOR_VAL_END) | ||
757 | len = QSFP_MONITOR_RANGE; | ||
758 | } else { | ||
759 | offset = 0; | ||
760 | if (addr + len > QSFP_MONITOR_VAL_END) | ||
761 | len = QSFP_MONITOR_VAL_END - addr + 1; | ||
762 | } | ||
763 | /* Refresh the values of the dynamic monitors from the cable */ | ||
764 | ret = one_qsfp_read(ppd, dd->hfi1_id, addr, data + offset, len); | ||
765 | if (ret != len) { | ||
766 | ret = -EAGAIN; | ||
767 | goto set_zeroes; | ||
768 | } | ||
769 | } | ||
770 | |||
743 | return 0; | 771 | return 0; |
744 | 772 | ||
745 | set_zeroes: | 773 | set_zeroes: |
diff --git a/drivers/infiniband/hw/hfi1/qsfp.h b/drivers/infiniband/hw/hfi1/qsfp.h index 69275ebd9597..36cf52359848 100644 --- a/drivers/infiniband/hw/hfi1/qsfp.h +++ b/drivers/infiniband/hw/hfi1/qsfp.h | |||
@@ -74,6 +74,9 @@ | |||
74 | /* Defined fields that Intel requires of qualified cables */ | 74 | /* Defined fields that Intel requires of qualified cables */ |
75 | /* Byte 0 is Identifier, not checked */ | 75 | /* Byte 0 is Identifier, not checked */ |
76 | /* Byte 1 is reserved "status MSB" */ | 76 | /* Byte 1 is reserved "status MSB" */ |
77 | #define QSFP_MONITOR_VAL_START 22 | ||
78 | #define QSFP_MONITOR_VAL_END 81 | ||
79 | #define QSFP_MONITOR_RANGE (QSFP_MONITOR_VAL_END - QSFP_MONITOR_VAL_START + 1) | ||
77 | #define QSFP_TX_CTRL_BYTE_OFFS 86 | 80 | #define QSFP_TX_CTRL_BYTE_OFFS 86 |
78 | #define QSFP_PWR_CTRL_BYTE_OFFS 93 | 81 | #define QSFP_PWR_CTRL_BYTE_OFFS 93 |
79 | #define QSFP_CDR_CTRL_BYTE_OFFS 98 | 82 | #define QSFP_CDR_CTRL_BYTE_OFFS 98 |
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index 0ecf27903dc2..1694037d1eee 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c | |||
@@ -114,6 +114,8 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12 | |||
114 | #define KDETH_HCRC_LOWER_SHIFT 24 | 114 | #define KDETH_HCRC_LOWER_SHIFT 24 |
115 | #define KDETH_HCRC_LOWER_MASK 0xff | 115 | #define KDETH_HCRC_LOWER_MASK 0xff |
116 | 116 | ||
117 | #define AHG_KDETH_INTR_SHIFT 12 | ||
118 | |||
117 | #define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4) | 119 | #define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4) |
118 | #define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff) | 120 | #define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff) |
119 | 121 | ||
@@ -1480,7 +1482,8 @@ static int set_txreq_header_ahg(struct user_sdma_request *req, | |||
1480 | /* Clear KDETH.SH on last packet */ | 1482 | /* Clear KDETH.SH on last packet */ |
1481 | if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT)) { | 1483 | if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT)) { |
1482 | val |= cpu_to_le16(KDETH_GET(hdr->kdeth.ver_tid_offset, | 1484 | val |= cpu_to_le16(KDETH_GET(hdr->kdeth.ver_tid_offset, |
1483 | INTR) >> 16); | 1485 | INTR) << |
1486 | AHG_KDETH_INTR_SHIFT); | ||
1484 | val &= cpu_to_le16(~(1U << 13)); | 1487 | val &= cpu_to_le16(~(1U << 13)); |
1485 | AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val); | 1488 | AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val); |
1486 | } else { | 1489 | } else { |
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h index b738acdb9b02..8ec09e470f84 100644 --- a/drivers/infiniband/hw/i40iw/i40iw.h +++ b/drivers/infiniband/hw/i40iw/i40iw.h | |||
@@ -232,7 +232,7 @@ struct i40iw_device { | |||
232 | struct i40e_client *client; | 232 | struct i40e_client *client; |
233 | struct i40iw_hw hw; | 233 | struct i40iw_hw hw; |
234 | struct i40iw_cm_core cm_core; | 234 | struct i40iw_cm_core cm_core; |
235 | unsigned long *mem_resources; | 235 | u8 *mem_resources; |
236 | unsigned long *allocated_qps; | 236 | unsigned long *allocated_qps; |
237 | unsigned long *allocated_cqs; | 237 | unsigned long *allocated_cqs; |
238 | unsigned long *allocated_mrs; | 238 | unsigned long *allocated_mrs; |
@@ -435,8 +435,8 @@ static inline int i40iw_alloc_resource(struct i40iw_device *iwdev, | |||
435 | *next = resource_num + 1; | 435 | *next = resource_num + 1; |
436 | if (*next == max_resources) | 436 | if (*next == max_resources) |
437 | *next = 0; | 437 | *next = 0; |
438 | spin_unlock_irqrestore(&iwdev->resource_lock, flags); | ||
439 | *req_resource_num = resource_num; | 438 | *req_resource_num = resource_num; |
439 | spin_unlock_irqrestore(&iwdev->resource_lock, flags); | ||
440 | 440 | ||
441 | return 0; | 441 | return 0; |
442 | } | 442 | } |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c index 5026dc79978a..7ca0638579c0 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.c +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c | |||
@@ -535,8 +535,8 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node, | |||
535 | buf += hdr_len; | 535 | buf += hdr_len; |
536 | } | 536 | } |
537 | 537 | ||
538 | if (pd_len) | 538 | if (pdata && pdata->addr) |
539 | memcpy(buf, pdata->addr, pd_len); | 539 | memcpy(buf, pdata->addr, pdata->size); |
540 | 540 | ||
541 | atomic_set(&sqbuf->refcount, 1); | 541 | atomic_set(&sqbuf->refcount, 1); |
542 | 542 | ||
@@ -3347,26 +3347,6 @@ int i40iw_cm_disconn(struct i40iw_qp *iwqp) | |||
3347 | } | 3347 | } |
3348 | 3348 | ||
3349 | /** | 3349 | /** |
3350 | * i40iw_loopback_nop - Send a nop | ||
3351 | * @qp: associated hw qp | ||
3352 | */ | ||
3353 | static void i40iw_loopback_nop(struct i40iw_sc_qp *qp) | ||
3354 | { | ||
3355 | u64 *wqe; | ||
3356 | u64 header; | ||
3357 | |||
3358 | wqe = qp->qp_uk.sq_base->elem; | ||
3359 | set_64bit_val(wqe, 0, 0); | ||
3360 | set_64bit_val(wqe, 8, 0); | ||
3361 | set_64bit_val(wqe, 16, 0); | ||
3362 | |||
3363 | header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) | | ||
3364 | LS_64(0, I40IWQPSQ_SIGCOMPL) | | ||
3365 | LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID); | ||
3366 | set_64bit_val(wqe, 24, header); | ||
3367 | } | ||
3368 | |||
3369 | /** | ||
3370 | * i40iw_qp_disconnect - free qp and close cm | 3350 | * i40iw_qp_disconnect - free qp and close cm |
3371 | * @iwqp: associate qp for the connection | 3351 | * @iwqp: associate qp for the connection |
3372 | */ | 3352 | */ |
@@ -3638,7 +3618,7 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
3638 | } else { | 3618 | } else { |
3639 | if (iwqp->page) | 3619 | if (iwqp->page) |
3640 | iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page); | 3620 | iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page); |
3641 | i40iw_loopback_nop(&iwqp->sc_qp); | 3621 | dev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp, NULL, 0, 0); |
3642 | } | 3622 | } |
3643 | 3623 | ||
3644 | if (iwqp->page) | 3624 | if (iwqp->page) |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c index 3ee0cad96bc6..0c92a40b3e86 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_hw.c +++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c | |||
@@ -265,6 +265,7 @@ void i40iw_next_iw_state(struct i40iw_qp *iwqp, | |||
265 | info.dont_send_fin = false; | 265 | info.dont_send_fin = false; |
266 | if (iwqp->sc_qp.term_flags && (state == I40IW_QP_STATE_ERROR)) | 266 | if (iwqp->sc_qp.term_flags && (state == I40IW_QP_STATE_ERROR)) |
267 | info.reset_tcp_conn = true; | 267 | info.reset_tcp_conn = true; |
268 | iwqp->hw_iwarp_state = state; | ||
268 | i40iw_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0); | 269 | i40iw_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0); |
269 | } | 270 | } |
270 | 271 | ||
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c index 6e9081380a27..445e230d5ff8 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_main.c +++ b/drivers/infiniband/hw/i40iw/i40iw_main.c | |||
@@ -100,7 +100,7 @@ static struct notifier_block i40iw_net_notifier = { | |||
100 | .notifier_call = i40iw_net_event | 100 | .notifier_call = i40iw_net_event |
101 | }; | 101 | }; |
102 | 102 | ||
103 | static int i40iw_notifiers_registered; | 103 | static atomic_t i40iw_notifiers_registered; |
104 | 104 | ||
105 | /** | 105 | /** |
106 | * i40iw_find_i40e_handler - find a handler given a client info | 106 | * i40iw_find_i40e_handler - find a handler given a client info |
@@ -1342,12 +1342,11 @@ exit: | |||
1342 | */ | 1342 | */ |
1343 | static void i40iw_register_notifiers(void) | 1343 | static void i40iw_register_notifiers(void) |
1344 | { | 1344 | { |
1345 | if (!i40iw_notifiers_registered) { | 1345 | if (atomic_inc_return(&i40iw_notifiers_registered) == 1) { |
1346 | register_inetaddr_notifier(&i40iw_inetaddr_notifier); | 1346 | register_inetaddr_notifier(&i40iw_inetaddr_notifier); |
1347 | register_inet6addr_notifier(&i40iw_inetaddr6_notifier); | 1347 | register_inet6addr_notifier(&i40iw_inetaddr6_notifier); |
1348 | register_netevent_notifier(&i40iw_net_notifier); | 1348 | register_netevent_notifier(&i40iw_net_notifier); |
1349 | } | 1349 | } |
1350 | i40iw_notifiers_registered++; | ||
1351 | } | 1350 | } |
1352 | 1351 | ||
1353 | /** | 1352 | /** |
@@ -1429,8 +1428,7 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del | |||
1429 | i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx); | 1428 | i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx); |
1430 | /* fallthrough */ | 1429 | /* fallthrough */ |
1431 | case INET_NOTIFIER: | 1430 | case INET_NOTIFIER: |
1432 | if (i40iw_notifiers_registered > 0) { | 1431 | if (!atomic_dec_return(&i40iw_notifiers_registered)) { |
1433 | i40iw_notifiers_registered--; | ||
1434 | unregister_netevent_notifier(&i40iw_net_notifier); | 1432 | unregister_netevent_notifier(&i40iw_net_notifier); |
1435 | unregister_inetaddr_notifier(&i40iw_inetaddr_notifier); | 1433 | unregister_inetaddr_notifier(&i40iw_inetaddr_notifier); |
1436 | unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier); | 1434 | unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier); |
@@ -1558,6 +1556,10 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client) | |||
1558 | enum i40iw_status_code status; | 1556 | enum i40iw_status_code status; |
1559 | struct i40iw_handler *hdl; | 1557 | struct i40iw_handler *hdl; |
1560 | 1558 | ||
1559 | hdl = i40iw_find_netdev(ldev->netdev); | ||
1560 | if (hdl) | ||
1561 | return 0; | ||
1562 | |||
1561 | hdl = kzalloc(sizeof(*hdl), GFP_KERNEL); | 1563 | hdl = kzalloc(sizeof(*hdl), GFP_KERNEL); |
1562 | if (!hdl) | 1564 | if (!hdl) |
1563 | return -ENOMEM; | 1565 | return -ENOMEM; |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c index 0e8db0a35141..6fd043b1d714 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_utils.c +++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c | |||
@@ -673,8 +673,11 @@ enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw, | |||
673 | { | 673 | { |
674 | if (!mem) | 674 | if (!mem) |
675 | return I40IW_ERR_PARAM; | 675 | return I40IW_ERR_PARAM; |
676 | /* | ||
677 | * mem->va points to the parent of mem, so both mem and mem->va | ||
678 | * can not be touched once mem->va is freed | ||
679 | */ | ||
676 | kfree(mem->va); | 680 | kfree(mem->va); |
677 | mem->va = NULL; | ||
678 | return 0; | 681 | return 0; |
679 | } | 682 | } |
680 | 683 | ||
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 2360338877bf..6329c971c22f 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c | |||
@@ -794,7 +794,6 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd, | |||
794 | return &iwqp->ibqp; | 794 | return &iwqp->ibqp; |
795 | error: | 795 | error: |
796 | i40iw_free_qp_resources(iwdev, iwqp, qp_num); | 796 | i40iw_free_qp_resources(iwdev, iwqp, qp_num); |
797 | kfree(mem); | ||
798 | return ERR_PTR(err_code); | 797 | return ERR_PTR(err_code); |
799 | } | 798 | } |
800 | 799 | ||
@@ -1926,8 +1925,7 @@ static int i40iw_dereg_mr(struct ib_mr *ib_mr) | |||
1926 | } | 1925 | } |
1927 | if (iwpbl->pbl_allocated) | 1926 | if (iwpbl->pbl_allocated) |
1928 | i40iw_free_pble(iwdev->pble_rsrc, palloc); | 1927 | i40iw_free_pble(iwdev->pble_rsrc, palloc); |
1929 | kfree(iwpbl->iwmr); | 1928 | kfree(iwmr); |
1930 | iwpbl->iwmr = NULL; | ||
1931 | return 0; | 1929 | return 0; |
1932 | } | 1930 | } |
1933 | 1931 | ||
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index d6fc8a6e8c33..5df63dacaaa3 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c | |||
@@ -576,8 +576,8 @@ static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum) | |||
576 | checksum == cpu_to_be16(0xffff); | 576 | checksum == cpu_to_be16(0xffff); |
577 | } | 577 | } |
578 | 578 | ||
579 | static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc, | 579 | static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc, |
580 | unsigned tail, struct mlx4_cqe *cqe, int is_eth) | 580 | unsigned tail, struct mlx4_cqe *cqe, int is_eth) |
581 | { | 581 | { |
582 | struct mlx4_ib_proxy_sqp_hdr *hdr; | 582 | struct mlx4_ib_proxy_sqp_hdr *hdr; |
583 | 583 | ||
@@ -600,8 +600,6 @@ static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct | |||
600 | wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32); | 600 | wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32); |
601 | wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12); | 601 | wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12); |
602 | } | 602 | } |
603 | |||
604 | return 0; | ||
605 | } | 603 | } |
606 | 604 | ||
607 | static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries, | 605 | static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries, |
@@ -689,12 +687,6 @@ repoll: | |||
689 | is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == | 687 | is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == |
690 | MLX4_CQE_OPCODE_ERROR; | 688 | MLX4_CQE_OPCODE_ERROR; |
691 | 689 | ||
692 | if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP && | ||
693 | is_send)) { | ||
694 | pr_warn("Completion for NOP opcode detected!\n"); | ||
695 | return -EINVAL; | ||
696 | } | ||
697 | |||
698 | /* Resize CQ in progress */ | 690 | /* Resize CQ in progress */ |
699 | if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) { | 691 | if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) { |
700 | if (cq->resize_buf) { | 692 | if (cq->resize_buf) { |
@@ -720,12 +712,6 @@ repoll: | |||
720 | */ | 712 | */ |
721 | mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev, | 713 | mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev, |
722 | be32_to_cpu(cqe->vlan_my_qpn)); | 714 | be32_to_cpu(cqe->vlan_my_qpn)); |
723 | if (unlikely(!mqp)) { | ||
724 | pr_warn("CQ %06x with entry for unknown QPN %06x\n", | ||
725 | cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK); | ||
726 | return -EINVAL; | ||
727 | } | ||
728 | |||
729 | *cur_qp = to_mibqp(mqp); | 715 | *cur_qp = to_mibqp(mqp); |
730 | } | 716 | } |
731 | 717 | ||
@@ -738,11 +724,6 @@ repoll: | |||
738 | /* SRQ is also in the radix tree */ | 724 | /* SRQ is also in the radix tree */ |
739 | msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev, | 725 | msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev, |
740 | srq_num); | 726 | srq_num); |
741 | if (unlikely(!msrq)) { | ||
742 | pr_warn("CQ %06x with entry for unknown SRQN %06x\n", | ||
743 | cq->mcq.cqn, srq_num); | ||
744 | return -EINVAL; | ||
745 | } | ||
746 | } | 727 | } |
747 | 728 | ||
748 | if (is_send) { | 729 | if (is_send) { |
@@ -852,9 +833,11 @@ repoll: | |||
852 | if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) { | 833 | if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) { |
853 | if ((*cur_qp)->mlx4_ib_qp_type & | 834 | if ((*cur_qp)->mlx4_ib_qp_type & |
854 | (MLX4_IB_QPT_PROXY_SMI_OWNER | | 835 | (MLX4_IB_QPT_PROXY_SMI_OWNER | |
855 | MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) | 836 | MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) { |
856 | return use_tunnel_data(*cur_qp, cq, wc, tail, | 837 | use_tunnel_data(*cur_qp, cq, wc, tail, cqe, |
857 | cqe, is_eth); | 838 | is_eth); |
839 | return 0; | ||
840 | } | ||
858 | } | 841 | } |
859 | 842 | ||
860 | wc->slid = be16_to_cpu(cqe->rlid); | 843 | wc->slid = be16_to_cpu(cqe->rlid); |
@@ -891,7 +874,6 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) | |||
891 | struct mlx4_ib_qp *cur_qp = NULL; | 874 | struct mlx4_ib_qp *cur_qp = NULL; |
892 | unsigned long flags; | 875 | unsigned long flags; |
893 | int npolled; | 876 | int npolled; |
894 | int err = 0; | ||
895 | struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device); | 877 | struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device); |
896 | 878 | ||
897 | spin_lock_irqsave(&cq->lock, flags); | 879 | spin_lock_irqsave(&cq->lock, flags); |
@@ -901,8 +883,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) | |||
901 | } | 883 | } |
902 | 884 | ||
903 | for (npolled = 0; npolled < num_entries; ++npolled) { | 885 | for (npolled = 0; npolled < num_entries; ++npolled) { |
904 | err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled); | 886 | if (mlx4_ib_poll_one(cq, &cur_qp, wc + npolled)) |
905 | if (err) | ||
906 | break; | 887 | break; |
907 | } | 888 | } |
908 | 889 | ||
@@ -911,10 +892,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) | |||
911 | out: | 892 | out: |
912 | spin_unlock_irqrestore(&cq->lock, flags); | 893 | spin_unlock_irqrestore(&cq->lock, flags); |
913 | 894 | ||
914 | if (err == 0 || err == -EAGAIN) | 895 | return npolled; |
915 | return npolled; | ||
916 | else | ||
917 | return err; | ||
918 | } | 896 | } |
919 | 897 | ||
920 | int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) | 898 | int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) |
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 9c2e53d28f98..0f21c3a25552 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c | |||
@@ -1128,6 +1128,27 @@ void handle_port_mgmt_change_event(struct work_struct *work) | |||
1128 | 1128 | ||
1129 | /* Generate GUID changed event */ | 1129 | /* Generate GUID changed event */ |
1130 | if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) { | 1130 | if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) { |
1131 | if (mlx4_is_master(dev->dev)) { | ||
1132 | union ib_gid gid; | ||
1133 | int err = 0; | ||
1134 | |||
1135 | if (!eqe->event.port_mgmt_change.params.port_info.gid_prefix) | ||
1136 | err = __mlx4_ib_query_gid(&dev->ib_dev, port, 0, &gid, 1); | ||
1137 | else | ||
1138 | gid.global.subnet_prefix = | ||
1139 | eqe->event.port_mgmt_change.params.port_info.gid_prefix; | ||
1140 | if (err) { | ||
1141 | pr_warn("Could not change QP1 subnet prefix for port %d: query_gid error (%d)\n", | ||
1142 | port, err); | ||
1143 | } else { | ||
1144 | pr_debug("Changing QP1 subnet prefix for port %d. old=0x%llx. new=0x%llx\n", | ||
1145 | port, | ||
1146 | (u64)atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix), | ||
1147 | be64_to_cpu(gid.global.subnet_prefix)); | ||
1148 | atomic64_set(&dev->sriov.demux[port - 1].subnet_prefix, | ||
1149 | be64_to_cpu(gid.global.subnet_prefix)); | ||
1150 | } | ||
1151 | } | ||
1131 | mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE); | 1152 | mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE); |
1132 | /*if master, notify all slaves*/ | 1153 | /*if master, notify all slaves*/ |
1133 | if (mlx4_is_master(dev->dev)) | 1154 | if (mlx4_is_master(dev->dev)) |
@@ -2202,6 +2223,8 @@ int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev) | |||
2202 | if (err) | 2223 | if (err) |
2203 | goto demux_err; | 2224 | goto demux_err; |
2204 | dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id; | 2225 | dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id; |
2226 | atomic64_set(&dev->sriov.demux[i].subnet_prefix, | ||
2227 | be64_to_cpu(gid.global.subnet_prefix)); | ||
2205 | err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1, | 2228 | err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1, |
2206 | &dev->sriov.sqps[i]); | 2229 | &dev->sriov.sqps[i]); |
2207 | if (err) | 2230 | if (err) |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 2af44c2de262..87ba9bca4181 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
@@ -2202,6 +2202,9 @@ static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev) | |||
2202 | bool per_port = !!(ibdev->dev->caps.flags2 & | 2202 | bool per_port = !!(ibdev->dev->caps.flags2 & |
2203 | MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT); | 2203 | MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT); |
2204 | 2204 | ||
2205 | if (mlx4_is_slave(ibdev->dev)) | ||
2206 | return 0; | ||
2207 | |||
2205 | for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) { | 2208 | for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) { |
2206 | /* i == 1 means we are building port counters */ | 2209 | /* i == 1 means we are building port counters */ |
2207 | if (i && !per_port) | 2210 | if (i && !per_port) |
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c index 8f7ad07915b0..097bfcc4ee99 100644 --- a/drivers/infiniband/hw/mlx4/mcg.c +++ b/drivers/infiniband/hw/mlx4/mcg.c | |||
@@ -489,7 +489,7 @@ static u8 get_leave_state(struct mcast_group *group) | |||
489 | if (!group->members[i]) | 489 | if (!group->members[i]) |
490 | leave_state |= (1 << i); | 490 | leave_state |= (1 << i); |
491 | 491 | ||
492 | return leave_state & (group->rec.scope_join_state & 7); | 492 | return leave_state & (group->rec.scope_join_state & 0xf); |
493 | } | 493 | } |
494 | 494 | ||
495 | static int join_group(struct mcast_group *group, int slave, u8 join_mask) | 495 | static int join_group(struct mcast_group *group, int slave, u8 join_mask) |
@@ -564,8 +564,8 @@ static void mlx4_ib_mcg_timeout_handler(struct work_struct *work) | |||
564 | } else | 564 | } else |
565 | mcg_warn_group(group, "DRIVER BUG\n"); | 565 | mcg_warn_group(group, "DRIVER BUG\n"); |
566 | } else if (group->state == MCAST_LEAVE_SENT) { | 566 | } else if (group->state == MCAST_LEAVE_SENT) { |
567 | if (group->rec.scope_join_state & 7) | 567 | if (group->rec.scope_join_state & 0xf) |
568 | group->rec.scope_join_state &= 0xf8; | 568 | group->rec.scope_join_state &= 0xf0; |
569 | group->state = MCAST_IDLE; | 569 | group->state = MCAST_IDLE; |
570 | mutex_unlock(&group->lock); | 570 | mutex_unlock(&group->lock); |
571 | if (release_group(group, 1)) | 571 | if (release_group(group, 1)) |
@@ -605,7 +605,7 @@ static int handle_leave_req(struct mcast_group *group, u8 leave_mask, | |||
605 | static int handle_join_req(struct mcast_group *group, u8 join_mask, | 605 | static int handle_join_req(struct mcast_group *group, u8 join_mask, |
606 | struct mcast_req *req) | 606 | struct mcast_req *req) |
607 | { | 607 | { |
608 | u8 group_join_state = group->rec.scope_join_state & 7; | 608 | u8 group_join_state = group->rec.scope_join_state & 0xf; |
609 | int ref = 0; | 609 | int ref = 0; |
610 | u16 status; | 610 | u16 status; |
611 | struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data; | 611 | struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data; |
@@ -690,8 +690,8 @@ static void mlx4_ib_mcg_work_handler(struct work_struct *work) | |||
690 | u8 cur_join_state; | 690 | u8 cur_join_state; |
691 | 691 | ||
692 | resp_join_state = ((struct ib_sa_mcmember_data *) | 692 | resp_join_state = ((struct ib_sa_mcmember_data *) |
693 | group->response_sa_mad.data)->scope_join_state & 7; | 693 | group->response_sa_mad.data)->scope_join_state & 0xf; |
694 | cur_join_state = group->rec.scope_join_state & 7; | 694 | cur_join_state = group->rec.scope_join_state & 0xf; |
695 | 695 | ||
696 | if (method == IB_MGMT_METHOD_GET_RESP) { | 696 | if (method == IB_MGMT_METHOD_GET_RESP) { |
697 | /* successfull join */ | 697 | /* successfull join */ |
@@ -710,7 +710,7 @@ process_requests: | |||
710 | req = list_first_entry(&group->pending_list, struct mcast_req, | 710 | req = list_first_entry(&group->pending_list, struct mcast_req, |
711 | group_list); | 711 | group_list); |
712 | sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data; | 712 | sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data; |
713 | req_join_state = sa_data->scope_join_state & 0x7; | 713 | req_join_state = sa_data->scope_join_state & 0xf; |
714 | 714 | ||
715 | /* For a leave request, we will immediately answer the VF, and | 715 | /* For a leave request, we will immediately answer the VF, and |
716 | * update our internal counters. The actual leave will be sent | 716 | * update our internal counters. The actual leave will be sent |
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 7c5832ede4bd..686ab48ff644 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h | |||
@@ -448,7 +448,7 @@ struct mlx4_ib_demux_ctx { | |||
448 | struct workqueue_struct *wq; | 448 | struct workqueue_struct *wq; |
449 | struct workqueue_struct *ud_wq; | 449 | struct workqueue_struct *ud_wq; |
450 | spinlock_t ud_lock; | 450 | spinlock_t ud_lock; |
451 | __be64 subnet_prefix; | 451 | atomic64_t subnet_prefix; |
452 | __be64 guid_cache[128]; | 452 | __be64 guid_cache[128]; |
453 | struct mlx4_ib_dev *dev; | 453 | struct mlx4_ib_dev *dev; |
454 | /* the following lock protects both mcg_table and mcg_mgid0_list */ | 454 | /* the following lock protects both mcg_table and mcg_mgid0_list */ |
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 768085f59566..7fb9629bd12b 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c | |||
@@ -2493,24 +2493,27 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr, | |||
2493 | sqp->ud_header.grh.flow_label = | 2493 | sqp->ud_header.grh.flow_label = |
2494 | ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff); | 2494 | ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff); |
2495 | sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit; | 2495 | sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit; |
2496 | if (is_eth) | 2496 | if (is_eth) { |
2497 | memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16); | 2497 | memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16); |
2498 | else { | 2498 | } else { |
2499 | if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) { | 2499 | if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) { |
2500 | /* When multi-function is enabled, the ib_core gid | 2500 | /* When multi-function is enabled, the ib_core gid |
2501 | * indexes don't necessarily match the hw ones, so | 2501 | * indexes don't necessarily match the hw ones, so |
2502 | * we must use our own cache */ | 2502 | * we must use our own cache |
2503 | sqp->ud_header.grh.source_gid.global.subnet_prefix = | 2503 | */ |
2504 | to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1]. | 2504 | sqp->ud_header.grh.source_gid.global.subnet_prefix = |
2505 | subnet_prefix; | 2505 | cpu_to_be64(atomic64_read(&(to_mdev(ib_dev)->sriov. |
2506 | sqp->ud_header.grh.source_gid.global.interface_id = | 2506 | demux[sqp->qp.port - 1]. |
2507 | to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1]. | 2507 | subnet_prefix))); |
2508 | guid_cache[ah->av.ib.gid_index]; | 2508 | sqp->ud_header.grh.source_gid.global.interface_id = |
2509 | } else | 2509 | to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1]. |
2510 | ib_get_cached_gid(ib_dev, | 2510 | guid_cache[ah->av.ib.gid_index]; |
2511 | be32_to_cpu(ah->av.ib.port_pd) >> 24, | 2511 | } else { |
2512 | ah->av.ib.gid_index, | 2512 | ib_get_cached_gid(ib_dev, |
2513 | &sqp->ud_header.grh.source_gid, NULL); | 2513 | be32_to_cpu(ah->av.ib.port_pd) >> 24, |
2514 | ah->av.ib.gid_index, | ||
2515 | &sqp->ud_header.grh.source_gid, NULL); | ||
2516 | } | ||
2514 | } | 2517 | } |
2515 | memcpy(sqp->ud_header.grh.destination_gid.raw, | 2518 | memcpy(sqp->ud_header.grh.destination_gid.raw, |
2516 | ah->av.ib.dgid, 16); | 2519 | ah->av.ib.dgid, 16); |
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 308a358e5b46..e4fac9292e4a 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c | |||
@@ -553,12 +553,6 @@ repoll: | |||
553 | * from the table. | 553 | * from the table. |
554 | */ | 554 | */ |
555 | mqp = __mlx5_qp_lookup(dev->mdev, qpn); | 555 | mqp = __mlx5_qp_lookup(dev->mdev, qpn); |
556 | if (unlikely(!mqp)) { | ||
557 | mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n", | ||
558 | cq->mcq.cqn, qpn); | ||
559 | return -EINVAL; | ||
560 | } | ||
561 | |||
562 | *cur_qp = to_mibqp(mqp); | 556 | *cur_qp = to_mibqp(mqp); |
563 | } | 557 | } |
564 | 558 | ||
@@ -619,13 +613,6 @@ repoll: | |||
619 | read_lock(&dev->mdev->priv.mkey_table.lock); | 613 | read_lock(&dev->mdev->priv.mkey_table.lock); |
620 | mmkey = __mlx5_mr_lookup(dev->mdev, | 614 | mmkey = __mlx5_mr_lookup(dev->mdev, |
621 | mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey))); | 615 | mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey))); |
622 | if (unlikely(!mmkey)) { | ||
623 | read_unlock(&dev->mdev->priv.mkey_table.lock); | ||
624 | mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n", | ||
625 | cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey)); | ||
626 | return -EINVAL; | ||
627 | } | ||
628 | |||
629 | mr = to_mibmr(mmkey); | 616 | mr = to_mibmr(mmkey); |
630 | get_sig_err_item(sig_err_cqe, &mr->sig->err_item); | 617 | get_sig_err_item(sig_err_cqe, &mr->sig->err_item); |
631 | mr->sig->sig_err_exists = true; | 618 | mr->sig->sig_err_exists = true; |
@@ -676,7 +663,6 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) | |||
676 | unsigned long flags; | 663 | unsigned long flags; |
677 | int soft_polled = 0; | 664 | int soft_polled = 0; |
678 | int npolled; | 665 | int npolled; |
679 | int err = 0; | ||
680 | 666 | ||
681 | spin_lock_irqsave(&cq->lock, flags); | 667 | spin_lock_irqsave(&cq->lock, flags); |
682 | if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { | 668 | if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { |
@@ -688,8 +674,7 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) | |||
688 | soft_polled = poll_soft_wc(cq, num_entries, wc); | 674 | soft_polled = poll_soft_wc(cq, num_entries, wc); |
689 | 675 | ||
690 | for (npolled = 0; npolled < num_entries - soft_polled; npolled++) { | 676 | for (npolled = 0; npolled < num_entries - soft_polled; npolled++) { |
691 | err = mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled); | 677 | if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled)) |
692 | if (err) | ||
693 | break; | 678 | break; |
694 | } | 679 | } |
695 | 680 | ||
@@ -698,10 +683,7 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) | |||
698 | out: | 683 | out: |
699 | spin_unlock_irqrestore(&cq->lock, flags); | 684 | spin_unlock_irqrestore(&cq->lock, flags); |
700 | 685 | ||
701 | if (err == 0 || err == -EAGAIN) | 686 | return soft_polled + npolled; |
702 | return soft_polled + npolled; | ||
703 | else | ||
704 | return err; | ||
705 | } | 687 | } |
706 | 688 | ||
707 | int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) | 689 | int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) |
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index a84bb766fc62..e19537cf44ab 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
@@ -37,7 +37,6 @@ | |||
37 | #include <linux/pci.h> | 37 | #include <linux/pci.h> |
38 | #include <linux/dma-mapping.h> | 38 | #include <linux/dma-mapping.h> |
39 | #include <linux/slab.h> | 39 | #include <linux/slab.h> |
40 | #include <linux/io-mapping.h> | ||
41 | #if defined(CONFIG_X86) | 40 | #if defined(CONFIG_X86) |
42 | #include <asm/pat.h> | 41 | #include <asm/pat.h> |
43 | #endif | 42 | #endif |
@@ -289,7 +288,9 @@ __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num, | |||
289 | 288 | ||
290 | static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev) | 289 | static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev) |
291 | { | 290 | { |
292 | return !MLX5_CAP_GEN(dev->mdev, ib_virt); | 291 | if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB) |
292 | return !MLX5_CAP_GEN(dev->mdev, ib_virt); | ||
293 | return 0; | ||
293 | } | 294 | } |
294 | 295 | ||
295 | enum { | 296 | enum { |
@@ -1429,6 +1430,13 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v, | |||
1429 | dmac_47_16), | 1430 | dmac_47_16), |
1430 | ib_spec->eth.val.dst_mac); | 1431 | ib_spec->eth.val.dst_mac); |
1431 | 1432 | ||
1433 | ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, | ||
1434 | smac_47_16), | ||
1435 | ib_spec->eth.mask.src_mac); | ||
1436 | ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v, | ||
1437 | smac_47_16), | ||
1438 | ib_spec->eth.val.src_mac); | ||
1439 | |||
1432 | if (ib_spec->eth.mask.vlan_tag) { | 1440 | if (ib_spec->eth.mask.vlan_tag) { |
1433 | MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, | 1441 | MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, |
1434 | vlan_tag, 1); | 1442 | vlan_tag, 1); |
@@ -1850,6 +1858,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp, | |||
1850 | int domain) | 1858 | int domain) |
1851 | { | 1859 | { |
1852 | struct mlx5_ib_dev *dev = to_mdev(qp->device); | 1860 | struct mlx5_ib_dev *dev = to_mdev(qp->device); |
1861 | struct mlx5_ib_qp *mqp = to_mqp(qp); | ||
1853 | struct mlx5_ib_flow_handler *handler = NULL; | 1862 | struct mlx5_ib_flow_handler *handler = NULL; |
1854 | struct mlx5_flow_destination *dst = NULL; | 1863 | struct mlx5_flow_destination *dst = NULL; |
1855 | struct mlx5_ib_flow_prio *ft_prio; | 1864 | struct mlx5_ib_flow_prio *ft_prio; |
@@ -1876,7 +1885,10 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp, | |||
1876 | } | 1885 | } |
1877 | 1886 | ||
1878 | dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR; | 1887 | dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR; |
1879 | dst->tir_num = to_mqp(qp)->raw_packet_qp.rq.tirn; | 1888 | if (mqp->flags & MLX5_IB_QP_RSS) |
1889 | dst->tir_num = mqp->rss_qp.tirn; | ||
1890 | else | ||
1891 | dst->tir_num = mqp->raw_packet_qp.rq.tirn; | ||
1880 | 1892 | ||
1881 | if (flow_attr->type == IB_FLOW_ATTR_NORMAL) { | 1893 | if (flow_attr->type == IB_FLOW_ATTR_NORMAL) { |
1882 | if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) { | 1894 | if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) { |
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c index 40df2cca0609..996b54e366b0 100644 --- a/drivers/infiniband/hw/mlx5/mem.c +++ b/drivers/infiniband/hw/mlx5/mem.c | |||
@@ -71,7 +71,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift, | |||
71 | 71 | ||
72 | addr = addr >> page_shift; | 72 | addr = addr >> page_shift; |
73 | tmp = (unsigned long)addr; | 73 | tmp = (unsigned long)addr; |
74 | m = find_first_bit(&tmp, sizeof(tmp)); | 74 | m = find_first_bit(&tmp, BITS_PER_LONG); |
75 | skip = 1 << m; | 75 | skip = 1 << m; |
76 | mask = skip - 1; | 76 | mask = skip - 1; |
77 | i = 0; | 77 | i = 0; |
@@ -81,7 +81,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift, | |||
81 | for (k = 0; k < len; k++) { | 81 | for (k = 0; k < len; k++) { |
82 | if (!(i & mask)) { | 82 | if (!(i & mask)) { |
83 | tmp = (unsigned long)pfn; | 83 | tmp = (unsigned long)pfn; |
84 | m = min_t(unsigned long, m, find_first_bit(&tmp, sizeof(tmp))); | 84 | m = min_t(unsigned long, m, find_first_bit(&tmp, BITS_PER_LONG)); |
85 | skip = 1 << m; | 85 | skip = 1 << m; |
86 | mask = skip - 1; | 86 | mask = skip - 1; |
87 | base = pfn; | 87 | base = pfn; |
@@ -89,7 +89,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift, | |||
89 | } else { | 89 | } else { |
90 | if (base + p != pfn) { | 90 | if (base + p != pfn) { |
91 | tmp = (unsigned long)p; | 91 | tmp = (unsigned long)p; |
92 | m = find_first_bit(&tmp, sizeof(tmp)); | 92 | m = find_first_bit(&tmp, BITS_PER_LONG); |
93 | skip = 1 << m; | 93 | skip = 1 << m; |
94 | mask = skip - 1; | 94 | mask = skip - 1; |
95 | base = pfn; | 95 | base = pfn; |
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 372385d0f993..95146f4aa3e3 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h | |||
@@ -402,6 +402,7 @@ enum mlx5_ib_qp_flags { | |||
402 | /* QP uses 1 as its source QP number */ | 402 | /* QP uses 1 as its source QP number */ |
403 | MLX5_IB_QP_SQPN_QP1 = 1 << 6, | 403 | MLX5_IB_QP_SQPN_QP1 = 1 << 6, |
404 | MLX5_IB_QP_CAP_SCATTER_FCS = 1 << 7, | 404 | MLX5_IB_QP_CAP_SCATTER_FCS = 1 << 7, |
405 | MLX5_IB_QP_RSS = 1 << 8, | ||
405 | }; | 406 | }; |
406 | 407 | ||
407 | struct mlx5_umr_wr { | 408 | struct mlx5_umr_wr { |
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 0dd7d93cac95..affc3f6598ca 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
@@ -1449,6 +1449,7 @@ create_tir: | |||
1449 | kvfree(in); | 1449 | kvfree(in); |
1450 | /* qpn is reserved for that QP */ | 1450 | /* qpn is reserved for that QP */ |
1451 | qp->trans_qp.base.mqp.qpn = 0; | 1451 | qp->trans_qp.base.mqp.qpn = 0; |
1452 | qp->flags |= MLX5_IB_QP_RSS; | ||
1452 | return 0; | 1453 | return 0; |
1453 | 1454 | ||
1454 | err: | 1455 | err: |
@@ -3658,12 +3659,8 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, | |||
3658 | struct ib_send_wr *wr, unsigned *idx, | 3659 | struct ib_send_wr *wr, unsigned *idx, |
3659 | int *size, int nreq) | 3660 | int *size, int nreq) |
3660 | { | 3661 | { |
3661 | int err = 0; | 3662 | if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) |
3662 | 3663 | return -ENOMEM; | |
3663 | if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) { | ||
3664 | err = -ENOMEM; | ||
3665 | return err; | ||
3666 | } | ||
3667 | 3664 | ||
3668 | *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1); | 3665 | *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1); |
3669 | *seg = mlx5_get_send_wqe(qp, *idx); | 3666 | *seg = mlx5_get_send_wqe(qp, *idx); |
@@ -3679,7 +3676,7 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, | |||
3679 | *seg += sizeof(**ctrl); | 3676 | *seg += sizeof(**ctrl); |
3680 | *size = sizeof(**ctrl) / 16; | 3677 | *size = sizeof(**ctrl) / 16; |
3681 | 3678 | ||
3682 | return err; | 3679 | return 0; |
3683 | } | 3680 | } |
3684 | 3681 | ||
3685 | static void finish_wqe(struct mlx5_ib_qp *qp, | 3682 | static void finish_wqe(struct mlx5_ib_qp *qp, |
@@ -3758,7 +3755,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
3758 | num_sge = wr->num_sge; | 3755 | num_sge = wr->num_sge; |
3759 | if (unlikely(num_sge > qp->sq.max_gs)) { | 3756 | if (unlikely(num_sge > qp->sq.max_gs)) { |
3760 | mlx5_ib_warn(dev, "\n"); | 3757 | mlx5_ib_warn(dev, "\n"); |
3761 | err = -ENOMEM; | 3758 | err = -EINVAL; |
3762 | *bad_wr = wr; | 3759 | *bad_wr = wr; |
3763 | goto out; | 3760 | goto out; |
3764 | } | 3761 | } |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index 16740dcb876b..67fc0b6857e1 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c | |||
@@ -1156,18 +1156,18 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev, | |||
1156 | attr->max_srq = | 1156 | attr->max_srq = |
1157 | (rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >> | 1157 | (rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >> |
1158 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET; | 1158 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET; |
1159 | attr->max_send_sge = ((rsp->max_write_send_sge & | 1159 | attr->max_send_sge = ((rsp->max_recv_send_sge & |
1160 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >> | 1160 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >> |
1161 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT); | 1161 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT); |
1162 | attr->max_recv_sge = (rsp->max_write_send_sge & | 1162 | attr->max_recv_sge = (rsp->max_recv_send_sge & |
1163 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >> | 1163 | OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_MASK) >> |
1164 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT; | 1164 | OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT; |
1165 | attr->max_srq_sge = (rsp->max_srq_rqe_sge & | 1165 | attr->max_srq_sge = (rsp->max_srq_rqe_sge & |
1166 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >> | 1166 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >> |
1167 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET; | 1167 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET; |
1168 | attr->max_rdma_sge = (rsp->max_write_send_sge & | 1168 | attr->max_rdma_sge = (rsp->max_wr_rd_sge & |
1169 | OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK) >> | 1169 | OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_MASK) >> |
1170 | OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT; | 1170 | OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_SHIFT; |
1171 | attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp & | 1171 | attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp & |
1172 | OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >> | 1172 | OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >> |
1173 | OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT; | 1173 | OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT; |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h index 0efc9662c6d8..37df4481bb8f 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h | |||
@@ -554,9 +554,9 @@ enum { | |||
554 | OCRDMA_MBX_QUERY_CFG_L3_TYPE_MASK = 0x18, | 554 | OCRDMA_MBX_QUERY_CFG_L3_TYPE_MASK = 0x18, |
555 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT = 0, | 555 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT = 0, |
556 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK = 0xFFFF, | 556 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK = 0xFFFF, |
557 | OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT = 16, | 557 | OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT = 16, |
558 | OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK = 0xFFFF << | 558 | OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_MASK = 0xFFFF << |
559 | OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT, | 559 | OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT, |
560 | 560 | ||
561 | OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT = 0, | 561 | OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT = 0, |
562 | OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK = 0xFFFF, | 562 | OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK = 0xFFFF, |
@@ -612,6 +612,8 @@ enum { | |||
612 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET = 0, | 612 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET = 0, |
613 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK = 0xFFFF << | 613 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK = 0xFFFF << |
614 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET, | 614 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET, |
615 | OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_SHIFT = 0, | ||
616 | OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_MASK = 0xFFFF, | ||
615 | }; | 617 | }; |
616 | 618 | ||
617 | struct ocrdma_mbx_query_config { | 619 | struct ocrdma_mbx_query_config { |
@@ -619,7 +621,7 @@ struct ocrdma_mbx_query_config { | |||
619 | struct ocrdma_mbx_rsp rsp; | 621 | struct ocrdma_mbx_rsp rsp; |
620 | u32 qp_srq_cq_ird_ord; | 622 | u32 qp_srq_cq_ird_ord; |
621 | u32 max_pd_ca_ack_delay; | 623 | u32 max_pd_ca_ack_delay; |
622 | u32 max_write_send_sge; | 624 | u32 max_recv_send_sge; |
623 | u32 max_ird_ord_per_qp; | 625 | u32 max_ird_ord_per_qp; |
624 | u32 max_shared_ird_ord; | 626 | u32 max_shared_ird_ord; |
625 | u32 max_mr; | 627 | u32 max_mr; |
@@ -639,6 +641,8 @@ struct ocrdma_mbx_query_config { | |||
639 | u32 max_wqes_rqes_per_q; | 641 | u32 max_wqes_rqes_per_q; |
640 | u32 max_cq_cqes_per_cq; | 642 | u32 max_cq_cqes_per_cq; |
641 | u32 max_srq_rqe_sge; | 643 | u32 max_srq_rqe_sge; |
644 | u32 max_wr_rd_sge; | ||
645 | u32 ird_pgsz_num_pages; | ||
642 | }; | 646 | }; |
643 | 647 | ||
644 | struct ocrdma_fw_ver_rsp { | 648 | struct ocrdma_fw_ver_rsp { |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index b1a3d91fe8b9..0aa854737e74 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | |||
@@ -125,8 +125,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr, | |||
125 | IB_DEVICE_SYS_IMAGE_GUID | | 125 | IB_DEVICE_SYS_IMAGE_GUID | |
126 | IB_DEVICE_LOCAL_DMA_LKEY | | 126 | IB_DEVICE_LOCAL_DMA_LKEY | |
127 | IB_DEVICE_MEM_MGT_EXTENSIONS; | 127 | IB_DEVICE_MEM_MGT_EXTENSIONS; |
128 | attr->max_sge = dev->attr.max_send_sge; | 128 | attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_recv_sge); |
129 | attr->max_sge_rd = attr->max_sge; | 129 | attr->max_sge_rd = dev->attr.max_rdma_sge; |
130 | attr->max_cq = dev->attr.max_cq; | 130 | attr->max_cq = dev->attr.max_cq; |
131 | attr->max_cqe = dev->attr.max_cqe; | 131 | attr->max_cqe = dev->attr.max_cqe; |
132 | attr->max_mr = dev->attr.max_mr; | 132 | attr->max_mr = dev->attr.max_mr; |
diff --git a/drivers/infiniband/hw/qib/qib_debugfs.c b/drivers/infiniband/hw/qib/qib_debugfs.c index 5e75b43c596b..5bad8e3b40bb 100644 --- a/drivers/infiniband/hw/qib/qib_debugfs.c +++ b/drivers/infiniband/hw/qib/qib_debugfs.c | |||
@@ -189,27 +189,32 @@ static int _ctx_stats_seq_show(struct seq_file *s, void *v) | |||
189 | DEBUGFS_FILE(ctx_stats) | 189 | DEBUGFS_FILE(ctx_stats) |
190 | 190 | ||
191 | static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos) | 191 | static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos) |
192 | __acquires(RCU) | ||
192 | { | 193 | { |
193 | struct qib_qp_iter *iter; | 194 | struct qib_qp_iter *iter; |
194 | loff_t n = *pos; | 195 | loff_t n = *pos; |
195 | 196 | ||
196 | rcu_read_lock(); | ||
197 | iter = qib_qp_iter_init(s->private); | 197 | iter = qib_qp_iter_init(s->private); |
198 | |||
199 | /* stop calls rcu_read_unlock */ | ||
200 | rcu_read_lock(); | ||
201 | |||
198 | if (!iter) | 202 | if (!iter) |
199 | return NULL; | 203 | return NULL; |
200 | 204 | ||
201 | while (n--) { | 205 | do { |
202 | if (qib_qp_iter_next(iter)) { | 206 | if (qib_qp_iter_next(iter)) { |
203 | kfree(iter); | 207 | kfree(iter); |
204 | return NULL; | 208 | return NULL; |
205 | } | 209 | } |
206 | } | 210 | } while (n--); |
207 | 211 | ||
208 | return iter; | 212 | return iter; |
209 | } | 213 | } |
210 | 214 | ||
211 | static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr, | 215 | static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr, |
212 | loff_t *pos) | 216 | loff_t *pos) |
217 | __must_hold(RCU) | ||
213 | { | 218 | { |
214 | struct qib_qp_iter *iter = iter_ptr; | 219 | struct qib_qp_iter *iter = iter_ptr; |
215 | 220 | ||
@@ -224,6 +229,7 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr, | |||
224 | } | 229 | } |
225 | 230 | ||
226 | static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr) | 231 | static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr) |
232 | __releases(RCU) | ||
227 | { | 233 | { |
228 | rcu_read_unlock(); | 234 | rcu_read_unlock(); |
229 | } | 235 | } |
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c index fcdf37913a26..c3edc033f7c4 100644 --- a/drivers/infiniband/hw/qib/qib_fs.c +++ b/drivers/infiniband/hw/qib/qib_fs.c | |||
@@ -328,26 +328,12 @@ static ssize_t flash_write(struct file *file, const char __user *buf, | |||
328 | 328 | ||
329 | pos = *ppos; | 329 | pos = *ppos; |
330 | 330 | ||
331 | if (pos != 0) { | 331 | if (pos != 0 || count != sizeof(struct qib_flash)) |
332 | ret = -EINVAL; | 332 | return -EINVAL; |
333 | goto bail; | ||
334 | } | ||
335 | |||
336 | if (count != sizeof(struct qib_flash)) { | ||
337 | ret = -EINVAL; | ||
338 | goto bail; | ||
339 | } | ||
340 | |||
341 | tmp = kmalloc(count, GFP_KERNEL); | ||
342 | if (!tmp) { | ||
343 | ret = -ENOMEM; | ||
344 | goto bail; | ||
345 | } | ||
346 | 333 | ||
347 | if (copy_from_user(tmp, buf, count)) { | 334 | tmp = memdup_user(buf, count); |
348 | ret = -EFAULT; | 335 | if (IS_ERR(tmp)) |
349 | goto bail_tmp; | 336 | return PTR_ERR(tmp); |
350 | } | ||
351 | 337 | ||
352 | dd = private2dd(file); | 338 | dd = private2dd(file); |
353 | if (qib_eeprom_write(dd, pos, tmp, count)) { | 339 | if (qib_eeprom_write(dd, pos, tmp, count)) { |
@@ -361,8 +347,6 @@ static ssize_t flash_write(struct file *file, const char __user *buf, | |||
361 | 347 | ||
362 | bail_tmp: | 348 | bail_tmp: |
363 | kfree(tmp); | 349 | kfree(tmp); |
364 | |||
365 | bail: | ||
366 | return ret; | 350 | return ret; |
367 | } | 351 | } |
368 | 352 | ||
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c index 9cc0aae1d781..f9b8cd2354d1 100644 --- a/drivers/infiniband/hw/qib/qib_qp.c +++ b/drivers/infiniband/hw/qib/qib_qp.c | |||
@@ -573,10 +573,6 @@ struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev) | |||
573 | return NULL; | 573 | return NULL; |
574 | 574 | ||
575 | iter->dev = dev; | 575 | iter->dev = dev; |
576 | if (qib_qp_iter_next(iter)) { | ||
577 | kfree(iter); | ||
578 | return NULL; | ||
579 | } | ||
580 | 576 | ||
581 | return iter; | 577 | return iter; |
582 | } | 578 | } |
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c index c229b9f4a52d..0a89a955550b 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_main.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c | |||
@@ -664,7 +664,8 @@ static int __init usnic_ib_init(void) | |||
664 | return err; | 664 | return err; |
665 | } | 665 | } |
666 | 666 | ||
667 | if (pci_register_driver(&usnic_ib_pci_driver)) { | 667 | err = pci_register_driver(&usnic_ib_pci_driver); |
668 | if (err) { | ||
668 | usnic_err("Unable to register with PCI\n"); | 669 | usnic_err("Unable to register with PCI\n"); |
669 | goto out_umem_fini; | 670 | goto out_umem_fini; |
670 | } | 671 | } |
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c index 80c4b6b401b8..46b64970058e 100644 --- a/drivers/infiniband/sw/rdmavt/mr.c +++ b/drivers/infiniband/sw/rdmavt/mr.c | |||
@@ -294,7 +294,7 @@ static void __rvt_free_mr(struct rvt_mr *mr) | |||
294 | { | 294 | { |
295 | rvt_deinit_mregion(&mr->mr); | 295 | rvt_deinit_mregion(&mr->mr); |
296 | rvt_free_lkey(&mr->mr); | 296 | rvt_free_lkey(&mr->mr); |
297 | vfree(mr); | 297 | kfree(mr); |
298 | } | 298 | } |
299 | 299 | ||
300 | /** | 300 | /** |
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index bdb540f25a88..870b4f212fbc 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c | |||
@@ -873,7 +873,8 @@ bail_qpn: | |||
873 | free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num); | 873 | free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num); |
874 | 874 | ||
875 | bail_rq_wq: | 875 | bail_rq_wq: |
876 | vfree(qp->r_rq.wq); | 876 | if (!qp->ip) |
877 | vfree(qp->r_rq.wq); | ||
877 | 878 | ||
878 | bail_driver_priv: | 879 | bail_driver_priv: |
879 | rdi->driver_f.qp_priv_free(rdi, qp); | 880 | rdi->driver_f.qp_priv_free(rdi, qp); |
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c index 55f0e8f0ca79..ddd59270ff6d 100644 --- a/drivers/infiniband/sw/rxe/rxe.c +++ b/drivers/infiniband/sw/rxe/rxe.c | |||
@@ -362,15 +362,34 @@ static int __init rxe_module_init(void) | |||
362 | return err; | 362 | return err; |
363 | } | 363 | } |
364 | 364 | ||
365 | err = rxe_net_init(); | 365 | err = rxe_net_ipv4_init(); |
366 | if (err) { | 366 | if (err) { |
367 | pr_err("rxe: unable to init\n"); | 367 | pr_err("rxe: unable to init ipv4 tunnel\n"); |
368 | rxe_cache_exit(); | 368 | rxe_cache_exit(); |
369 | return err; | 369 | goto exit; |
370 | } | ||
371 | |||
372 | err = rxe_net_ipv6_init(); | ||
373 | if (err) { | ||
374 | pr_err("rxe: unable to init ipv6 tunnel\n"); | ||
375 | rxe_cache_exit(); | ||
376 | goto exit; | ||
370 | } | 377 | } |
378 | |||
379 | err = register_netdevice_notifier(&rxe_net_notifier); | ||
380 | if (err) { | ||
381 | pr_err("rxe: Failed to rigister netdev notifier\n"); | ||
382 | goto exit; | ||
383 | } | ||
384 | |||
371 | pr_info("rxe: loaded\n"); | 385 | pr_info("rxe: loaded\n"); |
372 | 386 | ||
373 | return 0; | 387 | return 0; |
388 | |||
389 | exit: | ||
390 | rxe_release_udp_tunnel(recv_sockets.sk4); | ||
391 | rxe_release_udp_tunnel(recv_sockets.sk6); | ||
392 | return err; | ||
374 | } | 393 | } |
375 | 394 | ||
376 | static void __exit rxe_module_exit(void) | 395 | static void __exit rxe_module_exit(void) |
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c index 36f67de44095..1c59ef2c67aa 100644 --- a/drivers/infiniband/sw/rxe/rxe_comp.c +++ b/drivers/infiniband/sw/rxe/rxe_comp.c | |||
@@ -689,7 +689,14 @@ int rxe_completer(void *arg) | |||
689 | qp->req.need_retry = 1; | 689 | qp->req.need_retry = 1; |
690 | rxe_run_task(&qp->req.task, 1); | 690 | rxe_run_task(&qp->req.task, 1); |
691 | } | 691 | } |
692 | |||
693 | if (pkt) { | ||
694 | rxe_drop_ref(pkt->qp); | ||
695 | kfree_skb(skb); | ||
696 | } | ||
697 | |||
692 | goto exit; | 698 | goto exit; |
699 | |||
693 | } else { | 700 | } else { |
694 | wqe->status = IB_WC_RETRY_EXC_ERR; | 701 | wqe->status = IB_WC_RETRY_EXC_ERR; |
695 | state = COMPST_ERROR; | 702 | state = COMPST_ERROR; |
@@ -716,6 +723,12 @@ int rxe_completer(void *arg) | |||
716 | case COMPST_ERROR: | 723 | case COMPST_ERROR: |
717 | do_complete(qp, wqe); | 724 | do_complete(qp, wqe); |
718 | rxe_qp_error(qp); | 725 | rxe_qp_error(qp); |
726 | |||
727 | if (pkt) { | ||
728 | rxe_drop_ref(pkt->qp); | ||
729 | kfree_skb(skb); | ||
730 | } | ||
731 | |||
719 | goto exit; | 732 | goto exit; |
720 | } | 733 | } |
721 | } | 734 | } |
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index 0b8d2ea8b41d..eedf2f1cafdf 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c | |||
@@ -275,9 +275,10 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port, | |||
275 | return sock; | 275 | return sock; |
276 | } | 276 | } |
277 | 277 | ||
278 | static void rxe_release_udp_tunnel(struct socket *sk) | 278 | void rxe_release_udp_tunnel(struct socket *sk) |
279 | { | 279 | { |
280 | udp_tunnel_sock_release(sk); | 280 | if (sk) |
281 | udp_tunnel_sock_release(sk); | ||
281 | } | 282 | } |
282 | 283 | ||
283 | static void prepare_udp_hdr(struct sk_buff *skb, __be16 src_port, | 284 | static void prepare_udp_hdr(struct sk_buff *skb, __be16 src_port, |
@@ -658,51 +659,45 @@ out: | |||
658 | return NOTIFY_OK; | 659 | return NOTIFY_OK; |
659 | } | 660 | } |
660 | 661 | ||
661 | static struct notifier_block rxe_net_notifier = { | 662 | struct notifier_block rxe_net_notifier = { |
662 | .notifier_call = rxe_notify, | 663 | .notifier_call = rxe_notify, |
663 | }; | 664 | }; |
664 | 665 | ||
665 | int rxe_net_init(void) | 666 | int rxe_net_ipv4_init(void) |
666 | { | 667 | { |
667 | int err; | ||
668 | |||
669 | spin_lock_init(&dev_list_lock); | 668 | spin_lock_init(&dev_list_lock); |
670 | 669 | ||
671 | recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net, | ||
672 | htons(ROCE_V2_UDP_DPORT), true); | ||
673 | if (IS_ERR(recv_sockets.sk6)) { | ||
674 | recv_sockets.sk6 = NULL; | ||
675 | pr_err("rxe: Failed to create IPv6 UDP tunnel\n"); | ||
676 | return -1; | ||
677 | } | ||
678 | |||
679 | recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net, | 670 | recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net, |
680 | htons(ROCE_V2_UDP_DPORT), false); | 671 | htons(ROCE_V2_UDP_DPORT), false); |
681 | if (IS_ERR(recv_sockets.sk4)) { | 672 | if (IS_ERR(recv_sockets.sk4)) { |
682 | rxe_release_udp_tunnel(recv_sockets.sk6); | ||
683 | recv_sockets.sk4 = NULL; | 673 | recv_sockets.sk4 = NULL; |
684 | recv_sockets.sk6 = NULL; | ||
685 | pr_err("rxe: Failed to create IPv4 UDP tunnel\n"); | 674 | pr_err("rxe: Failed to create IPv4 UDP tunnel\n"); |
686 | return -1; | 675 | return -1; |
687 | } | 676 | } |
688 | 677 | ||
689 | err = register_netdevice_notifier(&rxe_net_notifier); | 678 | return 0; |
690 | if (err) { | ||
691 | rxe_release_udp_tunnel(recv_sockets.sk6); | ||
692 | rxe_release_udp_tunnel(recv_sockets.sk4); | ||
693 | pr_err("rxe: Failed to rigister netdev notifier\n"); | ||
694 | } | ||
695 | |||
696 | return err; | ||
697 | } | 679 | } |
698 | 680 | ||
699 | void rxe_net_exit(void) | 681 | int rxe_net_ipv6_init(void) |
700 | { | 682 | { |
701 | if (recv_sockets.sk6) | 683 | #if IS_ENABLED(CONFIG_IPV6) |
702 | rxe_release_udp_tunnel(recv_sockets.sk6); | ||
703 | 684 | ||
704 | if (recv_sockets.sk4) | 685 | spin_lock_init(&dev_list_lock); |
705 | rxe_release_udp_tunnel(recv_sockets.sk4); | ||
706 | 686 | ||
687 | recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net, | ||
688 | htons(ROCE_V2_UDP_DPORT), true); | ||
689 | if (IS_ERR(recv_sockets.sk6)) { | ||
690 | recv_sockets.sk6 = NULL; | ||
691 | pr_err("rxe: Failed to create IPv6 UDP tunnel\n"); | ||
692 | return -1; | ||
693 | } | ||
694 | #endif | ||
695 | return 0; | ||
696 | } | ||
697 | |||
698 | void rxe_net_exit(void) | ||
699 | { | ||
700 | rxe_release_udp_tunnel(recv_sockets.sk6); | ||
701 | rxe_release_udp_tunnel(recv_sockets.sk4); | ||
707 | unregister_netdevice_notifier(&rxe_net_notifier); | 702 | unregister_netdevice_notifier(&rxe_net_notifier); |
708 | } | 703 | } |
diff --git a/drivers/infiniband/sw/rxe/rxe_net.h b/drivers/infiniband/sw/rxe/rxe_net.h index 7b06f76d16cc..0daf7f09e5b5 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.h +++ b/drivers/infiniband/sw/rxe/rxe_net.h | |||
@@ -44,10 +44,13 @@ struct rxe_recv_sockets { | |||
44 | }; | 44 | }; |
45 | 45 | ||
46 | extern struct rxe_recv_sockets recv_sockets; | 46 | extern struct rxe_recv_sockets recv_sockets; |
47 | extern struct notifier_block rxe_net_notifier; | ||
48 | void rxe_release_udp_tunnel(struct socket *sk); | ||
47 | 49 | ||
48 | struct rxe_dev *rxe_net_add(struct net_device *ndev); | 50 | struct rxe_dev *rxe_net_add(struct net_device *ndev); |
49 | 51 | ||
50 | int rxe_net_init(void); | 52 | int rxe_net_ipv4_init(void); |
53 | int rxe_net_ipv6_init(void); | ||
51 | void rxe_net_exit(void); | 54 | void rxe_net_exit(void); |
52 | 55 | ||
53 | #endif /* RXE_NET_H */ | 56 | #endif /* RXE_NET_H */ |
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c index 3d464c23e08b..144d2f129fcd 100644 --- a/drivers/infiniband/sw/rxe/rxe_recv.c +++ b/drivers/infiniband/sw/rxe/rxe_recv.c | |||
@@ -312,7 +312,7 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb) | |||
312 | * make a copy of the skb to post to the next qp | 312 | * make a copy of the skb to post to the next qp |
313 | */ | 313 | */ |
314 | skb_copy = (mce->qp_list.next != &mcg->qp_list) ? | 314 | skb_copy = (mce->qp_list.next != &mcg->qp_list) ? |
315 | skb_clone(skb, GFP_KERNEL) : NULL; | 315 | skb_clone(skb, GFP_ATOMIC) : NULL; |
316 | 316 | ||
317 | pkt->qp = qp; | 317 | pkt->qp = qp; |
318 | rxe_add_ref(qp); | 318 | rxe_add_ref(qp); |
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index 33b2d9d77021..13a848a518e8 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c | |||
@@ -511,24 +511,21 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe, | |||
511 | } | 511 | } |
512 | 512 | ||
513 | static void update_wqe_state(struct rxe_qp *qp, | 513 | static void update_wqe_state(struct rxe_qp *qp, |
514 | struct rxe_send_wqe *wqe, | 514 | struct rxe_send_wqe *wqe, |
515 | struct rxe_pkt_info *pkt, | 515 | struct rxe_pkt_info *pkt) |
516 | enum wqe_state *prev_state) | ||
517 | { | 516 | { |
518 | enum wqe_state prev_state_ = wqe->state; | ||
519 | |||
520 | if (pkt->mask & RXE_END_MASK) { | 517 | if (pkt->mask & RXE_END_MASK) { |
521 | if (qp_type(qp) == IB_QPT_RC) | 518 | if (qp_type(qp) == IB_QPT_RC) |
522 | wqe->state = wqe_state_pending; | 519 | wqe->state = wqe_state_pending; |
523 | } else { | 520 | } else { |
524 | wqe->state = wqe_state_processing; | 521 | wqe->state = wqe_state_processing; |
525 | } | 522 | } |
526 | |||
527 | *prev_state = prev_state_; | ||
528 | } | 523 | } |
529 | 524 | ||
530 | static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe, | 525 | static void update_wqe_psn(struct rxe_qp *qp, |
531 | struct rxe_pkt_info *pkt, int payload) | 526 | struct rxe_send_wqe *wqe, |
527 | struct rxe_pkt_info *pkt, | ||
528 | int payload) | ||
532 | { | 529 | { |
533 | /* number of packets left to send including current one */ | 530 | /* number of packets left to send including current one */ |
534 | int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu; | 531 | int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu; |
@@ -546,9 +543,34 @@ static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe, | |||
546 | qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK; | 543 | qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK; |
547 | else | 544 | else |
548 | qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK; | 545 | qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK; |
546 | } | ||
549 | 547 | ||
550 | qp->req.opcode = pkt->opcode; | 548 | static void save_state(struct rxe_send_wqe *wqe, |
549 | struct rxe_qp *qp, | ||
550 | struct rxe_send_wqe *rollback_wqe, | ||
551 | struct rxe_qp *rollback_qp) | ||
552 | { | ||
553 | rollback_wqe->state = wqe->state; | ||
554 | rollback_wqe->first_psn = wqe->first_psn; | ||
555 | rollback_wqe->last_psn = wqe->last_psn; | ||
556 | rollback_qp->req.psn = qp->req.psn; | ||
557 | } | ||
551 | 558 | ||
559 | static void rollback_state(struct rxe_send_wqe *wqe, | ||
560 | struct rxe_qp *qp, | ||
561 | struct rxe_send_wqe *rollback_wqe, | ||
562 | struct rxe_qp *rollback_qp) | ||
563 | { | ||
564 | wqe->state = rollback_wqe->state; | ||
565 | wqe->first_psn = rollback_wqe->first_psn; | ||
566 | wqe->last_psn = rollback_wqe->last_psn; | ||
567 | qp->req.psn = rollback_qp->req.psn; | ||
568 | } | ||
569 | |||
570 | static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe, | ||
571 | struct rxe_pkt_info *pkt, int payload) | ||
572 | { | ||
573 | qp->req.opcode = pkt->opcode; | ||
552 | 574 | ||
553 | if (pkt->mask & RXE_END_MASK) | 575 | if (pkt->mask & RXE_END_MASK) |
554 | qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index); | 576 | qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index); |
@@ -571,7 +593,8 @@ int rxe_requester(void *arg) | |||
571 | int mtu; | 593 | int mtu; |
572 | int opcode; | 594 | int opcode; |
573 | int ret; | 595 | int ret; |
574 | enum wqe_state prev_state; | 596 | struct rxe_qp rollback_qp; |
597 | struct rxe_send_wqe rollback_wqe; | ||
575 | 598 | ||
576 | next_wqe: | 599 | next_wqe: |
577 | if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR)) | 600 | if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR)) |
@@ -688,13 +711,21 @@ next_wqe: | |||
688 | goto err; | 711 | goto err; |
689 | } | 712 | } |
690 | 713 | ||
691 | update_wqe_state(qp, wqe, &pkt, &prev_state); | 714 | /* |
715 | * To prevent a race on wqe access between requester and completer, | ||
716 | * wqe members state and psn need to be set before calling | ||
717 | * rxe_xmit_packet(). | ||
718 | * Otherwise, completer might initiate an unjustified retry flow. | ||
719 | */ | ||
720 | save_state(wqe, qp, &rollback_wqe, &rollback_qp); | ||
721 | update_wqe_state(qp, wqe, &pkt); | ||
722 | update_wqe_psn(qp, wqe, &pkt, payload); | ||
692 | ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb); | 723 | ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb); |
693 | if (ret) { | 724 | if (ret) { |
694 | qp->need_req_skb = 1; | 725 | qp->need_req_skb = 1; |
695 | kfree_skb(skb); | 726 | kfree_skb(skb); |
696 | 727 | ||
697 | wqe->state = prev_state; | 728 | rollback_state(wqe, qp, &rollback_wqe, &rollback_qp); |
698 | 729 | ||
699 | if (ret == -EAGAIN) { | 730 | if (ret == -EAGAIN) { |
700 | rxe_run_task(&qp->req.task, 1); | 731 | rxe_run_task(&qp->req.task, 1); |
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index ebb03b46e2ad..3e0f0f2baace 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c | |||
@@ -972,11 +972,13 @@ static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, | |||
972 | free_rd_atomic_resource(qp, res); | 972 | free_rd_atomic_resource(qp, res); |
973 | rxe_advance_resp_resource(qp); | 973 | rxe_advance_resp_resource(qp); |
974 | 974 | ||
975 | memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(skb->cb)); | ||
976 | |||
975 | res->type = RXE_ATOMIC_MASK; | 977 | res->type = RXE_ATOMIC_MASK; |
976 | res->atomic.skb = skb; | 978 | res->atomic.skb = skb; |
977 | res->first_psn = qp->resp.psn; | 979 | res->first_psn = ack_pkt.psn; |
978 | res->last_psn = qp->resp.psn; | 980 | res->last_psn = ack_pkt.psn; |
979 | res->cur_psn = qp->resp.psn; | 981 | res->cur_psn = ack_pkt.psn; |
980 | 982 | ||
981 | rc = rxe_xmit_packet(rxe, qp, &ack_pkt, skb_copy); | 983 | rc = rxe_xmit_packet(rxe, qp, &ack_pkt, skb_copy); |
982 | if (rc) { | 984 | if (rc) { |
@@ -1116,8 +1118,7 @@ static enum resp_states duplicate_request(struct rxe_qp *qp, | |||
1116 | rc = RESPST_CLEANUP; | 1118 | rc = RESPST_CLEANUP; |
1117 | goto out; | 1119 | goto out; |
1118 | } | 1120 | } |
1119 | bth_set_psn(SKB_TO_PKT(skb_copy), | 1121 | |
1120 | qp->resp.psn - 1); | ||
1121 | /* Resend the result. */ | 1122 | /* Resend the result. */ |
1122 | rc = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, | 1123 | rc = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, |
1123 | pkt, skb_copy); | 1124 | pkt, skb_copy); |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 4f7d9b48df64..9dbfcc0ab577 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h | |||
@@ -478,6 +478,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb, | |||
478 | struct ipoib_ah *address, u32 qpn); | 478 | struct ipoib_ah *address, u32 qpn); |
479 | void ipoib_reap_ah(struct work_struct *work); | 479 | void ipoib_reap_ah(struct work_struct *work); |
480 | 480 | ||
481 | struct ipoib_path *__path_find(struct net_device *dev, void *gid); | ||
481 | void ipoib_mark_paths_invalid(struct net_device *dev); | 482 | void ipoib_mark_paths_invalid(struct net_device *dev); |
482 | void ipoib_flush_paths(struct net_device *dev); | 483 | void ipoib_flush_paths(struct net_device *dev); |
483 | int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv); | 484 | int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv); |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 951d9abcca8b..4ad297d3de89 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c | |||
@@ -1318,6 +1318,8 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx) | |||
1318 | } | 1318 | } |
1319 | } | 1319 | } |
1320 | 1320 | ||
1321 | #define QPN_AND_OPTIONS_OFFSET 4 | ||
1322 | |||
1321 | static void ipoib_cm_tx_start(struct work_struct *work) | 1323 | static void ipoib_cm_tx_start(struct work_struct *work) |
1322 | { | 1324 | { |
1323 | struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, | 1325 | struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, |
@@ -1326,6 +1328,7 @@ static void ipoib_cm_tx_start(struct work_struct *work) | |||
1326 | struct ipoib_neigh *neigh; | 1328 | struct ipoib_neigh *neigh; |
1327 | struct ipoib_cm_tx *p; | 1329 | struct ipoib_cm_tx *p; |
1328 | unsigned long flags; | 1330 | unsigned long flags; |
1331 | struct ipoib_path *path; | ||
1329 | int ret; | 1332 | int ret; |
1330 | 1333 | ||
1331 | struct ib_sa_path_rec pathrec; | 1334 | struct ib_sa_path_rec pathrec; |
@@ -1338,7 +1341,19 @@ static void ipoib_cm_tx_start(struct work_struct *work) | |||
1338 | p = list_entry(priv->cm.start_list.next, typeof(*p), list); | 1341 | p = list_entry(priv->cm.start_list.next, typeof(*p), list); |
1339 | list_del_init(&p->list); | 1342 | list_del_init(&p->list); |
1340 | neigh = p->neigh; | 1343 | neigh = p->neigh; |
1344 | |||
1341 | qpn = IPOIB_QPN(neigh->daddr); | 1345 | qpn = IPOIB_QPN(neigh->daddr); |
1346 | /* | ||
1347 | * As long as the search is with these 2 locks, | ||
1348 | * path existence indicates its validity. | ||
1349 | */ | ||
1350 | path = __path_find(dev, neigh->daddr + QPN_AND_OPTIONS_OFFSET); | ||
1351 | if (!path) { | ||
1352 | pr_info("%s ignore not valid path %pI6\n", | ||
1353 | __func__, | ||
1354 | neigh->daddr + QPN_AND_OPTIONS_OFFSET); | ||
1355 | goto free_neigh; | ||
1356 | } | ||
1342 | memcpy(&pathrec, &p->path->pathrec, sizeof pathrec); | 1357 | memcpy(&pathrec, &p->path->pathrec, sizeof pathrec); |
1343 | 1358 | ||
1344 | spin_unlock_irqrestore(&priv->lock, flags); | 1359 | spin_unlock_irqrestore(&priv->lock, flags); |
@@ -1350,6 +1365,7 @@ static void ipoib_cm_tx_start(struct work_struct *work) | |||
1350 | spin_lock_irqsave(&priv->lock, flags); | 1365 | spin_lock_irqsave(&priv->lock, flags); |
1351 | 1366 | ||
1352 | if (ret) { | 1367 | if (ret) { |
1368 | free_neigh: | ||
1353 | neigh = p->neigh; | 1369 | neigh = p->neigh; |
1354 | if (neigh) { | 1370 | if (neigh) { |
1355 | neigh->cm = NULL; | 1371 | neigh->cm = NULL; |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index dc6d241b9406..be11d5d5b8c1 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c | |||
@@ -1161,8 +1161,17 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, | |||
1161 | } | 1161 | } |
1162 | 1162 | ||
1163 | if (level == IPOIB_FLUSH_LIGHT) { | 1163 | if (level == IPOIB_FLUSH_LIGHT) { |
1164 | int oper_up; | ||
1164 | ipoib_mark_paths_invalid(dev); | 1165 | ipoib_mark_paths_invalid(dev); |
1166 | /* Set IPoIB operation as down to prevent races between: | ||
1167 | * the flush flow which leaves MCG and on the fly joins | ||
1168 | * which can happen during that time. mcast restart task | ||
1169 | * should deal with join requests we missed. | ||
1170 | */ | ||
1171 | oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags); | ||
1165 | ipoib_mcast_dev_flush(dev); | 1172 | ipoib_mcast_dev_flush(dev); |
1173 | if (oper_up) | ||
1174 | set_bit(IPOIB_FLAG_OPER_UP, &priv->flags); | ||
1166 | ipoib_flush_ah(dev); | 1175 | ipoib_flush_ah(dev); |
1167 | } | 1176 | } |
1168 | 1177 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 74bcaa064226..cc1c1b062ea5 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
@@ -485,7 +485,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf) | |||
485 | return -EINVAL; | 485 | return -EINVAL; |
486 | } | 486 | } |
487 | 487 | ||
488 | static struct ipoib_path *__path_find(struct net_device *dev, void *gid) | 488 | struct ipoib_path *__path_find(struct net_device *dev, void *gid) |
489 | { | 489 | { |
490 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 490 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
491 | struct rb_node *n = priv->path_tree.rb_node; | 491 | struct rb_node *n = priv->path_tree.rb_node; |
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index ba6be060a476..cae9bbcc27e7 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c | |||
@@ -403,6 +403,7 @@ isert_init_conn(struct isert_conn *isert_conn) | |||
403 | INIT_LIST_HEAD(&isert_conn->node); | 403 | INIT_LIST_HEAD(&isert_conn->node); |
404 | init_completion(&isert_conn->login_comp); | 404 | init_completion(&isert_conn->login_comp); |
405 | init_completion(&isert_conn->login_req_comp); | 405 | init_completion(&isert_conn->login_req_comp); |
406 | init_waitqueue_head(&isert_conn->rem_wait); | ||
406 | kref_init(&isert_conn->kref); | 407 | kref_init(&isert_conn->kref); |
407 | mutex_init(&isert_conn->mutex); | 408 | mutex_init(&isert_conn->mutex); |
408 | INIT_WORK(&isert_conn->release_work, isert_release_work); | 409 | INIT_WORK(&isert_conn->release_work, isert_release_work); |
@@ -448,7 +449,7 @@ isert_alloc_login_buf(struct isert_conn *isert_conn, | |||
448 | 449 | ||
449 | isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL); | 450 | isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL); |
450 | if (!isert_conn->login_rsp_buf) { | 451 | if (!isert_conn->login_rsp_buf) { |
451 | isert_err("Unable to allocate isert_conn->login_rspbuf\n"); | 452 | ret = -ENOMEM; |
452 | goto out_unmap_login_req_buf; | 453 | goto out_unmap_login_req_buf; |
453 | } | 454 | } |
454 | 455 | ||
@@ -578,7 +579,8 @@ isert_connect_release(struct isert_conn *isert_conn) | |||
578 | BUG_ON(!device); | 579 | BUG_ON(!device); |
579 | 580 | ||
580 | isert_free_rx_descriptors(isert_conn); | 581 | isert_free_rx_descriptors(isert_conn); |
581 | if (isert_conn->cm_id) | 582 | if (isert_conn->cm_id && |
583 | !isert_conn->dev_removed) | ||
582 | rdma_destroy_id(isert_conn->cm_id); | 584 | rdma_destroy_id(isert_conn->cm_id); |
583 | 585 | ||
584 | if (isert_conn->qp) { | 586 | if (isert_conn->qp) { |
@@ -593,7 +595,10 @@ isert_connect_release(struct isert_conn *isert_conn) | |||
593 | 595 | ||
594 | isert_device_put(device); | 596 | isert_device_put(device); |
595 | 597 | ||
596 | kfree(isert_conn); | 598 | if (isert_conn->dev_removed) |
599 | wake_up_interruptible(&isert_conn->rem_wait); | ||
600 | else | ||
601 | kfree(isert_conn); | ||
597 | } | 602 | } |
598 | 603 | ||
599 | static void | 604 | static void |
@@ -753,6 +758,7 @@ static int | |||
753 | isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) | 758 | isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) |
754 | { | 759 | { |
755 | struct isert_np *isert_np = cma_id->context; | 760 | struct isert_np *isert_np = cma_id->context; |
761 | struct isert_conn *isert_conn; | ||
756 | int ret = 0; | 762 | int ret = 0; |
757 | 763 | ||
758 | isert_info("%s (%d): status %d id %p np %p\n", | 764 | isert_info("%s (%d): status %d id %p np %p\n", |
@@ -773,10 +779,21 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) | |||
773 | break; | 779 | break; |
774 | case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */ | 780 | case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */ |
775 | case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */ | 781 | case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */ |
776 | case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */ | ||
777 | case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */ | 782 | case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */ |
778 | ret = isert_disconnected_handler(cma_id, event->event); | 783 | ret = isert_disconnected_handler(cma_id, event->event); |
779 | break; | 784 | break; |
785 | case RDMA_CM_EVENT_DEVICE_REMOVAL: | ||
786 | isert_conn = cma_id->qp->qp_context; | ||
787 | isert_conn->dev_removed = true; | ||
788 | isert_disconnected_handler(cma_id, event->event); | ||
789 | wait_event_interruptible(isert_conn->rem_wait, | ||
790 | isert_conn->state == ISER_CONN_DOWN); | ||
791 | kfree(isert_conn); | ||
792 | /* | ||
793 | * return non-zero from the callback to destroy | ||
794 | * the rdma cm id | ||
795 | */ | ||
796 | return 1; | ||
780 | case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */ | 797 | case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */ |
781 | case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */ | 798 | case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */ |
782 | case RDMA_CM_EVENT_CONNECT_ERROR: | 799 | case RDMA_CM_EVENT_CONNECT_ERROR: |
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index fc791efe3a10..c02ada57d7f5 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h | |||
@@ -158,6 +158,8 @@ struct isert_conn { | |||
158 | struct work_struct release_work; | 158 | struct work_struct release_work; |
159 | bool logout_posted; | 159 | bool logout_posted; |
160 | bool snd_w_inv; | 160 | bool snd_w_inv; |
161 | wait_queue_head_t rem_wait; | ||
162 | bool dev_removed; | ||
161 | }; | 163 | }; |
162 | 164 | ||
163 | #define ISERT_MAX_CQ 64 | 165 | #define ISERT_MAX_CQ 64 |
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index dfa23b075a88..883bbfe08e0e 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c | |||
@@ -522,6 +522,11 @@ static int srpt_refresh_port(struct srpt_port *sport) | |||
522 | if (ret) | 522 | if (ret) |
523 | goto err_query_port; | 523 | goto err_query_port; |
524 | 524 | ||
525 | snprintf(sport->port_guid, sizeof(sport->port_guid), | ||
526 | "0x%016llx%016llx", | ||
527 | be64_to_cpu(sport->gid.global.subnet_prefix), | ||
528 | be64_to_cpu(sport->gid.global.interface_id)); | ||
529 | |||
525 | if (!sport->mad_agent) { | 530 | if (!sport->mad_agent) { |
526 | memset(®_req, 0, sizeof(reg_req)); | 531 | memset(®_req, 0, sizeof(reg_req)); |
527 | reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT; | 532 | reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT; |
@@ -2548,10 +2553,6 @@ static void srpt_add_one(struct ib_device *device) | |||
2548 | sdev->device->name, i); | 2553 | sdev->device->name, i); |
2549 | goto err_ring; | 2554 | goto err_ring; |
2550 | } | 2555 | } |
2551 | snprintf(sport->port_guid, sizeof(sport->port_guid), | ||
2552 | "0x%016llx%016llx", | ||
2553 | be64_to_cpu(sport->gid.global.subnet_prefix), | ||
2554 | be64_to_cpu(sport->gid.global.interface_id)); | ||
2555 | } | 2556 | } |
2556 | 2557 | ||
2557 | spin_lock(&srpt_dev_lock); | 2558 | spin_lock(&srpt_dev_lock); |
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c index 7d61439be5f2..0c07e1023a46 100644 --- a/drivers/input/keyboard/tegra-kbc.c +++ b/drivers/input/keyboard/tegra-kbc.c | |||
@@ -376,7 +376,7 @@ static int tegra_kbc_start(struct tegra_kbc *kbc) | |||
376 | /* Reset the KBC controller to clear all previous status.*/ | 376 | /* Reset the KBC controller to clear all previous status.*/ |
377 | reset_control_assert(kbc->rst); | 377 | reset_control_assert(kbc->rst); |
378 | udelay(100); | 378 | udelay(100); |
379 | reset_control_assert(kbc->rst); | 379 | reset_control_deassert(kbc->rst); |
380 | udelay(100); | 380 | udelay(100); |
381 | 381 | ||
382 | tegra_kbc_config_pins(kbc); | 382 | tegra_kbc_config_pins(kbc); |
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c index faa295ec4f31..c83bce89028b 100644 --- a/drivers/input/rmi4/rmi_driver.c +++ b/drivers/input/rmi4/rmi_driver.c | |||
@@ -553,7 +553,6 @@ int rmi_read_register_desc(struct rmi_device *d, u16 addr, | |||
553 | goto free_struct_buff; | 553 | goto free_struct_buff; |
554 | 554 | ||
555 | reg = find_first_bit(rdesc->presense_map, RMI_REG_DESC_PRESENSE_BITS); | 555 | reg = find_first_bit(rdesc->presense_map, RMI_REG_DESC_PRESENSE_BITS); |
556 | map_offset = 0; | ||
557 | for (i = 0; i < rdesc->num_registers; i++) { | 556 | for (i = 0; i < rdesc->num_registers; i++) { |
558 | struct rmi_register_desc_item *item = &rdesc->registers[i]; | 557 | struct rmi_register_desc_item *item = &rdesc->registers[i]; |
559 | int reg_size = struct_buf[offset]; | 558 | int reg_size = struct_buf[offset]; |
@@ -576,6 +575,8 @@ int rmi_read_register_desc(struct rmi_device *d, u16 addr, | |||
576 | item->reg = reg; | 575 | item->reg = reg; |
577 | item->reg_size = reg_size; | 576 | item->reg_size = reg_size; |
578 | 577 | ||
578 | map_offset = 0; | ||
579 | |||
579 | do { | 580 | do { |
580 | for (b = 0; b < 7; b++) { | 581 | for (b = 0; b < 7; b++) { |
581 | if (struct_buf[offset] & (0x1 << b)) | 582 | if (struct_buf[offset] & (0x1 << b)) |
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index b4d34086e73f..405252a884dd 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c | |||
@@ -1305,6 +1305,7 @@ static int __init i8042_create_aux_port(int idx) | |||
1305 | serio->write = i8042_aux_write; | 1305 | serio->write = i8042_aux_write; |
1306 | serio->start = i8042_start; | 1306 | serio->start = i8042_start; |
1307 | serio->stop = i8042_stop; | 1307 | serio->stop = i8042_stop; |
1308 | serio->ps2_cmd_mutex = &i8042_mutex; | ||
1308 | serio->port_data = port; | 1309 | serio->port_data = port; |
1309 | serio->dev.parent = &i8042_platform_device->dev; | 1310 | serio->dev.parent = &i8042_platform_device->dev; |
1310 | if (idx < 0) { | 1311 | if (idx < 0) { |
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index a61b2153ab8c..1ce3ecbe37f8 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c | |||
@@ -1473,7 +1473,6 @@ static int ads7846_remove(struct spi_device *spi) | |||
1473 | 1473 | ||
1474 | ads784x_hwmon_unregister(spi, ts); | 1474 | ads784x_hwmon_unregister(spi, ts); |
1475 | 1475 | ||
1476 | regulator_disable(ts->reg); | ||
1477 | regulator_put(ts->reg); | 1476 | regulator_put(ts->reg); |
1478 | 1477 | ||
1479 | if (!ts->get_pendown_state) { | 1478 | if (!ts->get_pendown_state) { |
diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c index 7379fe153cf9..f502c8488be8 100644 --- a/drivers/input/touchscreen/silead.c +++ b/drivers/input/touchscreen/silead.c | |||
@@ -390,9 +390,10 @@ static void silead_ts_read_props(struct i2c_client *client) | |||
390 | data->max_fingers = 5; /* Most devices handle up-to 5 fingers */ | 390 | data->max_fingers = 5; /* Most devices handle up-to 5 fingers */ |
391 | } | 391 | } |
392 | 392 | ||
393 | error = device_property_read_string(dev, "touchscreen-fw-name", &str); | 393 | error = device_property_read_string(dev, "firmware-name", &str); |
394 | if (!error) | 394 | if (!error) |
395 | snprintf(data->fw_name, sizeof(data->fw_name), "%s", str); | 395 | snprintf(data->fw_name, sizeof(data->fw_name), |
396 | "silead/%s", str); | ||
396 | else | 397 | else |
397 | dev_dbg(dev, "Firmware file name read error. Using default."); | 398 | dev_dbg(dev, "Firmware file name read error. Using default."); |
398 | } | 399 | } |
@@ -410,14 +411,14 @@ static int silead_ts_set_default_fw_name(struct silead_ts_data *data, | |||
410 | if (!acpi_id) | 411 | if (!acpi_id) |
411 | return -ENODEV; | 412 | return -ENODEV; |
412 | 413 | ||
413 | snprintf(data->fw_name, sizeof(data->fw_name), "%s.fw", | 414 | snprintf(data->fw_name, sizeof(data->fw_name), |
414 | acpi_id->id); | 415 | "silead/%s.fw", acpi_id->id); |
415 | 416 | ||
416 | for (i = 0; i < strlen(data->fw_name); i++) | 417 | for (i = 0; i < strlen(data->fw_name); i++) |
417 | data->fw_name[i] = tolower(data->fw_name[i]); | 418 | data->fw_name[i] = tolower(data->fw_name[i]); |
418 | } else { | 419 | } else { |
419 | snprintf(data->fw_name, sizeof(data->fw_name), "%s.fw", | 420 | snprintf(data->fw_name, sizeof(data->fw_name), |
420 | id->name); | 421 | "silead/%s.fw", id->name); |
421 | } | 422 | } |
422 | 423 | ||
423 | return 0; | 424 | return 0; |
@@ -426,7 +427,8 @@ static int silead_ts_set_default_fw_name(struct silead_ts_data *data, | |||
426 | static int silead_ts_set_default_fw_name(struct silead_ts_data *data, | 427 | static int silead_ts_set_default_fw_name(struct silead_ts_data *data, |
427 | const struct i2c_device_id *id) | 428 | const struct i2c_device_id *id) |
428 | { | 429 | { |
429 | snprintf(data->fw_name, sizeof(data->fw_name), "%s.fw", id->name); | 430 | snprintf(data->fw_name, sizeof(data->fw_name), |
431 | "silead/%s.fw", id->name); | ||
430 | return 0; | 432 | return 0; |
431 | } | 433 | } |
432 | #endif | 434 | #endif |
@@ -464,7 +466,7 @@ static int silead_ts_probe(struct i2c_client *client, | |||
464 | return -ENODEV; | 466 | return -ENODEV; |
465 | 467 | ||
466 | /* Power GPIO pin */ | 468 | /* Power GPIO pin */ |
467 | data->gpio_power = gpiod_get_optional(dev, "power", GPIOD_OUT_LOW); | 469 | data->gpio_power = devm_gpiod_get_optional(dev, "power", GPIOD_OUT_LOW); |
468 | if (IS_ERR(data->gpio_power)) { | 470 | if (IS_ERR(data->gpio_power)) { |
469 | if (PTR_ERR(data->gpio_power) != -EPROBE_DEFER) | 471 | if (PTR_ERR(data->gpio_power) != -EPROBE_DEFER) |
470 | dev_err(dev, "Shutdown GPIO request failed\n"); | 472 | dev_err(dev, "Shutdown GPIO request failed\n"); |
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index ce801170d5f2..641e88761319 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c | |||
@@ -879,7 +879,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu) | |||
879 | * We may have concurrent producers, so we need to be careful | 879 | * We may have concurrent producers, so we need to be careful |
880 | * not to touch any of the shadow cmdq state. | 880 | * not to touch any of the shadow cmdq state. |
881 | */ | 881 | */ |
882 | queue_read(cmd, Q_ENT(q, idx), q->ent_dwords); | 882 | queue_read(cmd, Q_ENT(q, cons), q->ent_dwords); |
883 | dev_err(smmu->dev, "skipping command in error state:\n"); | 883 | dev_err(smmu->dev, "skipping command in error state:\n"); |
884 | for (i = 0; i < ARRAY_SIZE(cmd); ++i) | 884 | for (i = 0; i < ARRAY_SIZE(cmd); ++i) |
885 | dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]); | 885 | dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]); |
@@ -890,7 +890,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu) | |||
890 | return; | 890 | return; |
891 | } | 891 | } |
892 | 892 | ||
893 | queue_write(cmd, Q_ENT(q, idx), q->ent_dwords); | 893 | queue_write(Q_ENT(q, cons), cmd, q->ent_dwords); |
894 | } | 894 | } |
895 | 895 | ||
896 | static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu, | 896 | static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu, |
@@ -1034,6 +1034,9 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid, | |||
1034 | case STRTAB_STE_0_CFG_S2_TRANS: | 1034 | case STRTAB_STE_0_CFG_S2_TRANS: |
1035 | ste_live = true; | 1035 | ste_live = true; |
1036 | break; | 1036 | break; |
1037 | case STRTAB_STE_0_CFG_ABORT: | ||
1038 | if (disable_bypass) | ||
1039 | break; | ||
1037 | default: | 1040 | default: |
1038 | BUG(); /* STE corruption */ | 1041 | BUG(); /* STE corruption */ |
1039 | } | 1042 | } |
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 4f49fe29f202..2db74ebc3240 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c | |||
@@ -686,8 +686,7 @@ static struct iommu_gather_ops arm_smmu_gather_ops = { | |||
686 | 686 | ||
687 | static irqreturn_t arm_smmu_context_fault(int irq, void *dev) | 687 | static irqreturn_t arm_smmu_context_fault(int irq, void *dev) |
688 | { | 688 | { |
689 | int flags, ret; | 689 | u32 fsr, fsynr; |
690 | u32 fsr, fsynr, resume; | ||
691 | unsigned long iova; | 690 | unsigned long iova; |
692 | struct iommu_domain *domain = dev; | 691 | struct iommu_domain *domain = dev; |
693 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); | 692 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
@@ -701,34 +700,15 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev) | |||
701 | if (!(fsr & FSR_FAULT)) | 700 | if (!(fsr & FSR_FAULT)) |
702 | return IRQ_NONE; | 701 | return IRQ_NONE; |
703 | 702 | ||
704 | if (fsr & FSR_IGN) | ||
705 | dev_err_ratelimited(smmu->dev, | ||
706 | "Unexpected context fault (fsr 0x%x)\n", | ||
707 | fsr); | ||
708 | |||
709 | fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0); | 703 | fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0); |
710 | flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ; | ||
711 | |||
712 | iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR); | 704 | iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR); |
713 | if (!report_iommu_fault(domain, smmu->dev, iova, flags)) { | ||
714 | ret = IRQ_HANDLED; | ||
715 | resume = RESUME_RETRY; | ||
716 | } else { | ||
717 | dev_err_ratelimited(smmu->dev, | ||
718 | "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n", | ||
719 | iova, fsynr, cfg->cbndx); | ||
720 | ret = IRQ_NONE; | ||
721 | resume = RESUME_TERMINATE; | ||
722 | } | ||
723 | |||
724 | /* Clear the faulting FSR */ | ||
725 | writel(fsr, cb_base + ARM_SMMU_CB_FSR); | ||
726 | 705 | ||
727 | /* Retry or terminate any stalled transactions */ | 706 | dev_err_ratelimited(smmu->dev, |
728 | if (fsr & FSR_SS) | 707 | "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n", |
729 | writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME); | 708 | fsr, iova, fsynr, cfg->cbndx); |
730 | 709 | ||
731 | return ret; | 710 | writel(fsr, cb_base + ARM_SMMU_CB_FSR); |
711 | return IRQ_HANDLED; | ||
732 | } | 712 | } |
733 | 713 | ||
734 | static irqreturn_t arm_smmu_global_fault(int irq, void *dev) | 714 | static irqreturn_t arm_smmu_global_fault(int irq, void *dev) |
@@ -837,7 +817,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, | |||
837 | } | 817 | } |
838 | 818 | ||
839 | /* SCTLR */ | 819 | /* SCTLR */ |
840 | reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP; | 820 | reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP; |
841 | if (stage1) | 821 | if (stage1) |
842 | reg |= SCTLR_S1_ASIDPNE; | 822 | reg |= SCTLR_S1_ASIDPNE; |
843 | #ifdef __BIG_ENDIAN | 823 | #ifdef __BIG_ENDIAN |
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 08a1e2f3690f..00c8a08d56e7 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c | |||
@@ -68,7 +68,8 @@ void iommu_put_dma_cookie(struct iommu_domain *domain) | |||
68 | if (!iovad) | 68 | if (!iovad) |
69 | return; | 69 | return; |
70 | 70 | ||
71 | put_iova_domain(iovad); | 71 | if (iovad->granule) |
72 | put_iova_domain(iovad); | ||
72 | kfree(iovad); | 73 | kfree(iovad); |
73 | domain->iova_cookie = NULL; | 74 | domain->iova_cookie = NULL; |
74 | } | 75 | } |
@@ -151,12 +152,15 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent) | |||
151 | } | 152 | } |
152 | } | 153 | } |
153 | 154 | ||
154 | static struct iova *__alloc_iova(struct iova_domain *iovad, size_t size, | 155 | static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size, |
155 | dma_addr_t dma_limit) | 156 | dma_addr_t dma_limit) |
156 | { | 157 | { |
158 | struct iova_domain *iovad = domain->iova_cookie; | ||
157 | unsigned long shift = iova_shift(iovad); | 159 | unsigned long shift = iova_shift(iovad); |
158 | unsigned long length = iova_align(iovad, size) >> shift; | 160 | unsigned long length = iova_align(iovad, size) >> shift; |
159 | 161 | ||
162 | if (domain->geometry.force_aperture) | ||
163 | dma_limit = min(dma_limit, domain->geometry.aperture_end); | ||
160 | /* | 164 | /* |
161 | * Enforce size-alignment to be safe - there could perhaps be an | 165 | * Enforce size-alignment to be safe - there could perhaps be an |
162 | * attribute to control this per-device, or at least per-domain... | 166 | * attribute to control this per-device, or at least per-domain... |
@@ -314,7 +318,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, | |||
314 | if (!pages) | 318 | if (!pages) |
315 | return NULL; | 319 | return NULL; |
316 | 320 | ||
317 | iova = __alloc_iova(iovad, size, dev->coherent_dma_mask); | 321 | iova = __alloc_iova(domain, size, dev->coherent_dma_mask); |
318 | if (!iova) | 322 | if (!iova) |
319 | goto out_free_pages; | 323 | goto out_free_pages; |
320 | 324 | ||
@@ -386,7 +390,7 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, | |||
386 | phys_addr_t phys = page_to_phys(page) + offset; | 390 | phys_addr_t phys = page_to_phys(page) + offset; |
387 | size_t iova_off = iova_offset(iovad, phys); | 391 | size_t iova_off = iova_offset(iovad, phys); |
388 | size_t len = iova_align(iovad, size + iova_off); | 392 | size_t len = iova_align(iovad, size + iova_off); |
389 | struct iova *iova = __alloc_iova(iovad, len, dma_get_mask(dev)); | 393 | struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev)); |
390 | 394 | ||
391 | if (!iova) | 395 | if (!iova) |
392 | return DMA_ERROR_CODE; | 396 | return DMA_ERROR_CODE; |
@@ -538,7 +542,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, | |||
538 | prev = s; | 542 | prev = s; |
539 | } | 543 | } |
540 | 544 | ||
541 | iova = __alloc_iova(iovad, iova_len, dma_get_mask(dev)); | 545 | iova = __alloc_iova(domain, iova_len, dma_get_mask(dev)); |
542 | if (!iova) | 546 | if (!iova) |
543 | goto out_restore_sg; | 547 | goto out_restore_sg; |
544 | 548 | ||
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index 8c6139986d7d..def8ca1c982d 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c | |||
@@ -286,12 +286,14 @@ static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl) | |||
286 | int prot = IOMMU_READ; | 286 | int prot = IOMMU_READ; |
287 | arm_v7s_iopte attr = pte >> ARM_V7S_ATTR_SHIFT(lvl); | 287 | arm_v7s_iopte attr = pte >> ARM_V7S_ATTR_SHIFT(lvl); |
288 | 288 | ||
289 | if (attr & ARM_V7S_PTE_AP_RDONLY) | 289 | if (!(attr & ARM_V7S_PTE_AP_RDONLY)) |
290 | prot |= IOMMU_WRITE; | 290 | prot |= IOMMU_WRITE; |
291 | if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0) | 291 | if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0) |
292 | prot |= IOMMU_MMIO; | 292 | prot |= IOMMU_MMIO; |
293 | else if (pte & ARM_V7S_ATTR_C) | 293 | else if (pte & ARM_V7S_ATTR_C) |
294 | prot |= IOMMU_CACHE; | 294 | prot |= IOMMU_CACHE; |
295 | if (pte & ARM_V7S_ATTR_XN(lvl)) | ||
296 | prot |= IOMMU_NOEXEC; | ||
295 | 297 | ||
296 | return prot; | 298 | return prot; |
297 | } | 299 | } |
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h index 9ed0a8462ccf..3dab13b4a211 100644 --- a/drivers/iommu/mtk_iommu.h +++ b/drivers/iommu/mtk_iommu.h | |||
@@ -55,19 +55,19 @@ struct mtk_iommu_data { | |||
55 | bool enable_4GB; | 55 | bool enable_4GB; |
56 | }; | 56 | }; |
57 | 57 | ||
58 | static int compare_of(struct device *dev, void *data) | 58 | static inline int compare_of(struct device *dev, void *data) |
59 | { | 59 | { |
60 | return dev->of_node == data; | 60 | return dev->of_node == data; |
61 | } | 61 | } |
62 | 62 | ||
63 | static int mtk_iommu_bind(struct device *dev) | 63 | static inline int mtk_iommu_bind(struct device *dev) |
64 | { | 64 | { |
65 | struct mtk_iommu_data *data = dev_get_drvdata(dev); | 65 | struct mtk_iommu_data *data = dev_get_drvdata(dev); |
66 | 66 | ||
67 | return component_bind_all(dev, &data->smi_imu); | 67 | return component_bind_all(dev, &data->smi_imu); |
68 | } | 68 | } |
69 | 69 | ||
70 | static void mtk_iommu_unbind(struct device *dev) | 70 | static inline void mtk_iommu_unbind(struct device *dev) |
71 | { | 71 | { |
72 | struct mtk_iommu_data *data = dev_get_drvdata(dev); | 72 | struct mtk_iommu_data *data = dev_get_drvdata(dev); |
73 | 73 | ||
diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c index 112e17c2768b..37f952dd9fc9 100644 --- a/drivers/irqchip/irq-atmel-aic.c +++ b/drivers/irqchip/irq-atmel-aic.c | |||
@@ -176,6 +176,7 @@ static int aic_irq_domain_xlate(struct irq_domain *d, | |||
176 | { | 176 | { |
177 | struct irq_domain_chip_generic *dgc = d->gc; | 177 | struct irq_domain_chip_generic *dgc = d->gc; |
178 | struct irq_chip_generic *gc; | 178 | struct irq_chip_generic *gc; |
179 | unsigned long flags; | ||
179 | unsigned smr; | 180 | unsigned smr; |
180 | int idx; | 181 | int idx; |
181 | int ret; | 182 | int ret; |
@@ -194,11 +195,11 @@ static int aic_irq_domain_xlate(struct irq_domain *d, | |||
194 | 195 | ||
195 | gc = dgc->gc[idx]; | 196 | gc = dgc->gc[idx]; |
196 | 197 | ||
197 | irq_gc_lock(gc); | 198 | irq_gc_lock_irqsave(gc, flags); |
198 | smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq)); | 199 | smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq)); |
199 | aic_common_set_priority(intspec[2], &smr); | 200 | aic_common_set_priority(intspec[2], &smr); |
200 | irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq)); | 201 | irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq)); |
201 | irq_gc_unlock(gc); | 202 | irq_gc_unlock_irqrestore(gc, flags); |
202 | 203 | ||
203 | return ret; | 204 | return ret; |
204 | } | 205 | } |
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c index 4f0d068e1abe..2a624d87a035 100644 --- a/drivers/irqchip/irq-atmel-aic5.c +++ b/drivers/irqchip/irq-atmel-aic5.c | |||
@@ -258,6 +258,7 @@ static int aic5_irq_domain_xlate(struct irq_domain *d, | |||
258 | unsigned int *out_type) | 258 | unsigned int *out_type) |
259 | { | 259 | { |
260 | struct irq_chip_generic *bgc = irq_get_domain_generic_chip(d, 0); | 260 | struct irq_chip_generic *bgc = irq_get_domain_generic_chip(d, 0); |
261 | unsigned long flags; | ||
261 | unsigned smr; | 262 | unsigned smr; |
262 | int ret; | 263 | int ret; |
263 | 264 | ||
@@ -269,12 +270,12 @@ static int aic5_irq_domain_xlate(struct irq_domain *d, | |||
269 | if (ret) | 270 | if (ret) |
270 | return ret; | 271 | return ret; |
271 | 272 | ||
272 | irq_gc_lock(bgc); | 273 | irq_gc_lock_irqsave(bgc, flags); |
273 | irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR); | 274 | irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR); |
274 | smr = irq_reg_readl(bgc, AT91_AIC5_SMR); | 275 | smr = irq_reg_readl(bgc, AT91_AIC5_SMR); |
275 | aic_common_set_priority(intspec[2], &smr); | 276 | aic_common_set_priority(intspec[2], &smr); |
276 | irq_reg_writel(bgc, smr, AT91_AIC5_SMR); | 277 | irq_reg_writel(bgc, smr, AT91_AIC5_SMR); |
277 | irq_gc_unlock(bgc); | 278 | irq_gc_unlock_irqrestore(bgc, flags); |
278 | 279 | ||
279 | return ret; | 280 | return ret; |
280 | } | 281 | } |
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 7ceaba81efb4..36b9c28a5c91 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
@@ -1545,7 +1545,12 @@ static int its_force_quiescent(void __iomem *base) | |||
1545 | u32 val; | 1545 | u32 val; |
1546 | 1546 | ||
1547 | val = readl_relaxed(base + GITS_CTLR); | 1547 | val = readl_relaxed(base + GITS_CTLR); |
1548 | if (val & GITS_CTLR_QUIESCENT) | 1548 | /* |
1549 | * GIC architecture specification requires the ITS to be both | ||
1550 | * disabled and quiescent for writes to GITS_BASER<n> or | ||
1551 | * GITS_CBASER to not have UNPREDICTABLE results. | ||
1552 | */ | ||
1553 | if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE)) | ||
1549 | return 0; | 1554 | return 0; |
1550 | 1555 | ||
1551 | /* Disable the generation of all interrupts to this ITS */ | 1556 | /* Disable the generation of all interrupts to this ITS */ |
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 6fc56c3466b0..da6c0ba61d4f 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c | |||
@@ -548,7 +548,7 @@ static int gic_starting_cpu(unsigned int cpu) | |||
548 | static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, | 548 | static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, |
549 | unsigned long cluster_id) | 549 | unsigned long cluster_id) |
550 | { | 550 | { |
551 | int cpu = *base_cpu; | 551 | int next_cpu, cpu = *base_cpu; |
552 | unsigned long mpidr = cpu_logical_map(cpu); | 552 | unsigned long mpidr = cpu_logical_map(cpu); |
553 | u16 tlist = 0; | 553 | u16 tlist = 0; |
554 | 554 | ||
@@ -562,9 +562,10 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, | |||
562 | 562 | ||
563 | tlist |= 1 << (mpidr & 0xf); | 563 | tlist |= 1 << (mpidr & 0xf); |
564 | 564 | ||
565 | cpu = cpumask_next(cpu, mask); | 565 | next_cpu = cpumask_next(cpu, mask); |
566 | if (cpu >= nr_cpu_ids) | 566 | if (next_cpu >= nr_cpu_ids) |
567 | goto out; | 567 | goto out; |
568 | cpu = next_cpu; | ||
568 | 569 | ||
569 | mpidr = cpu_logical_map(cpu); | 570 | mpidr = cpu_logical_map(cpu); |
570 | 571 | ||
@@ -667,13 +668,20 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, | |||
667 | #endif | 668 | #endif |
668 | 669 | ||
669 | #ifdef CONFIG_CPU_PM | 670 | #ifdef CONFIG_CPU_PM |
671 | /* Check whether it's single security state view */ | ||
672 | static bool gic_dist_security_disabled(void) | ||
673 | { | ||
674 | return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS; | ||
675 | } | ||
676 | |||
670 | static int gic_cpu_pm_notifier(struct notifier_block *self, | 677 | static int gic_cpu_pm_notifier(struct notifier_block *self, |
671 | unsigned long cmd, void *v) | 678 | unsigned long cmd, void *v) |
672 | { | 679 | { |
673 | if (cmd == CPU_PM_EXIT) { | 680 | if (cmd == CPU_PM_EXIT) { |
674 | gic_enable_redist(true); | 681 | if (gic_dist_security_disabled()) |
682 | gic_enable_redist(true); | ||
675 | gic_cpu_sys_reg_init(); | 683 | gic_cpu_sys_reg_init(); |
676 | } else if (cmd == CPU_PM_ENTER) { | 684 | } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) { |
677 | gic_write_grpen1(0); | 685 | gic_write_grpen1(0); |
678 | gic_enable_redist(false); | 686 | gic_enable_redist(false); |
679 | } | 687 | } |
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index c2cab572c511..390fac59c6bc 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c | |||
@@ -769,6 +769,13 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) | |||
769 | int cpu; | 769 | int cpu; |
770 | unsigned long flags, map = 0; | 770 | unsigned long flags, map = 0; |
771 | 771 | ||
772 | if (unlikely(nr_cpu_ids == 1)) { | ||
773 | /* Only one CPU? let's do a self-IPI... */ | ||
774 | writel_relaxed(2 << 24 | irq, | ||
775 | gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT); | ||
776 | return; | ||
777 | } | ||
778 | |||
772 | raw_spin_lock_irqsave(&irq_controller_lock, flags); | 779 | raw_spin_lock_irqsave(&irq_controller_lock, flags); |
773 | 780 | ||
774 | /* Convert our logical CPU mask into a physical one. */ | 781 | /* Convert our logical CPU mask into a physical one. */ |
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index c5f33c3bd228..6185696405d5 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c | |||
@@ -638,27 +638,6 @@ static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq, | |||
638 | if (!gic_local_irq_is_routable(intr)) | 638 | if (!gic_local_irq_is_routable(intr)) |
639 | return -EPERM; | 639 | return -EPERM; |
640 | 640 | ||
641 | /* | ||
642 | * HACK: These are all really percpu interrupts, but the rest | ||
643 | * of the MIPS kernel code does not use the percpu IRQ API for | ||
644 | * the CP0 timer and performance counter interrupts. | ||
645 | */ | ||
646 | switch (intr) { | ||
647 | case GIC_LOCAL_INT_TIMER: | ||
648 | case GIC_LOCAL_INT_PERFCTR: | ||
649 | case GIC_LOCAL_INT_FDC: | ||
650 | irq_set_chip_and_handler(virq, | ||
651 | &gic_all_vpes_local_irq_controller, | ||
652 | handle_percpu_irq); | ||
653 | break; | ||
654 | default: | ||
655 | irq_set_chip_and_handler(virq, | ||
656 | &gic_local_irq_controller, | ||
657 | handle_percpu_devid_irq); | ||
658 | irq_set_percpu_devid(virq); | ||
659 | break; | ||
660 | } | ||
661 | |||
662 | spin_lock_irqsave(&gic_lock, flags); | 641 | spin_lock_irqsave(&gic_lock, flags); |
663 | for (i = 0; i < gic_vpes; i++) { | 642 | for (i = 0; i < gic_vpes; i++) { |
664 | u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin; | 643 | u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin; |
@@ -713,9 +692,6 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq, | |||
713 | unsigned long flags; | 692 | unsigned long flags; |
714 | int i; | 693 | int i; |
715 | 694 | ||
716 | irq_set_chip_and_handler(virq, &gic_level_irq_controller, | ||
717 | handle_level_irq); | ||
718 | |||
719 | spin_lock_irqsave(&gic_lock, flags); | 695 | spin_lock_irqsave(&gic_lock, flags); |
720 | gic_map_to_pin(intr, gic_cpu_pin); | 696 | gic_map_to_pin(intr, gic_cpu_pin); |
721 | gic_map_to_vpe(intr, mips_cm_vp_id(vpe)); | 697 | gic_map_to_vpe(intr, mips_cm_vp_id(vpe)); |
@@ -727,12 +703,42 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq, | |||
727 | return 0; | 703 | return 0; |
728 | } | 704 | } |
729 | 705 | ||
730 | static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq, | 706 | static int gic_setup_dev_chip(struct irq_domain *d, unsigned int virq, |
731 | irq_hw_number_t hw) | 707 | unsigned int hwirq) |
732 | { | 708 | { |
733 | if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS) | 709 | struct irq_chip *chip; |
734 | return gic_local_irq_domain_map(d, virq, hw); | 710 | int err; |
735 | return gic_shared_irq_domain_map(d, virq, hw, 0); | 711 | |
712 | if (hwirq >= GIC_SHARED_HWIRQ_BASE) { | ||
713 | err = irq_domain_set_hwirq_and_chip(d, virq, hwirq, | ||
714 | &gic_level_irq_controller, | ||
715 | NULL); | ||
716 | } else { | ||
717 | switch (GIC_HWIRQ_TO_LOCAL(hwirq)) { | ||
718 | case GIC_LOCAL_INT_TIMER: | ||
719 | case GIC_LOCAL_INT_PERFCTR: | ||
720 | case GIC_LOCAL_INT_FDC: | ||
721 | /* | ||
722 | * HACK: These are all really percpu interrupts, but | ||
723 | * the rest of the MIPS kernel code does not use the | ||
724 | * percpu IRQ API for them. | ||
725 | */ | ||
726 | chip = &gic_all_vpes_local_irq_controller; | ||
727 | irq_set_handler(virq, handle_percpu_irq); | ||
728 | break; | ||
729 | |||
730 | default: | ||
731 | chip = &gic_local_irq_controller; | ||
732 | irq_set_handler(virq, handle_percpu_devid_irq); | ||
733 | irq_set_percpu_devid(virq); | ||
734 | break; | ||
735 | } | ||
736 | |||
737 | err = irq_domain_set_hwirq_and_chip(d, virq, hwirq, | ||
738 | chip, NULL); | ||
739 | } | ||
740 | |||
741 | return err; | ||
736 | } | 742 | } |
737 | 743 | ||
738 | static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq, | 744 | static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq, |
@@ -743,15 +749,12 @@ static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq, | |||
743 | int cpu, ret, i; | 749 | int cpu, ret, i; |
744 | 750 | ||
745 | if (spec->type == GIC_DEVICE) { | 751 | if (spec->type == GIC_DEVICE) { |
746 | /* verify that it doesn't conflict with an IPI irq */ | 752 | /* verify that shared irqs don't conflict with an IPI irq */ |
747 | if (test_bit(spec->hwirq, ipi_resrv)) | 753 | if ((spec->hwirq >= GIC_SHARED_HWIRQ_BASE) && |
754 | test_bit(GIC_HWIRQ_TO_SHARED(spec->hwirq), ipi_resrv)) | ||
748 | return -EBUSY; | 755 | return -EBUSY; |
749 | 756 | ||
750 | hwirq = GIC_SHARED_TO_HWIRQ(spec->hwirq); | 757 | return gic_setup_dev_chip(d, virq, spec->hwirq); |
751 | |||
752 | return irq_domain_set_hwirq_and_chip(d, virq, hwirq, | ||
753 | &gic_level_irq_controller, | ||
754 | NULL); | ||
755 | } else { | 758 | } else { |
756 | base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs); | 759 | base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs); |
757 | if (base_hwirq == gic_shared_intrs) { | 760 | if (base_hwirq == gic_shared_intrs) { |
@@ -771,11 +774,13 @@ static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq, | |||
771 | hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i); | 774 | hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i); |
772 | 775 | ||
773 | ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq, | 776 | ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq, |
774 | &gic_edge_irq_controller, | 777 | &gic_level_irq_controller, |
775 | NULL); | 778 | NULL); |
776 | if (ret) | 779 | if (ret) |
777 | goto error; | 780 | goto error; |
778 | 781 | ||
782 | irq_set_handler(virq + i, handle_level_irq); | ||
783 | |||
779 | ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu); | 784 | ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu); |
780 | if (ret) | 785 | if (ret) |
781 | goto error; | 786 | goto error; |
@@ -818,7 +823,6 @@ int gic_irq_domain_match(struct irq_domain *d, struct device_node *node, | |||
818 | } | 823 | } |
819 | 824 | ||
820 | static const struct irq_domain_ops gic_irq_domain_ops = { | 825 | static const struct irq_domain_ops gic_irq_domain_ops = { |
821 | .map = gic_irq_domain_map, | ||
822 | .alloc = gic_irq_domain_alloc, | 826 | .alloc = gic_irq_domain_alloc, |
823 | .free = gic_irq_domain_free, | 827 | .free = gic_irq_domain_free, |
824 | .match = gic_irq_domain_match, | 828 | .match = gic_irq_domain_match, |
@@ -849,29 +853,20 @@ static int gic_dev_domain_alloc(struct irq_domain *d, unsigned int virq, | |||
849 | struct irq_fwspec *fwspec = arg; | 853 | struct irq_fwspec *fwspec = arg; |
850 | struct gic_irq_spec spec = { | 854 | struct gic_irq_spec spec = { |
851 | .type = GIC_DEVICE, | 855 | .type = GIC_DEVICE, |
852 | .hwirq = fwspec->param[1], | ||
853 | }; | 856 | }; |
854 | int i, ret; | 857 | int i, ret; |
855 | bool is_shared = fwspec->param[0] == GIC_SHARED; | ||
856 | 858 | ||
857 | if (is_shared) { | 859 | if (fwspec->param[0] == GIC_SHARED) |
858 | ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec); | 860 | spec.hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]); |
859 | if (ret) | 861 | else |
860 | return ret; | 862 | spec.hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]); |
861 | } | ||
862 | |||
863 | for (i = 0; i < nr_irqs; i++) { | ||
864 | irq_hw_number_t hwirq; | ||
865 | 863 | ||
866 | if (is_shared) | 864 | ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec); |
867 | hwirq = GIC_SHARED_TO_HWIRQ(spec.hwirq + i); | 865 | if (ret) |
868 | else | 866 | return ret; |
869 | hwirq = GIC_LOCAL_TO_HWIRQ(spec.hwirq + i); | ||
870 | 867 | ||
871 | ret = irq_domain_set_hwirq_and_chip(d, virq + i, | 868 | for (i = 0; i < nr_irqs; i++) { |
872 | hwirq, | 869 | ret = gic_setup_dev_chip(d, virq + i, spec.hwirq + i); |
873 | &gic_level_irq_controller, | ||
874 | NULL); | ||
875 | if (ret) | 870 | if (ret) |
876 | goto error; | 871 | goto error; |
877 | } | 872 | } |
@@ -890,10 +885,20 @@ void gic_dev_domain_free(struct irq_domain *d, unsigned int virq, | |||
890 | return; | 885 | return; |
891 | } | 886 | } |
892 | 887 | ||
888 | static void gic_dev_domain_activate(struct irq_domain *domain, | ||
889 | struct irq_data *d) | ||
890 | { | ||
891 | if (GIC_HWIRQ_TO_LOCAL(d->hwirq) < GIC_NUM_LOCAL_INTRS) | ||
892 | gic_local_irq_domain_map(domain, d->irq, d->hwirq); | ||
893 | else | ||
894 | gic_shared_irq_domain_map(domain, d->irq, d->hwirq, 0); | ||
895 | } | ||
896 | |||
893 | static struct irq_domain_ops gic_dev_domain_ops = { | 897 | static struct irq_domain_ops gic_dev_domain_ops = { |
894 | .xlate = gic_dev_domain_xlate, | 898 | .xlate = gic_dev_domain_xlate, |
895 | .alloc = gic_dev_domain_alloc, | 899 | .alloc = gic_dev_domain_alloc, |
896 | .free = gic_dev_domain_free, | 900 | .free = gic_dev_domain_free, |
901 | .activate = gic_dev_domain_activate, | ||
897 | }; | 902 | }; |
898 | 903 | ||
899 | static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr, | 904 | static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr, |
diff --git a/drivers/macintosh/ams/ams-i2c.c b/drivers/macintosh/ams/ams-i2c.c index 978eda8d6678..8a3ba565106f 100644 --- a/drivers/macintosh/ams/ams-i2c.c +++ b/drivers/macintosh/ams/ams-i2c.c | |||
@@ -73,7 +73,6 @@ MODULE_DEVICE_TABLE(i2c, ams_id); | |||
73 | static struct i2c_driver ams_i2c_driver = { | 73 | static struct i2c_driver ams_i2c_driver = { |
74 | .driver = { | 74 | .driver = { |
75 | .name = "ams", | 75 | .name = "ams", |
76 | .owner = THIS_MODULE, | ||
77 | }, | 76 | }, |
78 | .probe = ams_i2c_probe, | 77 | .probe = ams_i2c_probe, |
79 | .remove = ams_i2c_remove, | 78 | .remove = ams_i2c_remove, |
diff --git a/drivers/macintosh/windfarm_pm112.c b/drivers/macintosh/windfarm_pm112.c index 3024685e4cca..96d16fca68b2 100644 --- a/drivers/macintosh/windfarm_pm112.c +++ b/drivers/macintosh/windfarm_pm112.c | |||
@@ -668,7 +668,6 @@ static struct platform_driver wf_pm112_driver = { | |||
668 | .remove = wf_pm112_remove, | 668 | .remove = wf_pm112_remove, |
669 | .driver = { | 669 | .driver = { |
670 | .name = "windfarm", | 670 | .name = "windfarm", |
671 | .owner = THIS_MODULE, | ||
672 | }, | 671 | }, |
673 | }; | 672 | }; |
674 | 673 | ||
diff --git a/drivers/macintosh/windfarm_pm72.c b/drivers/macintosh/windfarm_pm72.c index 2f506b9d5a52..e88cfb36a74d 100644 --- a/drivers/macintosh/windfarm_pm72.c +++ b/drivers/macintosh/windfarm_pm72.c | |||
@@ -789,7 +789,6 @@ static struct platform_driver wf_pm72_driver = { | |||
789 | .remove = wf_pm72_remove, | 789 | .remove = wf_pm72_remove, |
790 | .driver = { | 790 | .driver = { |
791 | .name = "windfarm", | 791 | .name = "windfarm", |
792 | .owner = THIS_MODULE, | ||
793 | }, | 792 | }, |
794 | }; | 793 | }; |
795 | 794 | ||
diff --git a/drivers/macintosh/windfarm_rm31.c b/drivers/macintosh/windfarm_rm31.c index 82fc86a90c1a..bdfcb8a8bfbb 100644 --- a/drivers/macintosh/windfarm_rm31.c +++ b/drivers/macintosh/windfarm_rm31.c | |||
@@ -682,7 +682,6 @@ static struct platform_driver wf_rm31_driver = { | |||
682 | .remove = wf_rm31_remove, | 682 | .remove = wf_rm31_remove, |
683 | .driver = { | 683 | .driver = { |
684 | .name = "windfarm", | 684 | .name = "windfarm", |
685 | .owner = THIS_MODULE, | ||
686 | }, | 685 | }, |
687 | }; | 686 | }; |
688 | 687 | ||
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig index 97c372908e78..7817d40d81e7 100644 --- a/drivers/mailbox/Kconfig +++ b/drivers/mailbox/Kconfig | |||
@@ -127,6 +127,7 @@ config XGENE_SLIMPRO_MBOX | |||
127 | config BCM_PDC_MBOX | 127 | config BCM_PDC_MBOX |
128 | tristate "Broadcom PDC Mailbox" | 128 | tristate "Broadcom PDC Mailbox" |
129 | depends on ARM64 || COMPILE_TEST | 129 | depends on ARM64 || COMPILE_TEST |
130 | depends on HAS_DMA | ||
130 | default ARCH_BCM_IPROC | 131 | default ARCH_BCM_IPROC |
131 | help | 132 | help |
132 | Mailbox implementation for the Broadcom PDC ring manager, | 133 | Mailbox implementation for the Broadcom PDC ring manager, |
diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c index cbe0c1ee4ba9..c19dd820ea9b 100644 --- a/drivers/mailbox/bcm-pdc-mailbox.c +++ b/drivers/mailbox/bcm-pdc-mailbox.c | |||
@@ -469,7 +469,7 @@ static const struct file_operations pdc_debugfs_stats = { | |||
469 | * this directory for a SPU. | 469 | * this directory for a SPU. |
470 | * @pdcs: PDC state structure | 470 | * @pdcs: PDC state structure |
471 | */ | 471 | */ |
472 | void pdc_setup_debugfs(struct pdc_state *pdcs) | 472 | static void pdc_setup_debugfs(struct pdc_state *pdcs) |
473 | { | 473 | { |
474 | char spu_stats_name[16]; | 474 | char spu_stats_name[16]; |
475 | 475 | ||
@@ -485,7 +485,7 @@ void pdc_setup_debugfs(struct pdc_state *pdcs) | |||
485 | &pdc_debugfs_stats); | 485 | &pdc_debugfs_stats); |
486 | } | 486 | } |
487 | 487 | ||
488 | void pdc_free_debugfs(void) | 488 | static void pdc_free_debugfs(void) |
489 | { | 489 | { |
490 | if (debugfs_dir && simple_empty(debugfs_dir)) { | 490 | if (debugfs_dir && simple_empty(debugfs_dir)) { |
491 | debugfs_remove_recursive(debugfs_dir); | 491 | debugfs_remove_recursive(debugfs_dir); |
@@ -1191,10 +1191,11 @@ static void pdc_shutdown(struct mbox_chan *chan) | |||
1191 | { | 1191 | { |
1192 | struct pdc_state *pdcs = chan->con_priv; | 1192 | struct pdc_state *pdcs = chan->con_priv; |
1193 | 1193 | ||
1194 | if (pdcs) | 1194 | if (!pdcs) |
1195 | dev_dbg(&pdcs->pdev->dev, | 1195 | return; |
1196 | "Shutdown mailbox channel for PDC %u", pdcs->pdc_idx); | ||
1197 | 1196 | ||
1197 | dev_dbg(&pdcs->pdev->dev, | ||
1198 | "Shutdown mailbox channel for PDC %u", pdcs->pdc_idx); | ||
1198 | pdc_ring_free(pdcs); | 1199 | pdc_ring_free(pdcs); |
1199 | } | 1200 | } |
1200 | 1201 | ||
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 95a4ca6ce6ff..849ad441cd76 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c | |||
@@ -760,7 +760,8 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size, | |||
760 | if (!d->nr_stripes || | 760 | if (!d->nr_stripes || |
761 | d->nr_stripes > INT_MAX || | 761 | d->nr_stripes > INT_MAX || |
762 | d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) { | 762 | d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) { |
763 | pr_err("nr_stripes too large"); | 763 | pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)", |
764 | (unsigned)d->nr_stripes); | ||
764 | return -ENOMEM; | 765 | return -ENOMEM; |
765 | } | 766 | } |
766 | 767 | ||
@@ -1820,7 +1821,7 @@ static int cache_alloc(struct cache *ca) | |||
1820 | free = roundup_pow_of_two(ca->sb.nbuckets) >> 10; | 1821 | free = roundup_pow_of_two(ca->sb.nbuckets) >> 10; |
1821 | 1822 | ||
1822 | if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) || | 1823 | if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) || |
1823 | !init_fifo(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) || | 1824 | !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) || |
1824 | !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) || | 1825 | !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) || |
1825 | !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) || | 1826 | !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) || |
1826 | !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) || | 1827 | !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) || |
@@ -1844,7 +1845,7 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, | |||
1844 | struct block_device *bdev, struct cache *ca) | 1845 | struct block_device *bdev, struct cache *ca) |
1845 | { | 1846 | { |
1846 | char name[BDEVNAME_SIZE]; | 1847 | char name[BDEVNAME_SIZE]; |
1847 | const char *err = NULL; | 1848 | const char *err = NULL; /* must be set for any error case */ |
1848 | int ret = 0; | 1849 | int ret = 0; |
1849 | 1850 | ||
1850 | memcpy(&ca->sb, sb, sizeof(struct cache_sb)); | 1851 | memcpy(&ca->sb, sb, sizeof(struct cache_sb)); |
@@ -1861,8 +1862,13 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, | |||
1861 | ca->discard = CACHE_DISCARD(&ca->sb); | 1862 | ca->discard = CACHE_DISCARD(&ca->sb); |
1862 | 1863 | ||
1863 | ret = cache_alloc(ca); | 1864 | ret = cache_alloc(ca); |
1864 | if (ret != 0) | 1865 | if (ret != 0) { |
1866 | if (ret == -ENOMEM) | ||
1867 | err = "cache_alloc(): -ENOMEM"; | ||
1868 | else | ||
1869 | err = "cache_alloc(): unknown error"; | ||
1865 | goto err; | 1870 | goto err; |
1871 | } | ||
1866 | 1872 | ||
1867 | if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) { | 1873 | if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) { |
1868 | err = "error calling kobject_add"; | 1874 | err = "error calling kobject_add"; |
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 6fff794e0c72..13041ee37ad6 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c | |||
@@ -2183,19 +2183,29 @@ location_show(struct mddev *mddev, char *page) | |||
2183 | static ssize_t | 2183 | static ssize_t |
2184 | location_store(struct mddev *mddev, const char *buf, size_t len) | 2184 | location_store(struct mddev *mddev, const char *buf, size_t len) |
2185 | { | 2185 | { |
2186 | int rv; | ||
2186 | 2187 | ||
2188 | rv = mddev_lock(mddev); | ||
2189 | if (rv) | ||
2190 | return rv; | ||
2187 | if (mddev->pers) { | 2191 | if (mddev->pers) { |
2188 | if (!mddev->pers->quiesce) | 2192 | if (!mddev->pers->quiesce) { |
2189 | return -EBUSY; | 2193 | rv = -EBUSY; |
2190 | if (mddev->recovery || mddev->sync_thread) | 2194 | goto out; |
2191 | return -EBUSY; | 2195 | } |
2196 | if (mddev->recovery || mddev->sync_thread) { | ||
2197 | rv = -EBUSY; | ||
2198 | goto out; | ||
2199 | } | ||
2192 | } | 2200 | } |
2193 | 2201 | ||
2194 | if (mddev->bitmap || mddev->bitmap_info.file || | 2202 | if (mddev->bitmap || mddev->bitmap_info.file || |
2195 | mddev->bitmap_info.offset) { | 2203 | mddev->bitmap_info.offset) { |
2196 | /* bitmap already configured. Only option is to clear it */ | 2204 | /* bitmap already configured. Only option is to clear it */ |
2197 | if (strncmp(buf, "none", 4) != 0) | 2205 | if (strncmp(buf, "none", 4) != 0) { |
2198 | return -EBUSY; | 2206 | rv = -EBUSY; |
2207 | goto out; | ||
2208 | } | ||
2199 | if (mddev->pers) { | 2209 | if (mddev->pers) { |
2200 | mddev->pers->quiesce(mddev, 1); | 2210 | mddev->pers->quiesce(mddev, 1); |
2201 | bitmap_destroy(mddev); | 2211 | bitmap_destroy(mddev); |
@@ -2214,21 +2224,25 @@ location_store(struct mddev *mddev, const char *buf, size_t len) | |||
2214 | /* nothing to be done */; | 2224 | /* nothing to be done */; |
2215 | else if (strncmp(buf, "file:", 5) == 0) { | 2225 | else if (strncmp(buf, "file:", 5) == 0) { |
2216 | /* Not supported yet */ | 2226 | /* Not supported yet */ |
2217 | return -EINVAL; | 2227 | rv = -EINVAL; |
2228 | goto out; | ||
2218 | } else { | 2229 | } else { |
2219 | int rv; | ||
2220 | if (buf[0] == '+') | 2230 | if (buf[0] == '+') |
2221 | rv = kstrtoll(buf+1, 10, &offset); | 2231 | rv = kstrtoll(buf+1, 10, &offset); |
2222 | else | 2232 | else |
2223 | rv = kstrtoll(buf, 10, &offset); | 2233 | rv = kstrtoll(buf, 10, &offset); |
2224 | if (rv) | 2234 | if (rv) |
2225 | return rv; | 2235 | goto out; |
2226 | if (offset == 0) | 2236 | if (offset == 0) { |
2227 | return -EINVAL; | 2237 | rv = -EINVAL; |
2238 | goto out; | ||
2239 | } | ||
2228 | if (mddev->bitmap_info.external == 0 && | 2240 | if (mddev->bitmap_info.external == 0 && |
2229 | mddev->major_version == 0 && | 2241 | mddev->major_version == 0 && |
2230 | offset != mddev->bitmap_info.default_offset) | 2242 | offset != mddev->bitmap_info.default_offset) { |
2231 | return -EINVAL; | 2243 | rv = -EINVAL; |
2244 | goto out; | ||
2245 | } | ||
2232 | mddev->bitmap_info.offset = offset; | 2246 | mddev->bitmap_info.offset = offset; |
2233 | if (mddev->pers) { | 2247 | if (mddev->pers) { |
2234 | struct bitmap *bitmap; | 2248 | struct bitmap *bitmap; |
@@ -2245,7 +2259,7 @@ location_store(struct mddev *mddev, const char *buf, size_t len) | |||
2245 | mddev->pers->quiesce(mddev, 0); | 2259 | mddev->pers->quiesce(mddev, 0); |
2246 | if (rv) { | 2260 | if (rv) { |
2247 | bitmap_destroy(mddev); | 2261 | bitmap_destroy(mddev); |
2248 | return rv; | 2262 | goto out; |
2249 | } | 2263 | } |
2250 | } | 2264 | } |
2251 | } | 2265 | } |
@@ -2257,6 +2271,11 @@ location_store(struct mddev *mddev, const char *buf, size_t len) | |||
2257 | set_bit(MD_CHANGE_DEVS, &mddev->flags); | 2271 | set_bit(MD_CHANGE_DEVS, &mddev->flags); |
2258 | md_wakeup_thread(mddev->thread); | 2272 | md_wakeup_thread(mddev->thread); |
2259 | } | 2273 | } |
2274 | rv = 0; | ||
2275 | out: | ||
2276 | mddev_unlock(mddev); | ||
2277 | if (rv) | ||
2278 | return rv; | ||
2260 | return len; | 2279 | return len; |
2261 | } | 2280 | } |
2262 | 2281 | ||
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 6571c81465e1..8625040bae92 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c | |||
@@ -1879,7 +1879,7 @@ static int __init dm_bufio_init(void) | |||
1879 | __cache_size_refresh(); | 1879 | __cache_size_refresh(); |
1880 | mutex_unlock(&dm_bufio_clients_lock); | 1880 | mutex_unlock(&dm_bufio_clients_lock); |
1881 | 1881 | ||
1882 | dm_bufio_wq = create_singlethread_workqueue("dm_bufio_cache"); | 1882 | dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0); |
1883 | if (!dm_bufio_wq) | 1883 | if (!dm_bufio_wq) |
1884 | return -ENOMEM; | 1884 | return -ENOMEM; |
1885 | 1885 | ||
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 4e9784b4e0ac..874295757caa 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
@@ -181,7 +181,7 @@ struct crypt_config { | |||
181 | u8 key[0]; | 181 | u8 key[0]; |
182 | }; | 182 | }; |
183 | 183 | ||
184 | #define MIN_IOS 16 | 184 | #define MIN_IOS 64 |
185 | 185 | ||
186 | static void clone_init(struct dm_crypt_io *, struct bio *); | 186 | static void clone_init(struct dm_crypt_io *, struct bio *); |
187 | static void kcryptd_queue_crypt(struct dm_crypt_io *io); | 187 | static void kcryptd_queue_crypt(struct dm_crypt_io *io); |
@@ -1453,7 +1453,7 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode) | |||
1453 | unsigned i; | 1453 | unsigned i; |
1454 | int err; | 1454 | int err; |
1455 | 1455 | ||
1456 | cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_skcipher *), | 1456 | cc->tfms = kzalloc(cc->tfms_count * sizeof(struct crypto_skcipher *), |
1457 | GFP_KERNEL); | 1457 | GFP_KERNEL); |
1458 | if (!cc->tfms) | 1458 | if (!cc->tfms) |
1459 | return -ENOMEM; | 1459 | return -ENOMEM; |
@@ -1924,6 +1924,13 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) | |||
1924 | return DM_MAPIO_REMAPPED; | 1924 | return DM_MAPIO_REMAPPED; |
1925 | } | 1925 | } |
1926 | 1926 | ||
1927 | /* | ||
1928 | * Check if bio is too large, split as needed. | ||
1929 | */ | ||
1930 | if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_PAGES << PAGE_SHIFT)) && | ||
1931 | bio_data_dir(bio) == WRITE) | ||
1932 | dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT)); | ||
1933 | |||
1927 | io = dm_per_bio_data(bio, cc->per_bio_data_size); | 1934 | io = dm_per_bio_data(bio, cc->per_bio_data_size); |
1928 | crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector)); | 1935 | crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector)); |
1929 | io->ctx.req = (struct skcipher_request *)(io + 1); | 1936 | io->ctx.req = (struct skcipher_request *)(io + 1); |
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index 97e446d54a15..6a2e8dd44a1b 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c | |||
@@ -289,15 +289,13 @@ static int flakey_map(struct dm_target *ti, struct bio *bio) | |||
289 | pb->bio_submitted = true; | 289 | pb->bio_submitted = true; |
290 | 290 | ||
291 | /* | 291 | /* |
292 | * Map reads as normal only if corrupt_bio_byte set. | 292 | * Error reads if neither corrupt_bio_byte or drop_writes are set. |
293 | * Otherwise, flakey_end_io() will decide if the reads should be modified. | ||
293 | */ | 294 | */ |
294 | if (bio_data_dir(bio) == READ) { | 295 | if (bio_data_dir(bio) == READ) { |
295 | /* If flags were specified, only corrupt those that match. */ | 296 | if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags)) |
296 | if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) && | ||
297 | all_corrupt_bio_flags_match(bio, fc)) | ||
298 | goto map_bio; | ||
299 | else | ||
300 | return -EIO; | 297 | return -EIO; |
298 | goto map_bio; | ||
301 | } | 299 | } |
302 | 300 | ||
303 | /* | 301 | /* |
@@ -334,14 +332,21 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error) | |||
334 | struct flakey_c *fc = ti->private; | 332 | struct flakey_c *fc = ti->private; |
335 | struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); | 333 | struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); |
336 | 334 | ||
337 | /* | ||
338 | * Corrupt successful READs while in down state. | ||
339 | */ | ||
340 | if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) { | 335 | if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) { |
341 | if (fc->corrupt_bio_byte) | 336 | if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) && |
337 | all_corrupt_bio_flags_match(bio, fc)) { | ||
338 | /* | ||
339 | * Corrupt successful matching READs while in down state. | ||
340 | */ | ||
342 | corrupt_bio_data(bio, fc); | 341 | corrupt_bio_data(bio, fc); |
343 | else | 342 | |
343 | } else if (!test_bit(DROP_WRITES, &fc->flags)) { | ||
344 | /* | ||
345 | * Error read during the down_interval if drop_writes | ||
346 | * wasn't configured. | ||
347 | */ | ||
344 | return -EIO; | 348 | return -EIO; |
349 | } | ||
345 | } | 350 | } |
346 | 351 | ||
347 | return error; | 352 | return error; |
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c index 4ab68033f9d1..49e4d8d4558f 100644 --- a/drivers/md/dm-log-writes.c +++ b/drivers/md/dm-log-writes.c | |||
@@ -259,12 +259,12 @@ static int log_one_block(struct log_writes_c *lc, | |||
259 | goto out; | 259 | goto out; |
260 | sector++; | 260 | sector++; |
261 | 261 | ||
262 | bio = bio_alloc(GFP_KERNEL, block->vec_cnt); | 262 | atomic_inc(&lc->io_blocks); |
263 | bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt, BIO_MAX_PAGES)); | ||
263 | if (!bio) { | 264 | if (!bio) { |
264 | DMERR("Couldn't alloc log bio"); | 265 | DMERR("Couldn't alloc log bio"); |
265 | goto error; | 266 | goto error; |
266 | } | 267 | } |
267 | atomic_inc(&lc->io_blocks); | ||
268 | bio->bi_iter.bi_size = 0; | 268 | bio->bi_iter.bi_size = 0; |
269 | bio->bi_iter.bi_sector = sector; | 269 | bio->bi_iter.bi_sector = sector; |
270 | bio->bi_bdev = lc->logdev->bdev; | 270 | bio->bi_bdev = lc->logdev->bdev; |
@@ -282,7 +282,7 @@ static int log_one_block(struct log_writes_c *lc, | |||
282 | if (ret != block->vecs[i].bv_len) { | 282 | if (ret != block->vecs[i].bv_len) { |
283 | atomic_inc(&lc->io_blocks); | 283 | atomic_inc(&lc->io_blocks); |
284 | submit_bio(bio); | 284 | submit_bio(bio); |
285 | bio = bio_alloc(GFP_KERNEL, block->vec_cnt - i); | 285 | bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt - i, BIO_MAX_PAGES)); |
286 | if (!bio) { | 286 | if (!bio) { |
287 | DMERR("Couldn't alloc log bio"); | 287 | DMERR("Couldn't alloc log bio"); |
288 | goto error; | 288 | goto error; |
@@ -459,9 +459,9 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
459 | goto bad; | 459 | goto bad; |
460 | } | 460 | } |
461 | 461 | ||
462 | ret = -EINVAL; | ||
463 | lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write"); | 462 | lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write"); |
464 | if (!lc->log_kthread) { | 463 | if (IS_ERR(lc->log_kthread)) { |
464 | ret = PTR_ERR(lc->log_kthread); | ||
465 | ti->error = "Couldn't alloc kthread"; | 465 | ti->error = "Couldn't alloc kthread"; |
466 | dm_put_device(ti, lc->dev); | 466 | dm_put_device(ti, lc->dev); |
467 | dm_put_device(ti, lc->logdev); | 467 | dm_put_device(ti, lc->logdev); |
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c index 4ca2d1df5b44..07fc1ad42ec5 100644 --- a/drivers/md/dm-log.c +++ b/drivers/md/dm-log.c | |||
@@ -291,9 +291,10 @@ static void header_from_disk(struct log_header_core *core, struct log_header_dis | |||
291 | core->nr_regions = le64_to_cpu(disk->nr_regions); | 291 | core->nr_regions = le64_to_cpu(disk->nr_regions); |
292 | } | 292 | } |
293 | 293 | ||
294 | static int rw_header(struct log_c *lc, int rw) | 294 | static int rw_header(struct log_c *lc, int op) |
295 | { | 295 | { |
296 | lc->io_req.bi_op = rw; | 296 | lc->io_req.bi_op = op; |
297 | lc->io_req.bi_op_flags = 0; | ||
297 | 298 | ||
298 | return dm_io(&lc->io_req, 1, &lc->header_location, NULL); | 299 | return dm_io(&lc->io_req, 1, &lc->header_location, NULL); |
299 | } | 300 | } |
@@ -316,7 +317,7 @@ static int read_header(struct log_c *log) | |||
316 | { | 317 | { |
317 | int r; | 318 | int r; |
318 | 319 | ||
319 | r = rw_header(log, READ); | 320 | r = rw_header(log, REQ_OP_READ); |
320 | if (r) | 321 | if (r) |
321 | return r; | 322 | return r; |
322 | 323 | ||
@@ -630,7 +631,7 @@ static int disk_resume(struct dm_dirty_log *log) | |||
630 | header_to_disk(&lc->header, lc->disk_header); | 631 | header_to_disk(&lc->header, lc->disk_header); |
631 | 632 | ||
632 | /* write the new header */ | 633 | /* write the new header */ |
633 | r = rw_header(lc, WRITE); | 634 | r = rw_header(lc, REQ_OP_WRITE); |
634 | if (!r) { | 635 | if (!r) { |
635 | r = flush_header(lc); | 636 | r = flush_header(lc); |
636 | if (r) | 637 | if (r) |
@@ -698,7 +699,7 @@ static int disk_flush(struct dm_dirty_log *log) | |||
698 | log_clear_bit(lc, lc->clean_bits, i); | 699 | log_clear_bit(lc, lc->clean_bits, i); |
699 | } | 700 | } |
700 | 701 | ||
701 | r = rw_header(lc, WRITE); | 702 | r = rw_header(lc, REQ_OP_WRITE); |
702 | if (r) | 703 | if (r) |
703 | fail_log_device(lc); | 704 | fail_log_device(lc); |
704 | else { | 705 | else { |
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 1b9795d75ef8..8abde6b8cedc 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c | |||
@@ -191,7 +191,6 @@ struct raid_dev { | |||
191 | #define RT_FLAG_RS_BITMAP_LOADED 2 | 191 | #define RT_FLAG_RS_BITMAP_LOADED 2 |
192 | #define RT_FLAG_UPDATE_SBS 3 | 192 | #define RT_FLAG_UPDATE_SBS 3 |
193 | #define RT_FLAG_RESHAPE_RS 4 | 193 | #define RT_FLAG_RESHAPE_RS 4 |
194 | #define RT_FLAG_KEEP_RS_FROZEN 5 | ||
195 | 194 | ||
196 | /* Array elements of 64 bit needed for rebuild/failed disk bits */ | 195 | /* Array elements of 64 bit needed for rebuild/failed disk bits */ |
197 | #define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8) | 196 | #define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8) |
@@ -861,6 +860,9 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size) | |||
861 | { | 860 | { |
862 | unsigned long min_region_size = rs->ti->len / (1 << 21); | 861 | unsigned long min_region_size = rs->ti->len / (1 << 21); |
863 | 862 | ||
863 | if (rs_is_raid0(rs)) | ||
864 | return 0; | ||
865 | |||
864 | if (!region_size) { | 866 | if (!region_size) { |
865 | /* | 867 | /* |
866 | * Choose a reasonable default. All figures in sectors. | 868 | * Choose a reasonable default. All figures in sectors. |
@@ -930,6 +932,8 @@ static int validate_raid_redundancy(struct raid_set *rs) | |||
930 | rebuild_cnt++; | 932 | rebuild_cnt++; |
931 | 933 | ||
932 | switch (rs->raid_type->level) { | 934 | switch (rs->raid_type->level) { |
935 | case 0: | ||
936 | break; | ||
933 | case 1: | 937 | case 1: |
934 | if (rebuild_cnt >= rs->md.raid_disks) | 938 | if (rebuild_cnt >= rs->md.raid_disks) |
935 | goto too_many; | 939 | goto too_many; |
@@ -2335,6 +2339,13 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) | |||
2335 | case 0: | 2339 | case 0: |
2336 | break; | 2340 | break; |
2337 | default: | 2341 | default: |
2342 | /* | ||
2343 | * We have to keep any raid0 data/metadata device pairs or | ||
2344 | * the MD raid0 personality will fail to start the array. | ||
2345 | */ | ||
2346 | if (rs_is_raid0(rs)) | ||
2347 | continue; | ||
2348 | |||
2338 | dev = container_of(rdev, struct raid_dev, rdev); | 2349 | dev = container_of(rdev, struct raid_dev, rdev); |
2339 | if (dev->meta_dev) | 2350 | if (dev->meta_dev) |
2340 | dm_put_device(ti, dev->meta_dev); | 2351 | dm_put_device(ti, dev->meta_dev); |
@@ -2579,7 +2590,6 @@ static int rs_prepare_reshape(struct raid_set *rs) | |||
2579 | } else { | 2590 | } else { |
2580 | /* Process raid1 without delta_disks */ | 2591 | /* Process raid1 without delta_disks */ |
2581 | mddev->raid_disks = rs->raid_disks; | 2592 | mddev->raid_disks = rs->raid_disks; |
2582 | set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags); | ||
2583 | reshape = false; | 2593 | reshape = false; |
2584 | } | 2594 | } |
2585 | } else { | 2595 | } else { |
@@ -2590,7 +2600,6 @@ static int rs_prepare_reshape(struct raid_set *rs) | |||
2590 | if (reshape) { | 2600 | if (reshape) { |
2591 | set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags); | 2601 | set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags); |
2592 | set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); | 2602 | set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); |
2593 | set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags); | ||
2594 | } else if (mddev->raid_disks < rs->raid_disks) | 2603 | } else if (mddev->raid_disks < rs->raid_disks) |
2595 | /* Create new superblocks and bitmaps, if any new disks */ | 2604 | /* Create new superblocks and bitmaps, if any new disks */ |
2596 | set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); | 2605 | set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); |
@@ -2902,7 +2911,6 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
2902 | goto bad; | 2911 | goto bad; |
2903 | 2912 | ||
2904 | set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); | 2913 | set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); |
2905 | set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags); | ||
2906 | /* Takeover ain't recovery, so disable recovery */ | 2914 | /* Takeover ain't recovery, so disable recovery */ |
2907 | rs_setup_recovery(rs, MaxSector); | 2915 | rs_setup_recovery(rs, MaxSector); |
2908 | rs_set_new(rs); | 2916 | rs_set_new(rs); |
@@ -3386,21 +3394,28 @@ static void raid_postsuspend(struct dm_target *ti) | |||
3386 | { | 3394 | { |
3387 | struct raid_set *rs = ti->private; | 3395 | struct raid_set *rs = ti->private; |
3388 | 3396 | ||
3389 | if (test_and_clear_bit(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) { | 3397 | if (!rs->md.suspended) |
3390 | if (!rs->md.suspended) | 3398 | mddev_suspend(&rs->md); |
3391 | mddev_suspend(&rs->md); | 3399 | |
3392 | rs->md.ro = 1; | 3400 | rs->md.ro = 1; |
3393 | } | ||
3394 | } | 3401 | } |
3395 | 3402 | ||
3396 | static void attempt_restore_of_faulty_devices(struct raid_set *rs) | 3403 | static void attempt_restore_of_faulty_devices(struct raid_set *rs) |
3397 | { | 3404 | { |
3398 | int i; | 3405 | int i; |
3399 | uint64_t failed_devices, cleared_failed_devices = 0; | 3406 | uint64_t cleared_failed_devices[DISKS_ARRAY_ELEMS]; |
3400 | unsigned long flags; | 3407 | unsigned long flags; |
3408 | bool cleared = false; | ||
3401 | struct dm_raid_superblock *sb; | 3409 | struct dm_raid_superblock *sb; |
3410 | struct mddev *mddev = &rs->md; | ||
3402 | struct md_rdev *r; | 3411 | struct md_rdev *r; |
3403 | 3412 | ||
3413 | /* RAID personalities have to provide hot add/remove methods or we need to bail out. */ | ||
3414 | if (!mddev->pers || !mddev->pers->hot_add_disk || !mddev->pers->hot_remove_disk) | ||
3415 | return; | ||
3416 | |||
3417 | memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices)); | ||
3418 | |||
3404 | for (i = 0; i < rs->md.raid_disks; i++) { | 3419 | for (i = 0; i < rs->md.raid_disks; i++) { |
3405 | r = &rs->dev[i].rdev; | 3420 | r = &rs->dev[i].rdev; |
3406 | if (test_bit(Faulty, &r->flags) && r->sb_page && | 3421 | if (test_bit(Faulty, &r->flags) && r->sb_page && |
@@ -3420,7 +3435,7 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs) | |||
3420 | * ourselves. | 3435 | * ourselves. |
3421 | */ | 3436 | */ |
3422 | if ((r->raid_disk >= 0) && | 3437 | if ((r->raid_disk >= 0) && |
3423 | (r->mddev->pers->hot_remove_disk(r->mddev, r) != 0)) | 3438 | (mddev->pers->hot_remove_disk(mddev, r) != 0)) |
3424 | /* Failed to revive this device, try next */ | 3439 | /* Failed to revive this device, try next */ |
3425 | continue; | 3440 | continue; |
3426 | 3441 | ||
@@ -3430,22 +3445,30 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs) | |||
3430 | clear_bit(Faulty, &r->flags); | 3445 | clear_bit(Faulty, &r->flags); |
3431 | clear_bit(WriteErrorSeen, &r->flags); | 3446 | clear_bit(WriteErrorSeen, &r->flags); |
3432 | clear_bit(In_sync, &r->flags); | 3447 | clear_bit(In_sync, &r->flags); |
3433 | if (r->mddev->pers->hot_add_disk(r->mddev, r)) { | 3448 | if (mddev->pers->hot_add_disk(mddev, r)) { |
3434 | r->raid_disk = -1; | 3449 | r->raid_disk = -1; |
3435 | r->saved_raid_disk = -1; | 3450 | r->saved_raid_disk = -1; |
3436 | r->flags = flags; | 3451 | r->flags = flags; |
3437 | } else { | 3452 | } else { |
3438 | r->recovery_offset = 0; | 3453 | r->recovery_offset = 0; |
3439 | cleared_failed_devices |= 1 << i; | 3454 | set_bit(i, (void *) cleared_failed_devices); |
3455 | cleared = true; | ||
3440 | } | 3456 | } |
3441 | } | 3457 | } |
3442 | } | 3458 | } |
3443 | if (cleared_failed_devices) { | 3459 | |
3460 | /* If any failed devices could be cleared, update all sbs failed_devices bits */ | ||
3461 | if (cleared) { | ||
3462 | uint64_t failed_devices[DISKS_ARRAY_ELEMS]; | ||
3463 | |||
3444 | rdev_for_each(r, &rs->md) { | 3464 | rdev_for_each(r, &rs->md) { |
3445 | sb = page_address(r->sb_page); | 3465 | sb = page_address(r->sb_page); |
3446 | failed_devices = le64_to_cpu(sb->failed_devices); | 3466 | sb_retrieve_failed_devices(sb, failed_devices); |
3447 | failed_devices &= ~cleared_failed_devices; | 3467 | |
3448 | sb->failed_devices = cpu_to_le64(failed_devices); | 3468 | for (i = 0; i < DISKS_ARRAY_ELEMS; i++) |
3469 | failed_devices[i] &= ~cleared_failed_devices[i]; | ||
3470 | |||
3471 | sb_update_failed_devices(sb, failed_devices); | ||
3449 | } | 3472 | } |
3450 | } | 3473 | } |
3451 | } | 3474 | } |
@@ -3610,26 +3633,15 @@ static void raid_resume(struct dm_target *ti) | |||
3610 | * devices are reachable again. | 3633 | * devices are reachable again. |
3611 | */ | 3634 | */ |
3612 | attempt_restore_of_faulty_devices(rs); | 3635 | attempt_restore_of_faulty_devices(rs); |
3613 | } else { | 3636 | } |
3614 | mddev->ro = 0; | ||
3615 | mddev->in_sync = 0; | ||
3616 | 3637 | ||
3617 | /* | 3638 | mddev->ro = 0; |
3618 | * When passing in flags to the ctr, we expect userspace | 3639 | mddev->in_sync = 0; |
3619 | * to reset them because they made it to the superblocks | ||
3620 | * and reload the mapping anyway. | ||
3621 | * | ||
3622 | * -> only unfreeze recovery in case of a table reload or | ||
3623 | * we'll have a bogus recovery/reshape position | ||
3624 | * retrieved from the superblock by the ctr because | ||
3625 | * the ongoing recovery/reshape will change it after read. | ||
3626 | */ | ||
3627 | if (!test_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags)) | ||
3628 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); | ||
3629 | 3640 | ||
3630 | if (mddev->suspended) | 3641 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); |
3631 | mddev_resume(mddev); | 3642 | |
3632 | } | 3643 | if (mddev->suspended) |
3644 | mddev_resume(mddev); | ||
3633 | } | 3645 | } |
3634 | 3646 | ||
3635 | static struct target_type raid_target = { | 3647 | static struct target_type raid_target = { |
diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c index 4ace1da17db8..6c25213ab38c 100644 --- a/drivers/md/dm-round-robin.c +++ b/drivers/md/dm-round-robin.c | |||
@@ -210,14 +210,17 @@ static struct dm_path *rr_select_path(struct path_selector *ps, size_t nr_bytes) | |||
210 | struct path_info *pi = NULL; | 210 | struct path_info *pi = NULL; |
211 | struct dm_path *current_path = NULL; | 211 | struct dm_path *current_path = NULL; |
212 | 212 | ||
213 | local_irq_save(flags); | ||
213 | current_path = *this_cpu_ptr(s->current_path); | 214 | current_path = *this_cpu_ptr(s->current_path); |
214 | if (current_path) { | 215 | if (current_path) { |
215 | percpu_counter_dec(&s->repeat_count); | 216 | percpu_counter_dec(&s->repeat_count); |
216 | if (percpu_counter_read_positive(&s->repeat_count) > 0) | 217 | if (percpu_counter_read_positive(&s->repeat_count) > 0) { |
218 | local_irq_restore(flags); | ||
217 | return current_path; | 219 | return current_path; |
220 | } | ||
218 | } | 221 | } |
219 | 222 | ||
220 | spin_lock_irqsave(&s->lock, flags); | 223 | spin_lock(&s->lock); |
221 | if (!list_empty(&s->valid_paths)) { | 224 | if (!list_empty(&s->valid_paths)) { |
222 | pi = list_entry(s->valid_paths.next, struct path_info, list); | 225 | pi = list_entry(s->valid_paths.next, struct path_info, list); |
223 | list_move_tail(&pi->list, &s->valid_paths); | 226 | list_move_tail(&pi->list, &s->valid_paths); |
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 41573f1f626f..34a840d9df76 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c | |||
@@ -834,8 +834,10 @@ static int join(struct mddev *mddev, int nodes) | |||
834 | goto err; | 834 | goto err; |
835 | } | 835 | } |
836 | cinfo->ack_lockres = lockres_init(mddev, "ack", ack_bast, 0); | 836 | cinfo->ack_lockres = lockres_init(mddev, "ack", ack_bast, 0); |
837 | if (!cinfo->ack_lockres) | 837 | if (!cinfo->ack_lockres) { |
838 | ret = -ENOMEM; | ||
838 | goto err; | 839 | goto err; |
840 | } | ||
839 | /* get sync CR lock on ACK. */ | 841 | /* get sync CR lock on ACK. */ |
840 | if (dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_CR)) | 842 | if (dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_CR)) |
841 | pr_err("md-cluster: failed to get a sync CR lock on ACK!(%d)\n", | 843 | pr_err("md-cluster: failed to get a sync CR lock on ACK!(%d)\n", |
@@ -849,8 +851,10 @@ static int join(struct mddev *mddev, int nodes) | |||
849 | pr_info("md-cluster: Joined cluster %s slot %d\n", str, cinfo->slot_number); | 851 | pr_info("md-cluster: Joined cluster %s slot %d\n", str, cinfo->slot_number); |
850 | snprintf(str, 64, "bitmap%04d", cinfo->slot_number - 1); | 852 | snprintf(str, 64, "bitmap%04d", cinfo->slot_number - 1); |
851 | cinfo->bitmap_lockres = lockres_init(mddev, str, NULL, 1); | 853 | cinfo->bitmap_lockres = lockres_init(mddev, str, NULL, 1); |
852 | if (!cinfo->bitmap_lockres) | 854 | if (!cinfo->bitmap_lockres) { |
855 | ret = -ENOMEM; | ||
853 | goto err; | 856 | goto err; |
857 | } | ||
854 | if (dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW)) { | 858 | if (dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW)) { |
855 | pr_err("Failed to get bitmap lock\n"); | 859 | pr_err("Failed to get bitmap lock\n"); |
856 | ret = -EINVAL; | 860 | ret = -EINVAL; |
@@ -858,8 +862,10 @@ static int join(struct mddev *mddev, int nodes) | |||
858 | } | 862 | } |
859 | 863 | ||
860 | cinfo->resync_lockres = lockres_init(mddev, "resync", NULL, 0); | 864 | cinfo->resync_lockres = lockres_init(mddev, "resync", NULL, 0); |
861 | if (!cinfo->resync_lockres) | 865 | if (!cinfo->resync_lockres) { |
866 | ret = -ENOMEM; | ||
862 | goto err; | 867 | goto err; |
868 | } | ||
863 | 869 | ||
864 | return 0; | 870 | return 0; |
865 | err: | 871 | err: |
diff --git a/drivers/md/md.c b/drivers/md/md.c index d646f6e444f0..915e84d631a2 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -1604,11 +1604,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) | |||
1604 | mddev->new_chunk_sectors = mddev->chunk_sectors; | 1604 | mddev->new_chunk_sectors = mddev->chunk_sectors; |
1605 | } | 1605 | } |
1606 | 1606 | ||
1607 | if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL) { | 1607 | if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL) |
1608 | set_bit(MD_HAS_JOURNAL, &mddev->flags); | 1608 | set_bit(MD_HAS_JOURNAL, &mddev->flags); |
1609 | if (mddev->recovery_cp == MaxSector) | ||
1610 | set_bit(MD_JOURNAL_CLEAN, &mddev->flags); | ||
1611 | } | ||
1612 | } else if (mddev->pers == NULL) { | 1609 | } else if (mddev->pers == NULL) { |
1613 | /* Insist of good event counter while assembling, except for | 1610 | /* Insist of good event counter while assembling, except for |
1614 | * spares (which don't need an event count) */ | 1611 | * spares (which don't need an event count) */ |
@@ -5851,6 +5848,9 @@ static int get_array_info(struct mddev *mddev, void __user *arg) | |||
5851 | working++; | 5848 | working++; |
5852 | if (test_bit(In_sync, &rdev->flags)) | 5849 | if (test_bit(In_sync, &rdev->flags)) |
5853 | insync++; | 5850 | insync++; |
5851 | else if (test_bit(Journal, &rdev->flags)) | ||
5852 | /* TODO: add journal count to md_u.h */ | ||
5853 | ; | ||
5854 | else | 5854 | else |
5855 | spare++; | 5855 | spare++; |
5856 | } | 5856 | } |
@@ -7610,16 +7610,12 @@ EXPORT_SYMBOL(unregister_md_cluster_operations); | |||
7610 | 7610 | ||
7611 | int md_setup_cluster(struct mddev *mddev, int nodes) | 7611 | int md_setup_cluster(struct mddev *mddev, int nodes) |
7612 | { | 7612 | { |
7613 | int err; | 7613 | if (!md_cluster_ops) |
7614 | 7614 | request_module("md-cluster"); | |
7615 | err = request_module("md-cluster"); | ||
7616 | if (err) { | ||
7617 | pr_err("md-cluster module not found.\n"); | ||
7618 | return -ENOENT; | ||
7619 | } | ||
7620 | |||
7621 | spin_lock(&pers_lock); | 7615 | spin_lock(&pers_lock); |
7616 | /* ensure module won't be unloaded */ | ||
7622 | if (!md_cluster_ops || !try_module_get(md_cluster_mod)) { | 7617 | if (!md_cluster_ops || !try_module_get(md_cluster_mod)) { |
7618 | pr_err("can't find md-cluster module or get it's reference.\n"); | ||
7623 | spin_unlock(&pers_lock); | 7619 | spin_unlock(&pers_lock); |
7624 | return -ENOENT; | 7620 | return -ENOENT; |
7625 | } | 7621 | } |
@@ -7862,6 +7858,7 @@ void md_do_sync(struct md_thread *thread) | |||
7862 | */ | 7858 | */ |
7863 | 7859 | ||
7864 | do { | 7860 | do { |
7861 | int mddev2_minor = -1; | ||
7865 | mddev->curr_resync = 2; | 7862 | mddev->curr_resync = 2; |
7866 | 7863 | ||
7867 | try_again: | 7864 | try_again: |
@@ -7891,10 +7888,14 @@ void md_do_sync(struct md_thread *thread) | |||
7891 | prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE); | 7888 | prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE); |
7892 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && | 7889 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && |
7893 | mddev2->curr_resync >= mddev->curr_resync) { | 7890 | mddev2->curr_resync >= mddev->curr_resync) { |
7894 | printk(KERN_INFO "md: delaying %s of %s" | 7891 | if (mddev2_minor != mddev2->md_minor) { |
7895 | " until %s has finished (they" | 7892 | mddev2_minor = mddev2->md_minor; |
7896 | " share one or more physical units)\n", | 7893 | printk(KERN_INFO "md: delaying %s of %s" |
7897 | desc, mdname(mddev), mdname(mddev2)); | 7894 | " until %s has finished (they" |
7895 | " share one or more physical units)\n", | ||
7896 | desc, mdname(mddev), | ||
7897 | mdname(mddev2)); | ||
7898 | } | ||
7898 | mddev_put(mddev2); | 7899 | mddev_put(mddev2); |
7899 | if (signal_pending(current)) | 7900 | if (signal_pending(current)) |
7900 | flush_signals(current); | 7901 | flush_signals(current); |
@@ -8275,16 +8276,13 @@ no_add: | |||
8275 | static void md_start_sync(struct work_struct *ws) | 8276 | static void md_start_sync(struct work_struct *ws) |
8276 | { | 8277 | { |
8277 | struct mddev *mddev = container_of(ws, struct mddev, del_work); | 8278 | struct mddev *mddev = container_of(ws, struct mddev, del_work); |
8278 | int ret = 0; | ||
8279 | 8279 | ||
8280 | mddev->sync_thread = md_register_thread(md_do_sync, | 8280 | mddev->sync_thread = md_register_thread(md_do_sync, |
8281 | mddev, | 8281 | mddev, |
8282 | "resync"); | 8282 | "resync"); |
8283 | if (!mddev->sync_thread) { | 8283 | if (!mddev->sync_thread) { |
8284 | if (!(mddev_is_clustered(mddev) && ret == -EAGAIN)) | 8284 | printk(KERN_ERR "%s: could not start resync thread...\n", |
8285 | printk(KERN_ERR "%s: could not start resync" | 8285 | mdname(mddev)); |
8286 | " thread...\n", | ||
8287 | mdname(mddev)); | ||
8288 | /* leave the spares where they are, it shouldn't hurt */ | 8286 | /* leave the spares where they are, it shouldn't hurt */ |
8289 | clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); | 8287 | clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); |
8290 | clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); | 8288 | clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 0e4efcd10795..be1a9fca3b2d 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -1064,6 +1064,8 @@ static void __make_request(struct mddev *mddev, struct bio *bio) | |||
1064 | int max_sectors; | 1064 | int max_sectors; |
1065 | int sectors; | 1065 | int sectors; |
1066 | 1066 | ||
1067 | md_write_start(mddev, bio); | ||
1068 | |||
1067 | /* | 1069 | /* |
1068 | * Register the new request and wait if the reconstruction | 1070 | * Register the new request and wait if the reconstruction |
1069 | * thread has put up a bar for new requests. | 1071 | * thread has put up a bar for new requests. |
@@ -1445,8 +1447,6 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio) | |||
1445 | return; | 1447 | return; |
1446 | } | 1448 | } |
1447 | 1449 | ||
1448 | md_write_start(mddev, bio); | ||
1449 | |||
1450 | do { | 1450 | do { |
1451 | 1451 | ||
1452 | /* | 1452 | /* |
@@ -2465,20 +2465,21 @@ static int narrow_write_error(struct r10bio *r10_bio, int i) | |||
2465 | 2465 | ||
2466 | while (sect_to_write) { | 2466 | while (sect_to_write) { |
2467 | struct bio *wbio; | 2467 | struct bio *wbio; |
2468 | sector_t wsector; | ||
2468 | if (sectors > sect_to_write) | 2469 | if (sectors > sect_to_write) |
2469 | sectors = sect_to_write; | 2470 | sectors = sect_to_write; |
2470 | /* Write at 'sector' for 'sectors' */ | 2471 | /* Write at 'sector' for 'sectors' */ |
2471 | wbio = bio_clone_mddev(bio, GFP_NOIO, mddev); | 2472 | wbio = bio_clone_mddev(bio, GFP_NOIO, mddev); |
2472 | bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors); | 2473 | bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors); |
2473 | wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+ | 2474 | wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector); |
2474 | choose_data_offset(r10_bio, rdev) + | 2475 | wbio->bi_iter.bi_sector = wsector + |
2475 | (sector - r10_bio->sector)); | 2476 | choose_data_offset(r10_bio, rdev); |
2476 | wbio->bi_bdev = rdev->bdev; | 2477 | wbio->bi_bdev = rdev->bdev; |
2477 | bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); | 2478 | bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); |
2478 | 2479 | ||
2479 | if (submit_bio_wait(wbio) < 0) | 2480 | if (submit_bio_wait(wbio) < 0) |
2480 | /* Failure! */ | 2481 | /* Failure! */ |
2481 | ok = rdev_set_badblocks(rdev, sector, | 2482 | ok = rdev_set_badblocks(rdev, wsector, |
2482 | sectors, 0) | 2483 | sectors, 0) |
2483 | && ok; | 2484 | && ok; |
2484 | 2485 | ||
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 51f76ddbe265..1b1ab4a1d132 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c | |||
@@ -96,7 +96,6 @@ struct r5l_log { | |||
96 | spinlock_t no_space_stripes_lock; | 96 | spinlock_t no_space_stripes_lock; |
97 | 97 | ||
98 | bool need_cache_flush; | 98 | bool need_cache_flush; |
99 | bool in_teardown; | ||
100 | }; | 99 | }; |
101 | 100 | ||
102 | /* | 101 | /* |
@@ -704,31 +703,22 @@ static void r5l_write_super_and_discard_space(struct r5l_log *log, | |||
704 | 703 | ||
705 | mddev = log->rdev->mddev; | 704 | mddev = log->rdev->mddev; |
706 | /* | 705 | /* |
707 | * This is to avoid a deadlock. r5l_quiesce holds reconfig_mutex and | 706 | * Discard could zero data, so before discard we must make sure |
708 | * wait for this thread to finish. This thread waits for | 707 | * superblock is updated to new log tail. Updating superblock (either |
709 | * MD_CHANGE_PENDING clear, which is supposed to be done in | 708 | * directly call md_update_sb() or depend on md thread) must hold |
710 | * md_check_recovery(). md_check_recovery() tries to get | 709 | * reconfig mutex. On the other hand, raid5_quiesce is called with |
711 | * reconfig_mutex. Since r5l_quiesce already holds the mutex, | 710 | * reconfig_mutex hold. The first step of raid5_quiesce() is waitting |
712 | * md_check_recovery() fails, so the PENDING never get cleared. The | 711 | * for all IO finish, hence waitting for reclaim thread, while reclaim |
713 | * in_teardown check workaround this issue. | 712 | * thread is calling this function and waitting for reconfig mutex. So |
713 | * there is a deadlock. We workaround this issue with a trylock. | ||
714 | * FIXME: we could miss discard if we can't take reconfig mutex | ||
714 | */ | 715 | */ |
715 | if (!log->in_teardown) { | 716 | set_mask_bits(&mddev->flags, 0, |
716 | set_mask_bits(&mddev->flags, 0, | 717 | BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING)); |
717 | BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING)); | 718 | if (!mddev_trylock(mddev)) |
718 | md_wakeup_thread(mddev->thread); | 719 | return; |
719 | wait_event(mddev->sb_wait, | 720 | md_update_sb(mddev, 1); |
720 | !test_bit(MD_CHANGE_PENDING, &mddev->flags) || | 721 | mddev_unlock(mddev); |
721 | log->in_teardown); | ||
722 | /* | ||
723 | * r5l_quiesce could run after in_teardown check and hold | ||
724 | * mutex first. Superblock might get updated twice. | ||
725 | */ | ||
726 | if (log->in_teardown) | ||
727 | md_update_sb(mddev, 1); | ||
728 | } else { | ||
729 | WARN_ON(!mddev_is_locked(mddev)); | ||
730 | md_update_sb(mddev, 1); | ||
731 | } | ||
732 | 722 | ||
733 | /* discard IO error really doesn't matter, ignore it */ | 723 | /* discard IO error really doesn't matter, ignore it */ |
734 | if (log->last_checkpoint < end) { | 724 | if (log->last_checkpoint < end) { |
@@ -827,7 +817,6 @@ void r5l_quiesce(struct r5l_log *log, int state) | |||
827 | if (!log || state == 2) | 817 | if (!log || state == 2) |
828 | return; | 818 | return; |
829 | if (state == 0) { | 819 | if (state == 0) { |
830 | log->in_teardown = 0; | ||
831 | /* | 820 | /* |
832 | * This is a special case for hotadd. In suspend, the array has | 821 | * This is a special case for hotadd. In suspend, the array has |
833 | * no journal. In resume, journal is initialized as well as the | 822 | * no journal. In resume, journal is initialized as well as the |
@@ -838,11 +827,6 @@ void r5l_quiesce(struct r5l_log *log, int state) | |||
838 | log->reclaim_thread = md_register_thread(r5l_reclaim_thread, | 827 | log->reclaim_thread = md_register_thread(r5l_reclaim_thread, |
839 | log->rdev->mddev, "reclaim"); | 828 | log->rdev->mddev, "reclaim"); |
840 | } else if (state == 1) { | 829 | } else if (state == 1) { |
841 | /* | ||
842 | * at this point all stripes are finished, so io_unit is at | ||
843 | * least in STRIPE_END state | ||
844 | */ | ||
845 | log->in_teardown = 1; | ||
846 | /* make sure r5l_write_super_and_discard_space exits */ | 830 | /* make sure r5l_write_super_and_discard_space exits */ |
847 | mddev = log->rdev->mddev; | 831 | mddev = log->rdev->mddev; |
848 | wake_up(&mddev->sb_wait); | 832 | wake_up(&mddev->sb_wait); |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 8912407a4dd0..ee7fc3701700 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -659,6 +659,7 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector, | |||
659 | { | 659 | { |
660 | struct stripe_head *sh; | 660 | struct stripe_head *sh; |
661 | int hash = stripe_hash_locks_hash(sector); | 661 | int hash = stripe_hash_locks_hash(sector); |
662 | int inc_empty_inactive_list_flag; | ||
662 | 663 | ||
663 | pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector); | 664 | pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector); |
664 | 665 | ||
@@ -703,7 +704,12 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector, | |||
703 | atomic_inc(&conf->active_stripes); | 704 | atomic_inc(&conf->active_stripes); |
704 | BUG_ON(list_empty(&sh->lru) && | 705 | BUG_ON(list_empty(&sh->lru) && |
705 | !test_bit(STRIPE_EXPANDING, &sh->state)); | 706 | !test_bit(STRIPE_EXPANDING, &sh->state)); |
707 | inc_empty_inactive_list_flag = 0; | ||
708 | if (!list_empty(conf->inactive_list + hash)) | ||
709 | inc_empty_inactive_list_flag = 1; | ||
706 | list_del_init(&sh->lru); | 710 | list_del_init(&sh->lru); |
711 | if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag) | ||
712 | atomic_inc(&conf->empty_inactive_list_nr); | ||
707 | if (sh->group) { | 713 | if (sh->group) { |
708 | sh->group->stripes_cnt--; | 714 | sh->group->stripes_cnt--; |
709 | sh->group = NULL; | 715 | sh->group = NULL; |
@@ -762,6 +768,7 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh | |||
762 | sector_t head_sector, tmp_sec; | 768 | sector_t head_sector, tmp_sec; |
763 | int hash; | 769 | int hash; |
764 | int dd_idx; | 770 | int dd_idx; |
771 | int inc_empty_inactive_list_flag; | ||
765 | 772 | ||
766 | /* Don't cross chunks, so stripe pd_idx/qd_idx is the same */ | 773 | /* Don't cross chunks, so stripe pd_idx/qd_idx is the same */ |
767 | tmp_sec = sh->sector; | 774 | tmp_sec = sh->sector; |
@@ -779,7 +786,12 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh | |||
779 | atomic_inc(&conf->active_stripes); | 786 | atomic_inc(&conf->active_stripes); |
780 | BUG_ON(list_empty(&head->lru) && | 787 | BUG_ON(list_empty(&head->lru) && |
781 | !test_bit(STRIPE_EXPANDING, &head->state)); | 788 | !test_bit(STRIPE_EXPANDING, &head->state)); |
789 | inc_empty_inactive_list_flag = 0; | ||
790 | if (!list_empty(conf->inactive_list + hash)) | ||
791 | inc_empty_inactive_list_flag = 1; | ||
782 | list_del_init(&head->lru); | 792 | list_del_init(&head->lru); |
793 | if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag) | ||
794 | atomic_inc(&conf->empty_inactive_list_nr); | ||
783 | if (head->group) { | 795 | if (head->group) { |
784 | head->group->stripes_cnt--; | 796 | head->group->stripes_cnt--; |
785 | head->group = NULL; | 797 | head->group = NULL; |
@@ -993,7 +1005,6 @@ again: | |||
993 | 1005 | ||
994 | set_bit(STRIPE_IO_STARTED, &sh->state); | 1006 | set_bit(STRIPE_IO_STARTED, &sh->state); |
995 | 1007 | ||
996 | bio_reset(bi); | ||
997 | bi->bi_bdev = rdev->bdev; | 1008 | bi->bi_bdev = rdev->bdev; |
998 | bio_set_op_attrs(bi, op, op_flags); | 1009 | bio_set_op_attrs(bi, op, op_flags); |
999 | bi->bi_end_io = op_is_write(op) | 1010 | bi->bi_end_io = op_is_write(op) |
@@ -1045,7 +1056,6 @@ again: | |||
1045 | 1056 | ||
1046 | set_bit(STRIPE_IO_STARTED, &sh->state); | 1057 | set_bit(STRIPE_IO_STARTED, &sh->state); |
1047 | 1058 | ||
1048 | bio_reset(rbi); | ||
1049 | rbi->bi_bdev = rrdev->bdev; | 1059 | rbi->bi_bdev = rrdev->bdev; |
1050 | bio_set_op_attrs(rbi, op, op_flags); | 1060 | bio_set_op_attrs(rbi, op, op_flags); |
1051 | BUG_ON(!op_is_write(op)); | 1061 | BUG_ON(!op_is_write(op)); |
@@ -1978,9 +1988,11 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) | |||
1978 | put_cpu(); | 1988 | put_cpu(); |
1979 | } | 1989 | } |
1980 | 1990 | ||
1981 | static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp) | 1991 | static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp, |
1992 | int disks) | ||
1982 | { | 1993 | { |
1983 | struct stripe_head *sh; | 1994 | struct stripe_head *sh; |
1995 | int i; | ||
1984 | 1996 | ||
1985 | sh = kmem_cache_zalloc(sc, gfp); | 1997 | sh = kmem_cache_zalloc(sc, gfp); |
1986 | if (sh) { | 1998 | if (sh) { |
@@ -1989,6 +2001,17 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp) | |||
1989 | INIT_LIST_HEAD(&sh->batch_list); | 2001 | INIT_LIST_HEAD(&sh->batch_list); |
1990 | INIT_LIST_HEAD(&sh->lru); | 2002 | INIT_LIST_HEAD(&sh->lru); |
1991 | atomic_set(&sh->count, 1); | 2003 | atomic_set(&sh->count, 1); |
2004 | for (i = 0; i < disks; i++) { | ||
2005 | struct r5dev *dev = &sh->dev[i]; | ||
2006 | |||
2007 | bio_init(&dev->req); | ||
2008 | dev->req.bi_io_vec = &dev->vec; | ||
2009 | dev->req.bi_max_vecs = 1; | ||
2010 | |||
2011 | bio_init(&dev->rreq); | ||
2012 | dev->rreq.bi_io_vec = &dev->rvec; | ||
2013 | dev->rreq.bi_max_vecs = 1; | ||
2014 | } | ||
1992 | } | 2015 | } |
1993 | return sh; | 2016 | return sh; |
1994 | } | 2017 | } |
@@ -1996,7 +2019,7 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp) | |||
1996 | { | 2019 | { |
1997 | struct stripe_head *sh; | 2020 | struct stripe_head *sh; |
1998 | 2021 | ||
1999 | sh = alloc_stripe(conf->slab_cache, gfp); | 2022 | sh = alloc_stripe(conf->slab_cache, gfp, conf->pool_size); |
2000 | if (!sh) | 2023 | if (!sh) |
2001 | return 0; | 2024 | return 0; |
2002 | 2025 | ||
@@ -2167,7 +2190,7 @@ static int resize_stripes(struct r5conf *conf, int newsize) | |||
2167 | mutex_lock(&conf->cache_size_mutex); | 2190 | mutex_lock(&conf->cache_size_mutex); |
2168 | 2191 | ||
2169 | for (i = conf->max_nr_stripes; i; i--) { | 2192 | for (i = conf->max_nr_stripes; i; i--) { |
2170 | nsh = alloc_stripe(sc, GFP_KERNEL); | 2193 | nsh = alloc_stripe(sc, GFP_KERNEL, newsize); |
2171 | if (!nsh) | 2194 | if (!nsh) |
2172 | break; | 2195 | break; |
2173 | 2196 | ||
@@ -2299,6 +2322,7 @@ static void raid5_end_read_request(struct bio * bi) | |||
2299 | (unsigned long long)sh->sector, i, atomic_read(&sh->count), | 2322 | (unsigned long long)sh->sector, i, atomic_read(&sh->count), |
2300 | bi->bi_error); | 2323 | bi->bi_error); |
2301 | if (i == disks) { | 2324 | if (i == disks) { |
2325 | bio_reset(bi); | ||
2302 | BUG(); | 2326 | BUG(); |
2303 | return; | 2327 | return; |
2304 | } | 2328 | } |
@@ -2399,6 +2423,7 @@ static void raid5_end_read_request(struct bio * bi) | |||
2399 | } | 2423 | } |
2400 | } | 2424 | } |
2401 | rdev_dec_pending(rdev, conf->mddev); | 2425 | rdev_dec_pending(rdev, conf->mddev); |
2426 | bio_reset(bi); | ||
2402 | clear_bit(R5_LOCKED, &sh->dev[i].flags); | 2427 | clear_bit(R5_LOCKED, &sh->dev[i].flags); |
2403 | set_bit(STRIPE_HANDLE, &sh->state); | 2428 | set_bit(STRIPE_HANDLE, &sh->state); |
2404 | raid5_release_stripe(sh); | 2429 | raid5_release_stripe(sh); |
@@ -2436,6 +2461,7 @@ static void raid5_end_write_request(struct bio *bi) | |||
2436 | (unsigned long long)sh->sector, i, atomic_read(&sh->count), | 2461 | (unsigned long long)sh->sector, i, atomic_read(&sh->count), |
2437 | bi->bi_error); | 2462 | bi->bi_error); |
2438 | if (i == disks) { | 2463 | if (i == disks) { |
2464 | bio_reset(bi); | ||
2439 | BUG(); | 2465 | BUG(); |
2440 | return; | 2466 | return; |
2441 | } | 2467 | } |
@@ -2472,6 +2498,7 @@ static void raid5_end_write_request(struct bio *bi) | |||
2472 | if (sh->batch_head && bi->bi_error && !replacement) | 2498 | if (sh->batch_head && bi->bi_error && !replacement) |
2473 | set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); | 2499 | set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); |
2474 | 2500 | ||
2501 | bio_reset(bi); | ||
2475 | if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) | 2502 | if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) |
2476 | clear_bit(R5_LOCKED, &sh->dev[i].flags); | 2503 | clear_bit(R5_LOCKED, &sh->dev[i].flags); |
2477 | set_bit(STRIPE_HANDLE, &sh->state); | 2504 | set_bit(STRIPE_HANDLE, &sh->state); |
@@ -2485,16 +2512,6 @@ static void raid5_build_block(struct stripe_head *sh, int i, int previous) | |||
2485 | { | 2512 | { |
2486 | struct r5dev *dev = &sh->dev[i]; | 2513 | struct r5dev *dev = &sh->dev[i]; |
2487 | 2514 | ||
2488 | bio_init(&dev->req); | ||
2489 | dev->req.bi_io_vec = &dev->vec; | ||
2490 | dev->req.bi_max_vecs = 1; | ||
2491 | dev->req.bi_private = sh; | ||
2492 | |||
2493 | bio_init(&dev->rreq); | ||
2494 | dev->rreq.bi_io_vec = &dev->rvec; | ||
2495 | dev->rreq.bi_max_vecs = 1; | ||
2496 | dev->rreq.bi_private = sh; | ||
2497 | |||
2498 | dev->flags = 0; | 2515 | dev->flags = 0; |
2499 | dev->sector = raid5_compute_blocknr(sh, i, previous); | 2516 | dev->sector = raid5_compute_blocknr(sh, i, previous); |
2500 | } | 2517 | } |
@@ -4628,7 +4645,9 @@ finish: | |||
4628 | } | 4645 | } |
4629 | 4646 | ||
4630 | if (!bio_list_empty(&s.return_bi)) { | 4647 | if (!bio_list_empty(&s.return_bi)) { |
4631 | if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags)) { | 4648 | if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags) && |
4649 | (s.failed <= conf->max_degraded || | ||
4650 | conf->mddev->external == 0)) { | ||
4632 | spin_lock_irq(&conf->device_lock); | 4651 | spin_lock_irq(&conf->device_lock); |
4633 | bio_list_merge(&conf->return_bi, &s.return_bi); | 4652 | bio_list_merge(&conf->return_bi, &s.return_bi); |
4634 | spin_unlock_irq(&conf->device_lock); | 4653 | spin_unlock_irq(&conf->device_lock); |
@@ -6620,6 +6639,16 @@ static struct r5conf *setup_conf(struct mddev *mddev) | |||
6620 | } | 6639 | } |
6621 | 6640 | ||
6622 | conf->min_nr_stripes = NR_STRIPES; | 6641 | conf->min_nr_stripes = NR_STRIPES; |
6642 | if (mddev->reshape_position != MaxSector) { | ||
6643 | int stripes = max_t(int, | ||
6644 | ((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4, | ||
6645 | ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4); | ||
6646 | conf->min_nr_stripes = max(NR_STRIPES, stripes); | ||
6647 | if (conf->min_nr_stripes != NR_STRIPES) | ||
6648 | printk(KERN_INFO | ||
6649 | "md/raid:%s: force stripe size %d for reshape\n", | ||
6650 | mdname(mddev), conf->min_nr_stripes); | ||
6651 | } | ||
6623 | memory = conf->min_nr_stripes * (sizeof(struct stripe_head) + | 6652 | memory = conf->min_nr_stripes * (sizeof(struct stripe_head) + |
6624 | max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; | 6653 | max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; |
6625 | atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS); | 6654 | atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS); |
@@ -6826,11 +6855,14 @@ static int raid5_run(struct mddev *mddev) | |||
6826 | if (IS_ERR(conf)) | 6855 | if (IS_ERR(conf)) |
6827 | return PTR_ERR(conf); | 6856 | return PTR_ERR(conf); |
6828 | 6857 | ||
6829 | if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !journal_dev) { | 6858 | if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) { |
6830 | printk(KERN_ERR "md/raid:%s: journal disk is missing, force array readonly\n", | 6859 | if (!journal_dev) { |
6831 | mdname(mddev)); | 6860 | pr_err("md/raid:%s: journal disk is missing, force array readonly\n", |
6832 | mddev->ro = 1; | 6861 | mdname(mddev)); |
6833 | set_disk_ro(mddev->gendisk, 1); | 6862 | mddev->ro = 1; |
6863 | set_disk_ro(mddev->gendisk, 1); | ||
6864 | } else if (mddev->recovery_cp == MaxSector) | ||
6865 | set_bit(MD_JOURNAL_CLEAN, &mddev->flags); | ||
6834 | } | 6866 | } |
6835 | 6867 | ||
6836 | conf->min_offset_diff = min_offset_diff; | 6868 | conf->min_offset_diff = min_offset_diff; |
diff --git a/drivers/media/cec-edid.c b/drivers/media/cec-edid.c index 70018247bdda..5719b991e340 100644 --- a/drivers/media/cec-edid.c +++ b/drivers/media/cec-edid.c | |||
@@ -70,7 +70,10 @@ static unsigned int cec_get_edid_spa_location(const u8 *edid, unsigned int size) | |||
70 | u8 tag = edid[i] >> 5; | 70 | u8 tag = edid[i] >> 5; |
71 | u8 len = edid[i] & 0x1f; | 71 | u8 len = edid[i] & 0x1f; |
72 | 72 | ||
73 | if (tag == 3 && len >= 5 && i + len <= end) | 73 | if (tag == 3 && len >= 5 && i + len <= end && |
74 | edid[i + 1] == 0x03 && | ||
75 | edid[i + 2] == 0x0c && | ||
76 | edid[i + 3] == 0x00) | ||
74 | return i + 4; | 77 | return i + 4; |
75 | i += len + 1; | 78 | i += len + 1; |
76 | } while (i < end); | 79 | } while (i < end); |
diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c index efec2d1a7afd..4d080da7afaf 100644 --- a/drivers/media/pci/cx23885/cx23885-417.c +++ b/drivers/media/pci/cx23885/cx23885-417.c | |||
@@ -1552,6 +1552,7 @@ int cx23885_417_register(struct cx23885_dev *dev) | |||
1552 | q->mem_ops = &vb2_dma_sg_memops; | 1552 | q->mem_ops = &vb2_dma_sg_memops; |
1553 | q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; | 1553 | q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; |
1554 | q->lock = &dev->lock; | 1554 | q->lock = &dev->lock; |
1555 | q->dev = &dev->pci->dev; | ||
1555 | 1556 | ||
1556 | err = vb2_queue_init(q); | 1557 | err = vb2_queue_init(q); |
1557 | if (err < 0) | 1558 | if (err < 0) |
diff --git a/drivers/media/pci/saa7134/saa7134-dvb.c b/drivers/media/pci/saa7134/saa7134-dvb.c index db987e5b93eb..59a4b5f7724e 100644 --- a/drivers/media/pci/saa7134/saa7134-dvb.c +++ b/drivers/media/pci/saa7134/saa7134-dvb.c | |||
@@ -1238,6 +1238,7 @@ static int dvb_init(struct saa7134_dev *dev) | |||
1238 | q->buf_struct_size = sizeof(struct saa7134_buf); | 1238 | q->buf_struct_size = sizeof(struct saa7134_buf); |
1239 | q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; | 1239 | q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; |
1240 | q->lock = &dev->lock; | 1240 | q->lock = &dev->lock; |
1241 | q->dev = &dev->pci->dev; | ||
1241 | ret = vb2_queue_init(q); | 1242 | ret = vb2_queue_init(q); |
1242 | if (ret) { | 1243 | if (ret) { |
1243 | vb2_dvb_dealloc_frontends(&dev->frontends); | 1244 | vb2_dvb_dealloc_frontends(&dev->frontends); |
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c index ca417a454d67..791a5161809b 100644 --- a/drivers/media/pci/saa7134/saa7134-empress.c +++ b/drivers/media/pci/saa7134/saa7134-empress.c | |||
@@ -295,6 +295,7 @@ static int empress_init(struct saa7134_dev *dev) | |||
295 | q->buf_struct_size = sizeof(struct saa7134_buf); | 295 | q->buf_struct_size = sizeof(struct saa7134_buf); |
296 | q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; | 296 | q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; |
297 | q->lock = &dev->lock; | 297 | q->lock = &dev->lock; |
298 | q->dev = &dev->pci->dev; | ||
298 | err = vb2_queue_init(q); | 299 | err = vb2_queue_init(q); |
299 | if (err) | 300 | if (err) |
300 | return err; | 301 | return err; |
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index f25344bc7912..552b635cfce7 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig | |||
@@ -169,7 +169,7 @@ config VIDEO_MEDIATEK_VPU | |||
169 | config VIDEO_MEDIATEK_VCODEC | 169 | config VIDEO_MEDIATEK_VCODEC |
170 | tristate "Mediatek Video Codec driver" | 170 | tristate "Mediatek Video Codec driver" |
171 | depends on MTK_IOMMU || COMPILE_TEST | 171 | depends on MTK_IOMMU || COMPILE_TEST |
172 | depends on VIDEO_DEV && VIDEO_V4L2 | 172 | depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA |
173 | depends on ARCH_MEDIATEK || COMPILE_TEST | 173 | depends on ARCH_MEDIATEK || COMPILE_TEST |
174 | select VIDEOBUF2_DMA_CONTIG | 174 | select VIDEOBUF2_DMA_CONTIG |
175 | select V4L2_MEM2MEM_DEV | 175 | select V4L2_MEM2MEM_DEV |
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h index 94f0a425be42..3a8e6958adae 100644 --- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h +++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h | |||
@@ -23,7 +23,6 @@ | |||
23 | #include <media/v4l2-ioctl.h> | 23 | #include <media/v4l2-ioctl.h> |
24 | #include <media/videobuf2-core.h> | 24 | #include <media/videobuf2-core.h> |
25 | 25 | ||
26 | #include "mtk_vcodec_util.h" | ||
27 | 26 | ||
28 | #define MTK_VCODEC_DRV_NAME "mtk_vcodec_drv" | 27 | #define MTK_VCODEC_DRV_NAME "mtk_vcodec_drv" |
29 | #define MTK_VCODEC_ENC_NAME "mtk-vcodec-enc" | 28 | #define MTK_VCODEC_ENC_NAME "mtk-vcodec-enc" |
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c index 3ed3f2d31df5..2c5719ac23b2 100644 --- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c +++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c | |||
@@ -487,7 +487,6 @@ static int vidioc_venc_s_fmt_out(struct file *file, void *priv, | |||
487 | struct mtk_q_data *q_data; | 487 | struct mtk_q_data *q_data; |
488 | int ret, i; | 488 | int ret, i; |
489 | struct mtk_video_fmt *fmt; | 489 | struct mtk_video_fmt *fmt; |
490 | unsigned int pitch_w_div16; | ||
491 | struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp; | 490 | struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp; |
492 | 491 | ||
493 | vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type); | 492 | vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type); |
@@ -530,15 +529,6 @@ static int vidioc_venc_s_fmt_out(struct file *file, void *priv, | |||
530 | q_data->coded_width = f->fmt.pix_mp.width; | 529 | q_data->coded_width = f->fmt.pix_mp.width; |
531 | q_data->coded_height = f->fmt.pix_mp.height; | 530 | q_data->coded_height = f->fmt.pix_mp.height; |
532 | 531 | ||
533 | pitch_w_div16 = DIV_ROUND_UP(q_data->visible_width, 16); | ||
534 | if (pitch_w_div16 % 8 != 0) { | ||
535 | /* Adjust returned width/height, so application could correctly | ||
536 | * allocate hw required memory | ||
537 | */ | ||
538 | q_data->visible_height += 32; | ||
539 | vidioc_try_fmt(f, q_data->fmt); | ||
540 | } | ||
541 | |||
542 | q_data->field = f->fmt.pix_mp.field; | 532 | q_data->field = f->fmt.pix_mp.field; |
543 | ctx->colorspace = f->fmt.pix_mp.colorspace; | 533 | ctx->colorspace = f->fmt.pix_mp.colorspace; |
544 | ctx->ycbcr_enc = f->fmt.pix_mp.ycbcr_enc; | 534 | ctx->ycbcr_enc = f->fmt.pix_mp.ycbcr_enc; |
@@ -878,7 +868,8 @@ static int mtk_venc_encode_header(void *priv) | |||
878 | { | 868 | { |
879 | struct mtk_vcodec_ctx *ctx = priv; | 869 | struct mtk_vcodec_ctx *ctx = priv; |
880 | int ret; | 870 | int ret; |
881 | struct vb2_buffer *dst_buf; | 871 | struct vb2_buffer *src_buf, *dst_buf; |
872 | struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2; | ||
882 | struct mtk_vcodec_mem bs_buf; | 873 | struct mtk_vcodec_mem bs_buf; |
883 | struct venc_done_result enc_result; | 874 | struct venc_done_result enc_result; |
884 | 875 | ||
@@ -911,6 +902,15 @@ static int mtk_venc_encode_header(void *priv) | |||
911 | mtk_v4l2_err("venc_if_encode failed=%d", ret); | 902 | mtk_v4l2_err("venc_if_encode failed=%d", ret); |
912 | return -EINVAL; | 903 | return -EINVAL; |
913 | } | 904 | } |
905 | src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx); | ||
906 | if (src_buf) { | ||
907 | src_vb2_v4l2 = to_vb2_v4l2_buffer(src_buf); | ||
908 | dst_vb2_v4l2 = to_vb2_v4l2_buffer(dst_buf); | ||
909 | dst_buf->timestamp = src_buf->timestamp; | ||
910 | dst_vb2_v4l2->timecode = src_vb2_v4l2->timecode; | ||
911 | } else { | ||
912 | mtk_v4l2_err("No timestamp for the header buffer."); | ||
913 | } | ||
914 | 914 | ||
915 | ctx->state = MTK_STATE_HEADER; | 915 | ctx->state = MTK_STATE_HEADER; |
916 | dst_buf->planes[0].bytesused = enc_result.bs_size; | 916 | dst_buf->planes[0].bytesused = enc_result.bs_size; |
@@ -1003,7 +1003,7 @@ static void mtk_venc_worker(struct work_struct *work) | |||
1003 | struct mtk_vcodec_mem bs_buf; | 1003 | struct mtk_vcodec_mem bs_buf; |
1004 | struct venc_done_result enc_result; | 1004 | struct venc_done_result enc_result; |
1005 | int ret, i; | 1005 | int ret, i; |
1006 | struct vb2_v4l2_buffer *vb2_v4l2; | 1006 | struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2; |
1007 | 1007 | ||
1008 | /* check dst_buf, dst_buf may be removed in device_run | 1008 | /* check dst_buf, dst_buf may be removed in device_run |
1009 | * to stored encdoe header so we need check dst_buf and | 1009 | * to stored encdoe header so we need check dst_buf and |
@@ -1043,9 +1043,14 @@ static void mtk_venc_worker(struct work_struct *work) | |||
1043 | ret = venc_if_encode(ctx, VENC_START_OPT_ENCODE_FRAME, | 1043 | ret = venc_if_encode(ctx, VENC_START_OPT_ENCODE_FRAME, |
1044 | &frm_buf, &bs_buf, &enc_result); | 1044 | &frm_buf, &bs_buf, &enc_result); |
1045 | 1045 | ||
1046 | vb2_v4l2 = container_of(dst_buf, struct vb2_v4l2_buffer, vb2_buf); | 1046 | src_vb2_v4l2 = to_vb2_v4l2_buffer(src_buf); |
1047 | dst_vb2_v4l2 = to_vb2_v4l2_buffer(dst_buf); | ||
1048 | |||
1049 | dst_buf->timestamp = src_buf->timestamp; | ||
1050 | dst_vb2_v4l2->timecode = src_vb2_v4l2->timecode; | ||
1051 | |||
1047 | if (enc_result.is_key_frm) | 1052 | if (enc_result.is_key_frm) |
1048 | vb2_v4l2->flags |= V4L2_BUF_FLAG_KEYFRAME; | 1053 | dst_vb2_v4l2->flags |= V4L2_BUF_FLAG_KEYFRAME; |
1049 | 1054 | ||
1050 | if (ret) { | 1055 | if (ret) { |
1051 | v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf), | 1056 | v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf), |
@@ -1217,7 +1222,7 @@ int mtk_vcodec_enc_ctrls_setup(struct mtk_vcodec_ctx *ctx) | |||
1217 | 0, V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE); | 1222 | 0, V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE); |
1218 | v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_H264_PROFILE, | 1223 | v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_H264_PROFILE, |
1219 | V4L2_MPEG_VIDEO_H264_PROFILE_HIGH, | 1224 | V4L2_MPEG_VIDEO_H264_PROFILE_HIGH, |
1220 | 0, V4L2_MPEG_VIDEO_H264_PROFILE_MAIN); | 1225 | 0, V4L2_MPEG_VIDEO_H264_PROFILE_HIGH); |
1221 | v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_H264_LEVEL, | 1226 | v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_H264_LEVEL, |
1222 | V4L2_MPEG_VIDEO_H264_LEVEL_4_2, | 1227 | V4L2_MPEG_VIDEO_H264_LEVEL_4_2, |
1223 | 0, V4L2_MPEG_VIDEO_H264_LEVEL_4_0); | 1228 | 0, V4L2_MPEG_VIDEO_H264_LEVEL_4_0); |
@@ -1288,5 +1293,10 @@ int mtk_venc_lock(struct mtk_vcodec_ctx *ctx) | |||
1288 | 1293 | ||
1289 | void mtk_vcodec_enc_release(struct mtk_vcodec_ctx *ctx) | 1294 | void mtk_vcodec_enc_release(struct mtk_vcodec_ctx *ctx) |
1290 | { | 1295 | { |
1291 | venc_if_deinit(ctx); | 1296 | int ret = venc_if_deinit(ctx); |
1297 | |||
1298 | if (ret) | ||
1299 | mtk_v4l2_err("venc_if_deinit failed=%d", ret); | ||
1300 | |||
1301 | ctx->state = MTK_STATE_FREE; | ||
1292 | } | 1302 | } |
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c index c7806ecda2dd..5cd2151431bf 100644 --- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c +++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c | |||
@@ -218,11 +218,15 @@ static int fops_vcodec_release(struct file *file) | |||
218 | mtk_v4l2_debug(1, "[%d] encoder", ctx->id); | 218 | mtk_v4l2_debug(1, "[%d] encoder", ctx->id); |
219 | mutex_lock(&dev->dev_mutex); | 219 | mutex_lock(&dev->dev_mutex); |
220 | 220 | ||
221 | /* | ||
222 | * Call v4l2_m2m_ctx_release to make sure the worker thread is not | ||
223 | * running after venc_if_deinit. | ||
224 | */ | ||
225 | v4l2_m2m_ctx_release(ctx->m2m_ctx); | ||
221 | mtk_vcodec_enc_release(ctx); | 226 | mtk_vcodec_enc_release(ctx); |
222 | v4l2_fh_del(&ctx->fh); | 227 | v4l2_fh_del(&ctx->fh); |
223 | v4l2_fh_exit(&ctx->fh); | 228 | v4l2_fh_exit(&ctx->fh); |
224 | v4l2_ctrl_handler_free(&ctx->ctrl_hdl); | 229 | v4l2_ctrl_handler_free(&ctx->ctrl_hdl); |
225 | v4l2_m2m_ctx_release(ctx->m2m_ctx); | ||
226 | 230 | ||
227 | list_del_init(&ctx->list); | 231 | list_del_init(&ctx->list); |
228 | dev->num_instances--; | 232 | dev->num_instances--; |
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h index 33e890f5aa9c..12131855b46a 100644 --- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h +++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h | |||
@@ -16,7 +16,6 @@ | |||
16 | #define _MTK_VCODEC_INTR_H_ | 16 | #define _MTK_VCODEC_INTR_H_ |
17 | 17 | ||
18 | #define MTK_INST_IRQ_RECEIVED 0x1 | 18 | #define MTK_INST_IRQ_RECEIVED 0x1 |
19 | #define MTK_INST_WORK_THREAD_ABORT_DONE 0x2 | ||
20 | 19 | ||
21 | struct mtk_vcodec_ctx; | 20 | struct mtk_vcodec_ctx; |
22 | 21 | ||
diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c index 9a600525b3c1..63d4be4ff327 100644 --- a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c +++ b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c | |||
@@ -61,6 +61,8 @@ enum venc_h264_bs_mode { | |||
61 | 61 | ||
62 | /* | 62 | /* |
63 | * struct venc_h264_vpu_config - Structure for h264 encoder configuration | 63 | * struct venc_h264_vpu_config - Structure for h264 encoder configuration |
64 | * AP-W/R : AP is writer/reader on this item | ||
65 | * VPU-W/R: VPU is write/reader on this item | ||
64 | * @input_fourcc: input fourcc | 66 | * @input_fourcc: input fourcc |
65 | * @bitrate: target bitrate (in bps) | 67 | * @bitrate: target bitrate (in bps) |
66 | * @pic_w: picture width. Picture size is visible stream resolution, in pixels, | 68 | * @pic_w: picture width. Picture size is visible stream resolution, in pixels, |
@@ -94,13 +96,13 @@ struct venc_h264_vpu_config { | |||
94 | 96 | ||
95 | /* | 97 | /* |
96 | * struct venc_h264_vpu_buf - Structure for buffer information | 98 | * struct venc_h264_vpu_buf - Structure for buffer information |
97 | * @align: buffer alignment (in bytes) | 99 | * AP-W/R : AP is writer/reader on this item |
100 | * VPU-W/R: VPU is write/reader on this item | ||
98 | * @iova: IO virtual address | 101 | * @iova: IO virtual address |
99 | * @vpua: VPU side memory addr which is used by RC_CODE | 102 | * @vpua: VPU side memory addr which is used by RC_CODE |
100 | * @size: buffer size (in bytes) | 103 | * @size: buffer size (in bytes) |
101 | */ | 104 | */ |
102 | struct venc_h264_vpu_buf { | 105 | struct venc_h264_vpu_buf { |
103 | u32 align; | ||
104 | u32 iova; | 106 | u32 iova; |
105 | u32 vpua; | 107 | u32 vpua; |
106 | u32 size; | 108 | u32 size; |
@@ -108,6 +110,8 @@ struct venc_h264_vpu_buf { | |||
108 | 110 | ||
109 | /* | 111 | /* |
110 | * struct venc_h264_vsi - Structure for VPU driver control and info share | 112 | * struct venc_h264_vsi - Structure for VPU driver control and info share |
113 | * AP-W/R : AP is writer/reader on this item | ||
114 | * VPU-W/R: VPU is write/reader on this item | ||
111 | * This structure is allocated in VPU side and shared to AP side. | 115 | * This structure is allocated in VPU side and shared to AP side. |
112 | * @config: h264 encoder configuration | 116 | * @config: h264 encoder configuration |
113 | * @work_bufs: working buffer information in VPU side | 117 | * @work_bufs: working buffer information in VPU side |
@@ -150,12 +154,6 @@ struct venc_h264_inst { | |||
150 | struct mtk_vcodec_ctx *ctx; | 154 | struct mtk_vcodec_ctx *ctx; |
151 | }; | 155 | }; |
152 | 156 | ||
153 | static inline void h264_write_reg(struct venc_h264_inst *inst, u32 addr, | ||
154 | u32 val) | ||
155 | { | ||
156 | writel(val, inst->hw_base + addr); | ||
157 | } | ||
158 | |||
159 | static inline u32 h264_read_reg(struct venc_h264_inst *inst, u32 addr) | 157 | static inline u32 h264_read_reg(struct venc_h264_inst *inst, u32 addr) |
160 | { | 158 | { |
161 | return readl(inst->hw_base + addr); | 159 | return readl(inst->hw_base + addr); |
@@ -214,6 +212,8 @@ static unsigned int h264_get_level(struct venc_h264_inst *inst, | |||
214 | return 40; | 212 | return 40; |
215 | case V4L2_MPEG_VIDEO_H264_LEVEL_4_1: | 213 | case V4L2_MPEG_VIDEO_H264_LEVEL_4_1: |
216 | return 41; | 214 | return 41; |
215 | case V4L2_MPEG_VIDEO_H264_LEVEL_4_2: | ||
216 | return 42; | ||
217 | default: | 217 | default: |
218 | mtk_vcodec_debug(inst, "unsupported level %d", level); | 218 | mtk_vcodec_debug(inst, "unsupported level %d", level); |
219 | return 31; | 219 | return 31; |
diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c index 60bbcd2a0510..6d9758479f9a 100644 --- a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c +++ b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c | |||
@@ -56,6 +56,8 @@ enum venc_vp8_vpu_work_buf { | |||
56 | 56 | ||
57 | /* | 57 | /* |
58 | * struct venc_vp8_vpu_config - Structure for vp8 encoder configuration | 58 | * struct venc_vp8_vpu_config - Structure for vp8 encoder configuration |
59 | * AP-W/R : AP is writer/reader on this item | ||
60 | * VPU-W/R: VPU is write/reader on this item | ||
59 | * @input_fourcc: input fourcc | 61 | * @input_fourcc: input fourcc |
60 | * @bitrate: target bitrate (in bps) | 62 | * @bitrate: target bitrate (in bps) |
61 | * @pic_w: picture width. Picture size is visible stream resolution, in pixels, | 63 | * @pic_w: picture width. Picture size is visible stream resolution, in pixels, |
@@ -83,14 +85,14 @@ struct venc_vp8_vpu_config { | |||
83 | }; | 85 | }; |
84 | 86 | ||
85 | /* | 87 | /* |
86 | * struct venc_vp8_vpu_buf -Structure for buffer information | 88 | * struct venc_vp8_vpu_buf - Structure for buffer information |
87 | * @align: buffer alignment (in bytes) | 89 | * AP-W/R : AP is writer/reader on this item |
90 | * VPU-W/R: VPU is write/reader on this item | ||
88 | * @iova: IO virtual address | 91 | * @iova: IO virtual address |
89 | * @vpua: VPU side memory addr which is used by RC_CODE | 92 | * @vpua: VPU side memory addr which is used by RC_CODE |
90 | * @size: buffer size (in bytes) | 93 | * @size: buffer size (in bytes) |
91 | */ | 94 | */ |
92 | struct venc_vp8_vpu_buf { | 95 | struct venc_vp8_vpu_buf { |
93 | u32 align; | ||
94 | u32 iova; | 96 | u32 iova; |
95 | u32 vpua; | 97 | u32 vpua; |
96 | u32 size; | 98 | u32 size; |
@@ -98,6 +100,8 @@ struct venc_vp8_vpu_buf { | |||
98 | 100 | ||
99 | /* | 101 | /* |
100 | * struct venc_vp8_vsi - Structure for VPU driver control and info share | 102 | * struct venc_vp8_vsi - Structure for VPU driver control and info share |
103 | * AP-W/R : AP is writer/reader on this item | ||
104 | * VPU-W/R: VPU is write/reader on this item | ||
101 | * This structure is allocated in VPU side and shared to AP side. | 105 | * This structure is allocated in VPU side and shared to AP side. |
102 | * @config: vp8 encoder configuration | 106 | * @config: vp8 encoder configuration |
103 | * @work_bufs: working buffer information in VPU side | 107 | * @work_bufs: working buffer information in VPU side |
@@ -138,12 +142,6 @@ struct venc_vp8_inst { | |||
138 | struct mtk_vcodec_ctx *ctx; | 142 | struct mtk_vcodec_ctx *ctx; |
139 | }; | 143 | }; |
140 | 144 | ||
141 | static inline void vp8_enc_write_reg(struct venc_vp8_inst *inst, u32 addr, | ||
142 | u32 val) | ||
143 | { | ||
144 | writel(val, inst->hw_base + addr); | ||
145 | } | ||
146 | |||
147 | static inline u32 vp8_enc_read_reg(struct venc_vp8_inst *inst, u32 addr) | 145 | static inline u32 vp8_enc_read_reg(struct venc_vp8_inst *inst, u32 addr) |
148 | { | 146 | { |
149 | return readl(inst->hw_base + addr); | 147 | return readl(inst->hw_base + addr); |
diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c index 6a7bcc3028b1..bc50c69ee0c5 100644 --- a/drivers/media/platform/rcar-fcp.c +++ b/drivers/media/platform/rcar-fcp.c | |||
@@ -99,10 +99,16 @@ EXPORT_SYMBOL_GPL(rcar_fcp_put); | |||
99 | */ | 99 | */ |
100 | int rcar_fcp_enable(struct rcar_fcp_device *fcp) | 100 | int rcar_fcp_enable(struct rcar_fcp_device *fcp) |
101 | { | 101 | { |
102 | int error; | ||
103 | |||
102 | if (!fcp) | 104 | if (!fcp) |
103 | return 0; | 105 | return 0; |
104 | 106 | ||
105 | return pm_runtime_get_sync(fcp->dev); | 107 | error = pm_runtime_get_sync(fcp->dev); |
108 | if (error < 0) | ||
109 | return error; | ||
110 | |||
111 | return 0; | ||
106 | } | 112 | } |
107 | EXPORT_SYMBOL_GPL(rcar_fcp_enable); | 113 | EXPORT_SYMBOL_GPL(rcar_fcp_enable); |
108 | 114 | ||
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c index 869c83fb3c5d..f00f3e742265 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c | |||
@@ -2185,7 +2185,7 @@ static int gpmc_probe_dt(struct platform_device *pdev) | |||
2185 | return 0; | 2185 | return 0; |
2186 | } | 2186 | } |
2187 | 2187 | ||
2188 | static int gpmc_probe_dt_children(struct platform_device *pdev) | 2188 | static void gpmc_probe_dt_children(struct platform_device *pdev) |
2189 | { | 2189 | { |
2190 | int ret; | 2190 | int ret; |
2191 | struct device_node *child; | 2191 | struct device_node *child; |
@@ -2200,11 +2200,11 @@ static int gpmc_probe_dt_children(struct platform_device *pdev) | |||
2200 | else | 2200 | else |
2201 | ret = gpmc_probe_generic_child(pdev, child); | 2201 | ret = gpmc_probe_generic_child(pdev, child); |
2202 | 2202 | ||
2203 | if (ret) | 2203 | if (ret) { |
2204 | return ret; | 2204 | dev_err(&pdev->dev, "failed to probe DT child '%s': %d\n", |
2205 | child->name, ret); | ||
2206 | } | ||
2205 | } | 2207 | } |
2206 | |||
2207 | return 0; | ||
2208 | } | 2208 | } |
2209 | #else | 2209 | #else |
2210 | static int gpmc_probe_dt(struct platform_device *pdev) | 2210 | static int gpmc_probe_dt(struct platform_device *pdev) |
@@ -2212,9 +2212,8 @@ static int gpmc_probe_dt(struct platform_device *pdev) | |||
2212 | return 0; | 2212 | return 0; |
2213 | } | 2213 | } |
2214 | 2214 | ||
2215 | static int gpmc_probe_dt_children(struct platform_device *pdev) | 2215 | static void gpmc_probe_dt_children(struct platform_device *pdev) |
2216 | { | 2216 | { |
2217 | return 0; | ||
2218 | } | 2217 | } |
2219 | #endif /* CONFIG_OF */ | 2218 | #endif /* CONFIG_OF */ |
2220 | 2219 | ||
@@ -2369,16 +2368,10 @@ static int gpmc_probe(struct platform_device *pdev) | |||
2369 | goto setup_irq_failed; | 2368 | goto setup_irq_failed; |
2370 | } | 2369 | } |
2371 | 2370 | ||
2372 | rc = gpmc_probe_dt_children(pdev); | 2371 | gpmc_probe_dt_children(pdev); |
2373 | if (rc < 0) { | ||
2374 | dev_err(gpmc->dev, "failed to probe DT children\n"); | ||
2375 | goto dt_children_failed; | ||
2376 | } | ||
2377 | 2372 | ||
2378 | return 0; | 2373 | return 0; |
2379 | 2374 | ||
2380 | dt_children_failed: | ||
2381 | gpmc_free_irq(gpmc); | ||
2382 | setup_irq_failed: | 2375 | setup_irq_failed: |
2383 | gpmc_gpio_exit(gpmc); | 2376 | gpmc_gpio_exit(gpmc); |
2384 | gpio_init_failed: | 2377 | gpio_init_failed: |
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index a216b4667742..d00252828966 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig | |||
@@ -345,16 +345,6 @@ config SENSORS_TSL2550 | |||
345 | This driver can also be built as a module. If so, the module | 345 | This driver can also be built as a module. If so, the module |
346 | will be called tsl2550. | 346 | will be called tsl2550. |
347 | 347 | ||
348 | config SENSORS_BH1780 | ||
349 | tristate "ROHM BH1780GLI ambient light sensor" | ||
350 | depends on I2C && SYSFS | ||
351 | help | ||
352 | If you say yes here you get support for the ROHM BH1780GLI | ||
353 | ambient light sensor. | ||
354 | |||
355 | This driver can also be built as a module. If so, the module | ||
356 | will be called bh1780gli. | ||
357 | |||
358 | config SENSORS_BH1770 | 348 | config SENSORS_BH1770 |
359 | tristate "BH1770GLC / SFH7770 combined ALS - Proximity sensor" | 349 | tristate "BH1770GLC / SFH7770 combined ALS - Proximity sensor" |
360 | depends on I2C | 350 | depends on I2C |
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 7410c6d9a34d..fb32516ddfe2 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile | |||
@@ -19,7 +19,6 @@ obj-$(CONFIG_TIFM_CORE) += tifm_core.o | |||
19 | obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o | 19 | obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o |
20 | obj-$(CONFIG_PHANTOM) += phantom.o | 20 | obj-$(CONFIG_PHANTOM) += phantom.o |
21 | obj-$(CONFIG_QCOM_COINCELL) += qcom-coincell.o | 21 | obj-$(CONFIG_QCOM_COINCELL) += qcom-coincell.o |
22 | obj-$(CONFIG_SENSORS_BH1780) += bh1780gli.o | ||
23 | obj-$(CONFIG_SENSORS_BH1770) += bh1770glc.o | 22 | obj-$(CONFIG_SENSORS_BH1770) += bh1770glc.o |
24 | obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o | 23 | obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o |
25 | obj-$(CONFIG_SGI_IOC4) += ioc4.o | 24 | obj-$(CONFIG_SGI_IOC4) += ioc4.o |
diff --git a/drivers/misc/bh1780gli.c b/drivers/misc/bh1780gli.c deleted file mode 100644 index 7f90ce5a569a..000000000000 --- a/drivers/misc/bh1780gli.c +++ /dev/null | |||
@@ -1,259 +0,0 @@ | |||
1 | /* | ||
2 | * bh1780gli.c | ||
3 | * ROHM Ambient Light Sensor Driver | ||
4 | * | ||
5 | * Copyright (C) 2010 Texas Instruments | ||
6 | * Author: Hemanth V <hemanthv@ti.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License version 2 as published by | ||
10 | * the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along with | ||
18 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | #include <linux/i2c.h> | ||
21 | #include <linux/slab.h> | ||
22 | #include <linux/mutex.h> | ||
23 | #include <linux/platform_device.h> | ||
24 | #include <linux/delay.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/of.h> | ||
27 | |||
28 | #define BH1780_REG_CONTROL 0x80 | ||
29 | #define BH1780_REG_PARTID 0x8A | ||
30 | #define BH1780_REG_MANFID 0x8B | ||
31 | #define BH1780_REG_DLOW 0x8C | ||
32 | #define BH1780_REG_DHIGH 0x8D | ||
33 | |||
34 | #define BH1780_REVMASK (0xf) | ||
35 | #define BH1780_POWMASK (0x3) | ||
36 | #define BH1780_POFF (0x0) | ||
37 | #define BH1780_PON (0x3) | ||
38 | |||
39 | /* power on settling time in ms */ | ||
40 | #define BH1780_PON_DELAY 2 | ||
41 | |||
42 | struct bh1780_data { | ||
43 | struct i2c_client *client; | ||
44 | int power_state; | ||
45 | /* lock for sysfs operations */ | ||
46 | struct mutex lock; | ||
47 | }; | ||
48 | |||
49 | static int bh1780_write(struct bh1780_data *ddata, u8 reg, u8 val, char *msg) | ||
50 | { | ||
51 | int ret = i2c_smbus_write_byte_data(ddata->client, reg, val); | ||
52 | if (ret < 0) | ||
53 | dev_err(&ddata->client->dev, | ||
54 | "i2c_smbus_write_byte_data failed error %d Register (%s)\n", | ||
55 | ret, msg); | ||
56 | return ret; | ||
57 | } | ||
58 | |||
59 | static int bh1780_read(struct bh1780_data *ddata, u8 reg, char *msg) | ||
60 | { | ||
61 | int ret = i2c_smbus_read_byte_data(ddata->client, reg); | ||
62 | if (ret < 0) | ||
63 | dev_err(&ddata->client->dev, | ||
64 | "i2c_smbus_read_byte_data failed error %d Register (%s)\n", | ||
65 | ret, msg); | ||
66 | return ret; | ||
67 | } | ||
68 | |||
69 | static ssize_t bh1780_show_lux(struct device *dev, | ||
70 | struct device_attribute *attr, char *buf) | ||
71 | { | ||
72 | struct platform_device *pdev = to_platform_device(dev); | ||
73 | struct bh1780_data *ddata = platform_get_drvdata(pdev); | ||
74 | int lsb, msb; | ||
75 | |||
76 | lsb = bh1780_read(ddata, BH1780_REG_DLOW, "DLOW"); | ||
77 | if (lsb < 0) | ||
78 | return lsb; | ||
79 | |||
80 | msb = bh1780_read(ddata, BH1780_REG_DHIGH, "DHIGH"); | ||
81 | if (msb < 0) | ||
82 | return msb; | ||
83 | |||
84 | return sprintf(buf, "%d\n", (msb << 8) | lsb); | ||
85 | } | ||
86 | |||
87 | static ssize_t bh1780_show_power_state(struct device *dev, | ||
88 | struct device_attribute *attr, | ||
89 | char *buf) | ||
90 | { | ||
91 | struct platform_device *pdev = to_platform_device(dev); | ||
92 | struct bh1780_data *ddata = platform_get_drvdata(pdev); | ||
93 | int state; | ||
94 | |||
95 | state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL"); | ||
96 | if (state < 0) | ||
97 | return state; | ||
98 | |||
99 | return sprintf(buf, "%d\n", state & BH1780_POWMASK); | ||
100 | } | ||
101 | |||
102 | static ssize_t bh1780_store_power_state(struct device *dev, | ||
103 | struct device_attribute *attr, | ||
104 | const char *buf, size_t count) | ||
105 | { | ||
106 | struct platform_device *pdev = to_platform_device(dev); | ||
107 | struct bh1780_data *ddata = platform_get_drvdata(pdev); | ||
108 | unsigned long val; | ||
109 | int error; | ||
110 | |||
111 | error = kstrtoul(buf, 0, &val); | ||
112 | if (error) | ||
113 | return error; | ||
114 | |||
115 | if (val < BH1780_POFF || val > BH1780_PON) | ||
116 | return -EINVAL; | ||
117 | |||
118 | mutex_lock(&ddata->lock); | ||
119 | |||
120 | error = bh1780_write(ddata, BH1780_REG_CONTROL, val, "CONTROL"); | ||
121 | if (error < 0) { | ||
122 | mutex_unlock(&ddata->lock); | ||
123 | return error; | ||
124 | } | ||
125 | |||
126 | msleep(BH1780_PON_DELAY); | ||
127 | ddata->power_state = val; | ||
128 | mutex_unlock(&ddata->lock); | ||
129 | |||
130 | return count; | ||
131 | } | ||
132 | |||
133 | static DEVICE_ATTR(lux, S_IRUGO, bh1780_show_lux, NULL); | ||
134 | |||
135 | static DEVICE_ATTR(power_state, S_IWUSR | S_IRUGO, | ||
136 | bh1780_show_power_state, bh1780_store_power_state); | ||
137 | |||
138 | static struct attribute *bh1780_attributes[] = { | ||
139 | &dev_attr_power_state.attr, | ||
140 | &dev_attr_lux.attr, | ||
141 | NULL | ||
142 | }; | ||
143 | |||
144 | static const struct attribute_group bh1780_attr_group = { | ||
145 | .attrs = bh1780_attributes, | ||
146 | }; | ||
147 | |||
148 | static int bh1780_probe(struct i2c_client *client, | ||
149 | const struct i2c_device_id *id) | ||
150 | { | ||
151 | int ret; | ||
152 | struct bh1780_data *ddata; | ||
153 | struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); | ||
154 | |||
155 | if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE)) | ||
156 | return -EIO; | ||
157 | |||
158 | ddata = devm_kzalloc(&client->dev, sizeof(struct bh1780_data), | ||
159 | GFP_KERNEL); | ||
160 | if (ddata == NULL) | ||
161 | return -ENOMEM; | ||
162 | |||
163 | ddata->client = client; | ||
164 | i2c_set_clientdata(client, ddata); | ||
165 | |||
166 | ret = bh1780_read(ddata, BH1780_REG_PARTID, "PART ID"); | ||
167 | if (ret < 0) | ||
168 | return ret; | ||
169 | |||
170 | dev_info(&client->dev, "Ambient Light Sensor, Rev : %d\n", | ||
171 | (ret & BH1780_REVMASK)); | ||
172 | |||
173 | mutex_init(&ddata->lock); | ||
174 | |||
175 | return sysfs_create_group(&client->dev.kobj, &bh1780_attr_group); | ||
176 | } | ||
177 | |||
178 | static int bh1780_remove(struct i2c_client *client) | ||
179 | { | ||
180 | sysfs_remove_group(&client->dev.kobj, &bh1780_attr_group); | ||
181 | |||
182 | return 0; | ||
183 | } | ||
184 | |||
185 | #ifdef CONFIG_PM_SLEEP | ||
186 | static int bh1780_suspend(struct device *dev) | ||
187 | { | ||
188 | struct bh1780_data *ddata; | ||
189 | int state, ret; | ||
190 | struct i2c_client *client = to_i2c_client(dev); | ||
191 | |||
192 | ddata = i2c_get_clientdata(client); | ||
193 | state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL"); | ||
194 | if (state < 0) | ||
195 | return state; | ||
196 | |||
197 | ddata->power_state = state & BH1780_POWMASK; | ||
198 | |||
199 | ret = bh1780_write(ddata, BH1780_REG_CONTROL, BH1780_POFF, | ||
200 | "CONTROL"); | ||
201 | |||
202 | if (ret < 0) | ||
203 | return ret; | ||
204 | |||
205 | return 0; | ||
206 | } | ||
207 | |||
208 | static int bh1780_resume(struct device *dev) | ||
209 | { | ||
210 | struct bh1780_data *ddata; | ||
211 | int state, ret; | ||
212 | struct i2c_client *client = to_i2c_client(dev); | ||
213 | |||
214 | ddata = i2c_get_clientdata(client); | ||
215 | state = ddata->power_state; | ||
216 | ret = bh1780_write(ddata, BH1780_REG_CONTROL, state, | ||
217 | "CONTROL"); | ||
218 | |||
219 | if (ret < 0) | ||
220 | return ret; | ||
221 | |||
222 | return 0; | ||
223 | } | ||
224 | #endif /* CONFIG_PM_SLEEP */ | ||
225 | |||
226 | static SIMPLE_DEV_PM_OPS(bh1780_pm, bh1780_suspend, bh1780_resume); | ||
227 | |||
228 | static const struct i2c_device_id bh1780_id[] = { | ||
229 | { "bh1780", 0 }, | ||
230 | { }, | ||
231 | }; | ||
232 | |||
233 | MODULE_DEVICE_TABLE(i2c, bh1780_id); | ||
234 | |||
235 | #ifdef CONFIG_OF | ||
236 | static const struct of_device_id of_bh1780_match[] = { | ||
237 | { .compatible = "rohm,bh1780gli", }, | ||
238 | {}, | ||
239 | }; | ||
240 | |||
241 | MODULE_DEVICE_TABLE(of, of_bh1780_match); | ||
242 | #endif | ||
243 | |||
244 | static struct i2c_driver bh1780_driver = { | ||
245 | .probe = bh1780_probe, | ||
246 | .remove = bh1780_remove, | ||
247 | .id_table = bh1780_id, | ||
248 | .driver = { | ||
249 | .name = "bh1780", | ||
250 | .pm = &bh1780_pm, | ||
251 | .of_match_table = of_match_ptr(of_bh1780_match), | ||
252 | }, | ||
253 | }; | ||
254 | |||
255 | module_i2c_driver(bh1780_driver); | ||
256 | |||
257 | MODULE_DESCRIPTION("BH1780GLI Ambient Light Sensor Driver"); | ||
258 | MODULE_LICENSE("GPL"); | ||
259 | MODULE_AUTHOR("Hemanth V <hemanthv@ti.com>"); | ||
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c index 7ada5f1b7bb6..3519acebfdab 100644 --- a/drivers/misc/cxl/vphb.c +++ b/drivers/misc/cxl/vphb.c | |||
@@ -230,6 +230,11 @@ int cxl_pci_vphb_add(struct cxl_afu *afu) | |||
230 | if (phb->bus == NULL) | 230 | if (phb->bus == NULL) |
231 | return -ENXIO; | 231 | return -ENXIO; |
232 | 232 | ||
233 | /* Set release hook on root bus */ | ||
234 | pci_set_host_bridge_release(to_pci_host_bridge(phb->bus->bridge), | ||
235 | pcibios_free_controller_deferred, | ||
236 | (void *) phb); | ||
237 | |||
233 | /* Claim resources. This might need some rework as well depending | 238 | /* Claim resources. This might need some rework as well depending |
234 | * whether we are doing probe-only or not, like assigning unassigned | 239 | * whether we are doing probe-only or not, like assigning unassigned |
235 | * resources etc... | 240 | * resources etc... |
@@ -256,7 +261,10 @@ void cxl_pci_vphb_remove(struct cxl_afu *afu) | |||
256 | afu->phb = NULL; | 261 | afu->phb = NULL; |
257 | 262 | ||
258 | pci_remove_root_bus(phb->bus); | 263 | pci_remove_root_bus(phb->bus); |
259 | pcibios_free_controller(phb); | 264 | /* |
265 | * We don't free phb here - that's handled by | ||
266 | * pcibios_free_controller_deferred() | ||
267 | */ | ||
260 | } | 268 | } |
261 | 269 | ||
262 | static bool _cxl_pci_is_vphb_device(struct pci_controller *phb) | 270 | static bool _cxl_pci_is_vphb_device(struct pci_controller *phb) |
diff --git a/drivers/misc/lkdtm_rodata.c b/drivers/misc/lkdtm_rodata.c index 166b1db3969f..3564477b8c2d 100644 --- a/drivers/misc/lkdtm_rodata.c +++ b/drivers/misc/lkdtm_rodata.c | |||
@@ -4,7 +4,7 @@ | |||
4 | */ | 4 | */ |
5 | #include "lkdtm.h" | 5 | #include "lkdtm.h" |
6 | 6 | ||
7 | void lkdtm_rodata_do_nothing(void) | 7 | void notrace lkdtm_rodata_do_nothing(void) |
8 | { | 8 | { |
9 | /* Does nothing. We just want an architecture agnostic "return". */ | 9 | /* Does nothing. We just want an architecture agnostic "return". */ |
10 | } | 10 | } |
diff --git a/drivers/misc/lkdtm_usercopy.c b/drivers/misc/lkdtm_usercopy.c index 5525a204db93..1dd611423d8b 100644 --- a/drivers/misc/lkdtm_usercopy.c +++ b/drivers/misc/lkdtm_usercopy.c | |||
@@ -9,7 +9,15 @@ | |||
9 | #include <linux/uaccess.h> | 9 | #include <linux/uaccess.h> |
10 | #include <asm/cacheflush.h> | 10 | #include <asm/cacheflush.h> |
11 | 11 | ||
12 | static size_t cache_size = 1024; | 12 | /* |
13 | * Many of the tests here end up using const sizes, but those would | ||
14 | * normally be ignored by hardened usercopy, so force the compiler | ||
15 | * into choosing the non-const path to make sure we trigger the | ||
16 | * hardened usercopy checks by added "unconst" to all the const copies, | ||
17 | * and making sure "cache_size" isn't optimized into a const. | ||
18 | */ | ||
19 | static volatile size_t unconst = 0; | ||
20 | static volatile size_t cache_size = 1024; | ||
13 | static struct kmem_cache *bad_cache; | 21 | static struct kmem_cache *bad_cache; |
14 | 22 | ||
15 | static const unsigned char test_text[] = "This is a test.\n"; | 23 | static const unsigned char test_text[] = "This is a test.\n"; |
@@ -67,14 +75,14 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame) | |||
67 | if (to_user) { | 75 | if (to_user) { |
68 | pr_info("attempting good copy_to_user of local stack\n"); | 76 | pr_info("attempting good copy_to_user of local stack\n"); |
69 | if (copy_to_user((void __user *)user_addr, good_stack, | 77 | if (copy_to_user((void __user *)user_addr, good_stack, |
70 | sizeof(good_stack))) { | 78 | unconst + sizeof(good_stack))) { |
71 | pr_warn("copy_to_user failed unexpectedly?!\n"); | 79 | pr_warn("copy_to_user failed unexpectedly?!\n"); |
72 | goto free_user; | 80 | goto free_user; |
73 | } | 81 | } |
74 | 82 | ||
75 | pr_info("attempting bad copy_to_user of distant stack\n"); | 83 | pr_info("attempting bad copy_to_user of distant stack\n"); |
76 | if (copy_to_user((void __user *)user_addr, bad_stack, | 84 | if (copy_to_user((void __user *)user_addr, bad_stack, |
77 | sizeof(good_stack))) { | 85 | unconst + sizeof(good_stack))) { |
78 | pr_warn("copy_to_user failed, but lacked Oops\n"); | 86 | pr_warn("copy_to_user failed, but lacked Oops\n"); |
79 | goto free_user; | 87 | goto free_user; |
80 | } | 88 | } |
@@ -88,14 +96,14 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame) | |||
88 | 96 | ||
89 | pr_info("attempting good copy_from_user of local stack\n"); | 97 | pr_info("attempting good copy_from_user of local stack\n"); |
90 | if (copy_from_user(good_stack, (void __user *)user_addr, | 98 | if (copy_from_user(good_stack, (void __user *)user_addr, |
91 | sizeof(good_stack))) { | 99 | unconst + sizeof(good_stack))) { |
92 | pr_warn("copy_from_user failed unexpectedly?!\n"); | 100 | pr_warn("copy_from_user failed unexpectedly?!\n"); |
93 | goto free_user; | 101 | goto free_user; |
94 | } | 102 | } |
95 | 103 | ||
96 | pr_info("attempting bad copy_from_user of distant stack\n"); | 104 | pr_info("attempting bad copy_from_user of distant stack\n"); |
97 | if (copy_from_user(bad_stack, (void __user *)user_addr, | 105 | if (copy_from_user(bad_stack, (void __user *)user_addr, |
98 | sizeof(good_stack))) { | 106 | unconst + sizeof(good_stack))) { |
99 | pr_warn("copy_from_user failed, but lacked Oops\n"); | 107 | pr_warn("copy_from_user failed, but lacked Oops\n"); |
100 | goto free_user; | 108 | goto free_user; |
101 | } | 109 | } |
@@ -109,7 +117,7 @@ static void do_usercopy_heap_size(bool to_user) | |||
109 | { | 117 | { |
110 | unsigned long user_addr; | 118 | unsigned long user_addr; |
111 | unsigned char *one, *two; | 119 | unsigned char *one, *two; |
112 | const size_t size = 1024; | 120 | size_t size = unconst + 1024; |
113 | 121 | ||
114 | one = kmalloc(size, GFP_KERNEL); | 122 | one = kmalloc(size, GFP_KERNEL); |
115 | two = kmalloc(size, GFP_KERNEL); | 123 | two = kmalloc(size, GFP_KERNEL); |
@@ -285,13 +293,14 @@ void lkdtm_USERCOPY_KERNEL(void) | |||
285 | 293 | ||
286 | pr_info("attempting good copy_to_user from kernel rodata\n"); | 294 | pr_info("attempting good copy_to_user from kernel rodata\n"); |
287 | if (copy_to_user((void __user *)user_addr, test_text, | 295 | if (copy_to_user((void __user *)user_addr, test_text, |
288 | sizeof(test_text))) { | 296 | unconst + sizeof(test_text))) { |
289 | pr_warn("copy_to_user failed unexpectedly?!\n"); | 297 | pr_warn("copy_to_user failed unexpectedly?!\n"); |
290 | goto free_user; | 298 | goto free_user; |
291 | } | 299 | } |
292 | 300 | ||
293 | pr_info("attempting bad copy_to_user from kernel text\n"); | 301 | pr_info("attempting bad copy_to_user from kernel text\n"); |
294 | if (copy_to_user((void __user *)user_addr, vm_mmap, PAGE_SIZE)) { | 302 | if (copy_to_user((void __user *)user_addr, vm_mmap, |
303 | unconst + PAGE_SIZE)) { | ||
295 | pr_warn("copy_to_user failed, but lacked Oops\n"); | 304 | pr_warn("copy_to_user failed, but lacked Oops\n"); |
296 | goto free_user; | 305 | goto free_user; |
297 | } | 306 | } |
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index e2fb44cc5c37..dc3a854e02d3 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c | |||
@@ -1263,8 +1263,14 @@ static bool mei_me_fw_type_nm(struct pci_dev *pdev) | |||
1263 | static bool mei_me_fw_type_sps(struct pci_dev *pdev) | 1263 | static bool mei_me_fw_type_sps(struct pci_dev *pdev) |
1264 | { | 1264 | { |
1265 | u32 reg; | 1265 | u32 reg; |
1266 | /* Read ME FW Status check for SPS Firmware */ | 1266 | unsigned int devfn; |
1267 | pci_read_config_dword(pdev, PCI_CFG_HFS_1, ®); | 1267 | |
1268 | /* | ||
1269 | * Read ME FW Status register to check for SPS Firmware | ||
1270 | * The SPS FW is only signaled in pci function 0 | ||
1271 | */ | ||
1272 | devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0); | ||
1273 | pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_1, ®); | ||
1268 | trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg); | 1274 | trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg); |
1269 | /* if bits [19:16] = 15, running SPS Firmware */ | 1275 | /* if bits [19:16] = 15, running SPS Firmware */ |
1270 | return (reg & 0xf0000) == 0xf0000; | 1276 | return (reg & 0xf0000) == 0xf0000; |
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 64e64da6da44..71cea9b296b2 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c | |||
@@ -85,8 +85,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = { | |||
85 | 85 | ||
86 | {MEI_PCI_DEVICE(MEI_DEV_ID_SPT, mei_me_pch8_cfg)}, | 86 | {MEI_PCI_DEVICE(MEI_DEV_ID_SPT, mei_me_pch8_cfg)}, |
87 | {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, mei_me_pch8_cfg)}, | 87 | {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, mei_me_pch8_cfg)}, |
88 | {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_cfg)}, | 88 | {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_sps_cfg)}, |
89 | {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_cfg)}, | 89 | {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_sps_cfg)}, |
90 | 90 | ||
91 | {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, mei_me_pch8_cfg)}, | 91 | {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, mei_me_pch8_cfg)}, |
92 | {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, mei_me_pch8_cfg)}, | 92 | {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, mei_me_pch8_cfg)}, |
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 48a5dd740f3b..2206d4477dbb 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c | |||
@@ -1726,6 +1726,7 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req) | |||
1726 | break; | 1726 | break; |
1727 | 1727 | ||
1728 | if (req_op(next) == REQ_OP_DISCARD || | 1728 | if (req_op(next) == REQ_OP_DISCARD || |
1729 | req_op(next) == REQ_OP_SECURE_ERASE || | ||
1729 | req_op(next) == REQ_OP_FLUSH) | 1730 | req_op(next) == REQ_OP_FLUSH) |
1730 | break; | 1731 | break; |
1731 | 1732 | ||
@@ -2150,6 +2151,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) | |||
2150 | struct mmc_card *card = md->queue.card; | 2151 | struct mmc_card *card = md->queue.card; |
2151 | struct mmc_host *host = card->host; | 2152 | struct mmc_host *host = card->host; |
2152 | unsigned long flags; | 2153 | unsigned long flags; |
2154 | bool req_is_special = mmc_req_is_special(req); | ||
2153 | 2155 | ||
2154 | if (req && !mq->mqrq_prev->req) | 2156 | if (req && !mq->mqrq_prev->req) |
2155 | /* claim host only for the first request */ | 2157 | /* claim host only for the first request */ |
@@ -2190,8 +2192,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) | |||
2190 | } | 2192 | } |
2191 | 2193 | ||
2192 | out: | 2194 | out: |
2193 | if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || | 2195 | if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || req_is_special) |
2194 | mmc_req_is_special(req)) | ||
2195 | /* | 2196 | /* |
2196 | * Release host when there are no more requests | 2197 | * Release host when there are no more requests |
2197 | * and after special request(discard, flush) is done. | 2198 | * and after special request(discard, flush) is done. |
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index bf14642a576a..708057261b38 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c | |||
@@ -33,7 +33,8 @@ static int mmc_prep_request(struct request_queue *q, struct request *req) | |||
33 | /* | 33 | /* |
34 | * We only like normal block requests and discards. | 34 | * We only like normal block requests and discards. |
35 | */ | 35 | */ |
36 | if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD) { | 36 | if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD && |
37 | req_op(req) != REQ_OP_SECURE_ERASE) { | ||
37 | blk_dump_rq_flags(req, "MMC bad request"); | 38 | blk_dump_rq_flags(req, "MMC bad request"); |
38 | return BLKPREP_KILL; | 39 | return BLKPREP_KILL; |
39 | } | 40 | } |
@@ -64,6 +65,8 @@ static int mmc_queue_thread(void *d) | |||
64 | spin_unlock_irq(q->queue_lock); | 65 | spin_unlock_irq(q->queue_lock); |
65 | 66 | ||
66 | if (req || mq->mqrq_prev->req) { | 67 | if (req || mq->mqrq_prev->req) { |
68 | bool req_is_special = mmc_req_is_special(req); | ||
69 | |||
67 | set_current_state(TASK_RUNNING); | 70 | set_current_state(TASK_RUNNING); |
68 | mq->issue_fn(mq, req); | 71 | mq->issue_fn(mq, req); |
69 | cond_resched(); | 72 | cond_resched(); |
@@ -79,7 +82,7 @@ static int mmc_queue_thread(void *d) | |||
79 | * has been finished. Do not assign it to previous | 82 | * has been finished. Do not assign it to previous |
80 | * request. | 83 | * request. |
81 | */ | 84 | */ |
82 | if (mmc_req_is_special(req)) | 85 | if (req_is_special) |
83 | mq->mqrq_cur->req = NULL; | 86 | mq->mqrq_cur->req = NULL; |
84 | 87 | ||
85 | mq->mqrq_prev->brq.mrq.data = NULL; | 88 | mq->mqrq_prev->brq.mrq.data = NULL; |
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h index d62531124d54..fee5e1271465 100644 --- a/drivers/mmc/card/queue.h +++ b/drivers/mmc/card/queue.h | |||
@@ -4,7 +4,9 @@ | |||
4 | static inline bool mmc_req_is_special(struct request *req) | 4 | static inline bool mmc_req_is_special(struct request *req) |
5 | { | 5 | { |
6 | return req && | 6 | return req && |
7 | (req_op(req) == REQ_OP_FLUSH || req_op(req) == REQ_OP_DISCARD); | 7 | (req_op(req) == REQ_OP_FLUSH || |
8 | req_op(req) == REQ_OP_DISCARD || | ||
9 | req_op(req) == REQ_OP_SECURE_ERASE); | ||
8 | } | 10 | } |
9 | 11 | ||
10 | struct request; | 12 | struct request; |
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 32380d5d4f6b..767af2026f8b 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c | |||
@@ -1112,11 +1112,12 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit) | |||
1112 | 1112 | ||
1113 | div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0; | 1113 | div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0; |
1114 | 1114 | ||
1115 | dev_info(&slot->mmc->class_dev, | 1115 | if (clock != slot->__clk_old || force_clkinit) |
1116 | "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n", | 1116 | dev_info(&slot->mmc->class_dev, |
1117 | slot->id, host->bus_hz, clock, | 1117 | "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n", |
1118 | div ? ((host->bus_hz / div) >> 1) : | 1118 | slot->id, host->bus_hz, clock, |
1119 | host->bus_hz, div); | 1119 | div ? ((host->bus_hz / div) >> 1) : |
1120 | host->bus_hz, div); | ||
1120 | 1121 | ||
1121 | /* disable clock */ | 1122 | /* disable clock */ |
1122 | mci_writel(host, CLKENA, 0); | 1123 | mci_writel(host, CLKENA, 0); |
@@ -1139,6 +1140,9 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit) | |||
1139 | 1140 | ||
1140 | /* inform CIU */ | 1141 | /* inform CIU */ |
1141 | mci_send_cmd(slot, sdmmc_cmd_bits, 0); | 1142 | mci_send_cmd(slot, sdmmc_cmd_bits, 0); |
1143 | |||
1144 | /* keep the last clock value that was requested from core */ | ||
1145 | slot->__clk_old = clock; | ||
1142 | } | 1146 | } |
1143 | 1147 | ||
1144 | host->current_speed = clock; | 1148 | host->current_speed = clock; |
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h index 9e740bc232a8..e8cd2dec3263 100644 --- a/drivers/mmc/host/dw_mmc.h +++ b/drivers/mmc/host/dw_mmc.h | |||
@@ -249,6 +249,8 @@ extern int dw_mci_resume(struct dw_mci *host); | |||
249 | * @queue_node: List node for placing this node in the @queue list of | 249 | * @queue_node: List node for placing this node in the @queue list of |
250 | * &struct dw_mci. | 250 | * &struct dw_mci. |
251 | * @clock: Clock rate configured by set_ios(). Protected by host->lock. | 251 | * @clock: Clock rate configured by set_ios(). Protected by host->lock. |
252 | * @__clk_old: The last clock value that was requested from core. | ||
253 | * Keeping track of this helps us to avoid spamming the console. | ||
252 | * @flags: Random state bits associated with the slot. | 254 | * @flags: Random state bits associated with the slot. |
253 | * @id: Number of this slot. | 255 | * @id: Number of this slot. |
254 | * @sdio_id: Number of this slot in the SDIO interrupt registers. | 256 | * @sdio_id: Number of this slot in the SDIO interrupt registers. |
@@ -263,6 +265,7 @@ struct dw_mci_slot { | |||
263 | struct list_head queue_node; | 265 | struct list_head queue_node; |
264 | 266 | ||
265 | unsigned int clock; | 267 | unsigned int clock; |
268 | unsigned int __clk_old; | ||
266 | 269 | ||
267 | unsigned long flags; | 270 | unsigned long flags; |
268 | #define DW_MMC_CARD_PRESENT 0 | 271 | #define DW_MMC_CARD_PRESENT 0 |
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index f23d65eb070d..be3c49fa7382 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c | |||
@@ -1016,14 +1016,16 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req) | |||
1016 | 1016 | ||
1017 | /* Only reconfigure if we have a different burst size */ | 1017 | /* Only reconfigure if we have a different burst size */ |
1018 | if (*bp != burst) { | 1018 | if (*bp != burst) { |
1019 | struct dma_slave_config cfg; | 1019 | struct dma_slave_config cfg = { |
1020 | 1020 | .src_addr = host->phys_base + | |
1021 | cfg.src_addr = host->phys_base + OMAP_MMC_REG(host, DATA); | 1021 | OMAP_MMC_REG(host, DATA), |
1022 | cfg.dst_addr = host->phys_base + OMAP_MMC_REG(host, DATA); | 1022 | .dst_addr = host->phys_base + |
1023 | cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; | 1023 | OMAP_MMC_REG(host, DATA), |
1024 | cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; | 1024 | .src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES, |
1025 | cfg.src_maxburst = burst; | 1025 | .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES, |
1026 | cfg.dst_maxburst = burst; | 1026 | .src_maxburst = burst, |
1027 | .dst_maxburst = burst, | ||
1028 | }; | ||
1027 | 1029 | ||
1028 | if (dmaengine_slave_config(c, &cfg)) | 1030 | if (dmaengine_slave_config(c, &cfg)) |
1029 | goto use_pio; | 1031 | goto use_pio; |
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 24ebc9a8de89..5f2f24a7360d 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c | |||
@@ -1409,11 +1409,18 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host, | |||
1409 | static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host, | 1409 | static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host, |
1410 | struct mmc_request *req) | 1410 | struct mmc_request *req) |
1411 | { | 1411 | { |
1412 | struct dma_slave_config cfg; | ||
1413 | struct dma_async_tx_descriptor *tx; | 1412 | struct dma_async_tx_descriptor *tx; |
1414 | int ret = 0, i; | 1413 | int ret = 0, i; |
1415 | struct mmc_data *data = req->data; | 1414 | struct mmc_data *data = req->data; |
1416 | struct dma_chan *chan; | 1415 | struct dma_chan *chan; |
1416 | struct dma_slave_config cfg = { | ||
1417 | .src_addr = host->mapbase + OMAP_HSMMC_DATA, | ||
1418 | .dst_addr = host->mapbase + OMAP_HSMMC_DATA, | ||
1419 | .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, | ||
1420 | .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, | ||
1421 | .src_maxburst = data->blksz / 4, | ||
1422 | .dst_maxburst = data->blksz / 4, | ||
1423 | }; | ||
1417 | 1424 | ||
1418 | /* Sanity check: all the SG entries must be aligned by block size. */ | 1425 | /* Sanity check: all the SG entries must be aligned by block size. */ |
1419 | for (i = 0; i < data->sg_len; i++) { | 1426 | for (i = 0; i < data->sg_len; i++) { |
@@ -1433,13 +1440,6 @@ static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host, | |||
1433 | 1440 | ||
1434 | chan = omap_hsmmc_get_dma_chan(host, data); | 1441 | chan = omap_hsmmc_get_dma_chan(host, data); |
1435 | 1442 | ||
1436 | cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA; | ||
1437 | cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA; | ||
1438 | cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
1439 | cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
1440 | cfg.src_maxburst = data->blksz / 4; | ||
1441 | cfg.dst_maxburst = data->blksz / 4; | ||
1442 | |||
1443 | ret = dmaengine_slave_config(chan, &cfg); | 1443 | ret = dmaengine_slave_config(chan, &cfg); |
1444 | if (ret) | 1444 | if (ret) |
1445 | return ret; | 1445 | return ret; |
diff --git a/drivers/mmc/host/sdhci-st.c b/drivers/mmc/host/sdhci-st.c index c95ba83366a0..ed92ce729dde 100644 --- a/drivers/mmc/host/sdhci-st.c +++ b/drivers/mmc/host/sdhci-st.c | |||
@@ -28,6 +28,7 @@ | |||
28 | 28 | ||
29 | struct st_mmc_platform_data { | 29 | struct st_mmc_platform_data { |
30 | struct reset_control *rstc; | 30 | struct reset_control *rstc; |
31 | struct clk *icnclk; | ||
31 | void __iomem *top_ioaddr; | 32 | void __iomem *top_ioaddr; |
32 | }; | 33 | }; |
33 | 34 | ||
@@ -353,7 +354,7 @@ static int sdhci_st_probe(struct platform_device *pdev) | |||
353 | struct sdhci_host *host; | 354 | struct sdhci_host *host; |
354 | struct st_mmc_platform_data *pdata; | 355 | struct st_mmc_platform_data *pdata; |
355 | struct sdhci_pltfm_host *pltfm_host; | 356 | struct sdhci_pltfm_host *pltfm_host; |
356 | struct clk *clk; | 357 | struct clk *clk, *icnclk; |
357 | int ret = 0; | 358 | int ret = 0; |
358 | u16 host_version; | 359 | u16 host_version; |
359 | struct resource *res; | 360 | struct resource *res; |
@@ -365,6 +366,11 @@ static int sdhci_st_probe(struct platform_device *pdev) | |||
365 | return PTR_ERR(clk); | 366 | return PTR_ERR(clk); |
366 | } | 367 | } |
367 | 368 | ||
369 | /* ICN clock isn't compulsory, but use it if it's provided. */ | ||
370 | icnclk = devm_clk_get(&pdev->dev, "icn"); | ||
371 | if (IS_ERR(icnclk)) | ||
372 | icnclk = NULL; | ||
373 | |||
368 | rstc = devm_reset_control_get(&pdev->dev, NULL); | 374 | rstc = devm_reset_control_get(&pdev->dev, NULL); |
369 | if (IS_ERR(rstc)) | 375 | if (IS_ERR(rstc)) |
370 | rstc = NULL; | 376 | rstc = NULL; |
@@ -389,6 +395,7 @@ static int sdhci_st_probe(struct platform_device *pdev) | |||
389 | } | 395 | } |
390 | 396 | ||
391 | clk_prepare_enable(clk); | 397 | clk_prepare_enable(clk); |
398 | clk_prepare_enable(icnclk); | ||
392 | 399 | ||
393 | /* Configure the FlashSS Top registers for setting eMMC TX/RX delay */ | 400 | /* Configure the FlashSS Top registers for setting eMMC TX/RX delay */ |
394 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, | 401 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, |
@@ -400,6 +407,7 @@ static int sdhci_st_probe(struct platform_device *pdev) | |||
400 | } | 407 | } |
401 | 408 | ||
402 | pltfm_host->clk = clk; | 409 | pltfm_host->clk = clk; |
410 | pdata->icnclk = icnclk; | ||
403 | 411 | ||
404 | /* Configure the Arasan HC inside the flashSS */ | 412 | /* Configure the Arasan HC inside the flashSS */ |
405 | st_mmcss_cconfig(np, host); | 413 | st_mmcss_cconfig(np, host); |
@@ -422,6 +430,7 @@ static int sdhci_st_probe(struct platform_device *pdev) | |||
422 | return 0; | 430 | return 0; |
423 | 431 | ||
424 | err_out: | 432 | err_out: |
433 | clk_disable_unprepare(icnclk); | ||
425 | clk_disable_unprepare(clk); | 434 | clk_disable_unprepare(clk); |
426 | err_of: | 435 | err_of: |
427 | sdhci_pltfm_free(pdev); | 436 | sdhci_pltfm_free(pdev); |
@@ -442,6 +451,8 @@ static int sdhci_st_remove(struct platform_device *pdev) | |||
442 | 451 | ||
443 | ret = sdhci_pltfm_unregister(pdev); | 452 | ret = sdhci_pltfm_unregister(pdev); |
444 | 453 | ||
454 | clk_disable_unprepare(pdata->icnclk); | ||
455 | |||
445 | if (rstc) | 456 | if (rstc) |
446 | reset_control_assert(rstc); | 457 | reset_control_assert(rstc); |
447 | 458 | ||
@@ -462,6 +473,7 @@ static int sdhci_st_suspend(struct device *dev) | |||
462 | if (pdata->rstc) | 473 | if (pdata->rstc) |
463 | reset_control_assert(pdata->rstc); | 474 | reset_control_assert(pdata->rstc); |
464 | 475 | ||
476 | clk_disable_unprepare(pdata->icnclk); | ||
465 | clk_disable_unprepare(pltfm_host->clk); | 477 | clk_disable_unprepare(pltfm_host->clk); |
466 | out: | 478 | out: |
467 | return ret; | 479 | return ret; |
@@ -475,6 +487,7 @@ static int sdhci_st_resume(struct device *dev) | |||
475 | struct device_node *np = dev->of_node; | 487 | struct device_node *np = dev->of_node; |
476 | 488 | ||
477 | clk_prepare_enable(pltfm_host->clk); | 489 | clk_prepare_enable(pltfm_host->clk); |
490 | clk_prepare_enable(pdata->icnclk); | ||
478 | 491 | ||
479 | if (pdata->rstc) | 492 | if (pdata->rstc) |
480 | reset_control_deassert(pdata->rstc); | 493 | reset_control_deassert(pdata->rstc); |
diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c index 25a4fbd4d24a..d54f666417e1 100644 --- a/drivers/mtd/nand/mtk_ecc.c +++ b/drivers/mtd/nand/mtk_ecc.c | |||
@@ -366,7 +366,8 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config, | |||
366 | u8 *data, u32 bytes) | 366 | u8 *data, u32 bytes) |
367 | { | 367 | { |
368 | dma_addr_t addr; | 368 | dma_addr_t addr; |
369 | u32 *p, len, i; | 369 | u8 *p; |
370 | u32 len, i, val; | ||
370 | int ret = 0; | 371 | int ret = 0; |
371 | 372 | ||
372 | addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE); | 373 | addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE); |
@@ -392,11 +393,14 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config, | |||
392 | 393 | ||
393 | /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */ | 394 | /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */ |
394 | len = (config->strength * ECC_PARITY_BITS + 7) >> 3; | 395 | len = (config->strength * ECC_PARITY_BITS + 7) >> 3; |
395 | p = (u32 *)(data + bytes); | 396 | p = data + bytes; |
396 | 397 | ||
397 | /* write the parity bytes generated by the ECC back to the OOB region */ | 398 | /* write the parity bytes generated by the ECC back to the OOB region */ |
398 | for (i = 0; i < len; i++) | 399 | for (i = 0; i < len; i++) { |
399 | p[i] = readl(ecc->regs + ECC_ENCPAR(i)); | 400 | if ((i % 4) == 0) |
401 | val = readl(ecc->regs + ECC_ENCPAR(i / 4)); | ||
402 | p[i] = (val >> ((i % 4) * 8)) & 0xff; | ||
403 | } | ||
400 | timeout: | 404 | timeout: |
401 | 405 | ||
402 | dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE); | 406 | dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE); |
diff --git a/drivers/mtd/nand/mtk_nand.c b/drivers/mtd/nand/mtk_nand.c index ddaa2acb9dd7..5223a2182ee4 100644 --- a/drivers/mtd/nand/mtk_nand.c +++ b/drivers/mtd/nand/mtk_nand.c | |||
@@ -93,6 +93,9 @@ | |||
93 | #define NFI_FSM_MASK (0xf << 16) | 93 | #define NFI_FSM_MASK (0xf << 16) |
94 | #define NFI_ADDRCNTR (0x70) | 94 | #define NFI_ADDRCNTR (0x70) |
95 | #define CNTR_MASK GENMASK(16, 12) | 95 | #define CNTR_MASK GENMASK(16, 12) |
96 | #define ADDRCNTR_SEC_SHIFT (12) | ||
97 | #define ADDRCNTR_SEC(val) \ | ||
98 | (((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT) | ||
96 | #define NFI_STRADDR (0x80) | 99 | #define NFI_STRADDR (0x80) |
97 | #define NFI_BYTELEN (0x84) | 100 | #define NFI_BYTELEN (0x84) |
98 | #define NFI_CSEL (0x90) | 101 | #define NFI_CSEL (0x90) |
@@ -699,7 +702,7 @@ static int mtk_nfc_do_write_page(struct mtd_info *mtd, struct nand_chip *chip, | |||
699 | } | 702 | } |
700 | 703 | ||
701 | ret = readl_poll_timeout_atomic(nfc->regs + NFI_ADDRCNTR, reg, | 704 | ret = readl_poll_timeout_atomic(nfc->regs + NFI_ADDRCNTR, reg, |
702 | (reg & CNTR_MASK) >= chip->ecc.steps, | 705 | ADDRCNTR_SEC(reg) >= chip->ecc.steps, |
703 | 10, MTK_TIMEOUT); | 706 | 10, MTK_TIMEOUT); |
704 | if (ret) | 707 | if (ret) |
705 | dev_err(dev, "hwecc write timeout\n"); | 708 | dev_err(dev, "hwecc write timeout\n"); |
@@ -902,7 +905,7 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, | |||
902 | dev_warn(nfc->dev, "read ahb/dma done timeout\n"); | 905 | dev_warn(nfc->dev, "read ahb/dma done timeout\n"); |
903 | 906 | ||
904 | rc = readl_poll_timeout_atomic(nfc->regs + NFI_BYTELEN, reg, | 907 | rc = readl_poll_timeout_atomic(nfc->regs + NFI_BYTELEN, reg, |
905 | (reg & CNTR_MASK) >= sectors, 10, | 908 | ADDRCNTR_SEC(reg) >= sectors, 10, |
906 | MTK_TIMEOUT); | 909 | MTK_TIMEOUT); |
907 | if (rc < 0) { | 910 | if (rc < 0) { |
908 | dev_err(nfc->dev, "subpage done timeout\n"); | 911 | dev_err(nfc->dev, "subpage done timeout\n"); |
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c index 5173fadc9a4e..57cbe2b83849 100644 --- a/drivers/mtd/nand/mxc_nand.c +++ b/drivers/mtd/nand/mxc_nand.c | |||
@@ -943,7 +943,7 @@ static int mxc_v2_ooblayout_free(struct mtd_info *mtd, int section, | |||
943 | struct nand_chip *nand_chip = mtd_to_nand(mtd); | 943 | struct nand_chip *nand_chip = mtd_to_nand(mtd); |
944 | int stepsize = nand_chip->ecc.bytes == 9 ? 16 : 26; | 944 | int stepsize = nand_chip->ecc.bytes == 9 ? 16 : 26; |
945 | 945 | ||
946 | if (section > nand_chip->ecc.steps) | 946 | if (section >= nand_chip->ecc.steps) |
947 | return -ERANGE; | 947 | return -ERANGE; |
948 | 948 | ||
949 | if (!section) { | 949 | if (!section) { |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 1f276fa30ba6..9599ed6f1213 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -152,7 +152,7 @@ module_param(lacp_rate, charp, 0); | |||
152 | MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; " | 152 | MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; " |
153 | "0 for slow, 1 for fast"); | 153 | "0 for slow, 1 for fast"); |
154 | module_param(ad_select, charp, 0); | 154 | module_param(ad_select, charp, 0); |
155 | MODULE_PARM_DESC(ad_select, "803.ad aggregation selection logic; " | 155 | MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; " |
156 | "0 for stable (default), 1 for bandwidth, " | 156 | "0 for stable (default), 1 for bandwidth, " |
157 | "2 for count"); | 157 | "2 for count"); |
158 | module_param(min_links, int, 0); | 158 | module_param(min_links, int, 0); |
@@ -1341,9 +1341,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1341 | slave_dev->name); | 1341 | slave_dev->name); |
1342 | } | 1342 | } |
1343 | 1343 | ||
1344 | /* already enslaved */ | 1344 | /* already in-use? */ |
1345 | if (slave_dev->flags & IFF_SLAVE) { | 1345 | if (netdev_is_rx_handler_busy(slave_dev)) { |
1346 | netdev_dbg(bond_dev, "Error: Device was already enslaved\n"); | 1346 | netdev_err(bond_dev, |
1347 | "Error: Device is in use and cannot be enslaved\n"); | ||
1347 | return -EBUSY; | 1348 | return -EBUSY; |
1348 | } | 1349 | } |
1349 | 1350 | ||
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 41c0fc9f3b14..16f7cadda5c3 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c | |||
@@ -1268,11 +1268,10 @@ static int __maybe_unused flexcan_suspend(struct device *device) | |||
1268 | struct flexcan_priv *priv = netdev_priv(dev); | 1268 | struct flexcan_priv *priv = netdev_priv(dev); |
1269 | int err; | 1269 | int err; |
1270 | 1270 | ||
1271 | err = flexcan_chip_disable(priv); | ||
1272 | if (err) | ||
1273 | return err; | ||
1274 | |||
1275 | if (netif_running(dev)) { | 1271 | if (netif_running(dev)) { |
1272 | err = flexcan_chip_disable(priv); | ||
1273 | if (err) | ||
1274 | return err; | ||
1276 | netif_stop_queue(dev); | 1275 | netif_stop_queue(dev); |
1277 | netif_device_detach(dev); | 1276 | netif_device_detach(dev); |
1278 | } | 1277 | } |
@@ -1285,13 +1284,17 @@ static int __maybe_unused flexcan_resume(struct device *device) | |||
1285 | { | 1284 | { |
1286 | struct net_device *dev = dev_get_drvdata(device); | 1285 | struct net_device *dev = dev_get_drvdata(device); |
1287 | struct flexcan_priv *priv = netdev_priv(dev); | 1286 | struct flexcan_priv *priv = netdev_priv(dev); |
1287 | int err; | ||
1288 | 1288 | ||
1289 | priv->can.state = CAN_STATE_ERROR_ACTIVE; | 1289 | priv->can.state = CAN_STATE_ERROR_ACTIVE; |
1290 | if (netif_running(dev)) { | 1290 | if (netif_running(dev)) { |
1291 | netif_device_attach(dev); | 1291 | netif_device_attach(dev); |
1292 | netif_start_queue(dev); | 1292 | netif_start_queue(dev); |
1293 | err = flexcan_chip_enable(priv); | ||
1294 | if (err) | ||
1295 | return err; | ||
1293 | } | 1296 | } |
1294 | return flexcan_chip_enable(priv); | 1297 | return 0; |
1295 | } | 1298 | } |
1296 | 1299 | ||
1297 | static SIMPLE_DEV_PM_OPS(flexcan_pm_ops, flexcan_suspend, flexcan_resume); | 1300 | static SIMPLE_DEV_PM_OPS(flexcan_pm_ops, flexcan_suspend, flexcan_resume); |
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c index 2d1d22eec750..368bb0710d8f 100644 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c | |||
@@ -81,6 +81,10 @@ | |||
81 | #define IFI_CANFD_TIME_SET_TIMEA_4_12_6_6 BIT(15) | 81 | #define IFI_CANFD_TIME_SET_TIMEA_4_12_6_6 BIT(15) |
82 | 82 | ||
83 | #define IFI_CANFD_TDELAY 0x1c | 83 | #define IFI_CANFD_TDELAY 0x1c |
84 | #define IFI_CANFD_TDELAY_DEFAULT 0xb | ||
85 | #define IFI_CANFD_TDELAY_MASK 0x3fff | ||
86 | #define IFI_CANFD_TDELAY_ABS BIT(14) | ||
87 | #define IFI_CANFD_TDELAY_EN BIT(15) | ||
84 | 88 | ||
85 | #define IFI_CANFD_ERROR 0x20 | 89 | #define IFI_CANFD_ERROR 0x20 |
86 | #define IFI_CANFD_ERROR_TX_OFFSET 0 | 90 | #define IFI_CANFD_ERROR_TX_OFFSET 0 |
@@ -641,7 +645,7 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev) | |||
641 | struct ifi_canfd_priv *priv = netdev_priv(ndev); | 645 | struct ifi_canfd_priv *priv = netdev_priv(ndev); |
642 | const struct can_bittiming *bt = &priv->can.bittiming; | 646 | const struct can_bittiming *bt = &priv->can.bittiming; |
643 | const struct can_bittiming *dbt = &priv->can.data_bittiming; | 647 | const struct can_bittiming *dbt = &priv->can.data_bittiming; |
644 | u16 brp, sjw, tseg1, tseg2; | 648 | u16 brp, sjw, tseg1, tseg2, tdc; |
645 | 649 | ||
646 | /* Configure bit timing */ | 650 | /* Configure bit timing */ |
647 | brp = bt->brp - 2; | 651 | brp = bt->brp - 2; |
@@ -664,6 +668,11 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev) | |||
664 | (brp << IFI_CANFD_TIME_PRESCALE_OFF) | | 668 | (brp << IFI_CANFD_TIME_PRESCALE_OFF) | |
665 | (sjw << IFI_CANFD_TIME_SJW_OFF_7_9_8_8), | 669 | (sjw << IFI_CANFD_TIME_SJW_OFF_7_9_8_8), |
666 | priv->base + IFI_CANFD_FTIME); | 670 | priv->base + IFI_CANFD_FTIME); |
671 | |||
672 | /* Configure transmitter delay */ | ||
673 | tdc = (dbt->brp * (dbt->phase_seg1 + 1)) & IFI_CANFD_TDELAY_MASK; | ||
674 | writel(IFI_CANFD_TDELAY_EN | IFI_CANFD_TDELAY_ABS | tdc, | ||
675 | priv->base + IFI_CANFD_TDELAY); | ||
667 | } | 676 | } |
668 | 677 | ||
669 | static void ifi_canfd_set_filter(struct net_device *ndev, const u32 id, | 678 | static void ifi_canfd_set_filter(struct net_device *ndev, const u32 id, |
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h index 8f12bddd5dc9..a0b453ea34c9 100644 --- a/drivers/net/dsa/b53/b53_regs.h +++ b/drivers/net/dsa/b53/b53_regs.h | |||
@@ -258,7 +258,7 @@ | |||
258 | * BCM5325 and BCM5365 share most definitions below | 258 | * BCM5325 and BCM5365 share most definitions below |
259 | */ | 259 | */ |
260 | #define B53_ARLTBL_MAC_VID_ENTRY(n) (0x10 * (n)) | 260 | #define B53_ARLTBL_MAC_VID_ENTRY(n) (0x10 * (n)) |
261 | #define ARLTBL_MAC_MASK 0xffffffffffff | 261 | #define ARLTBL_MAC_MASK 0xffffffffffffULL |
262 | #define ARLTBL_VID_S 48 | 262 | #define ARLTBL_VID_S 48 |
263 | #define ARLTBL_VID_MASK_25 0xff | 263 | #define ARLTBL_VID_MASK_25 0xff |
264 | #define ARLTBL_VID_MASK 0xfff | 264 | #define ARLTBL_VID_MASK 0xfff |
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index 463bed8cbe4c..dd446e466699 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h | |||
@@ -205,8 +205,8 @@ static inline void name##_writeq(struct bcm_sf2_priv *priv, u64 val, \ | |||
205 | static inline void intrl2_##which##_mask_clear(struct bcm_sf2_priv *priv, \ | 205 | static inline void intrl2_##which##_mask_clear(struct bcm_sf2_priv *priv, \ |
206 | u32 mask) \ | 206 | u32 mask) \ |
207 | { \ | 207 | { \ |
208 | intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \ | ||
209 | priv->irq##which##_mask &= ~(mask); \ | 208 | priv->irq##which##_mask &= ~(mask); \ |
209 | intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \ | ||
210 | } \ | 210 | } \ |
211 | static inline void intrl2_##which##_mask_set(struct bcm_sf2_priv *priv, \ | 211 | static inline void intrl2_##which##_mask_set(struct bcm_sf2_priv *priv, \ |
212 | u32 mask) \ | 212 | u32 mask) \ |
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index d36aedde8cb9..710679067594 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c | |||
@@ -2656,15 +2656,19 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) | |||
2656 | return ret; | 2656 | return ret; |
2657 | } | 2657 | } |
2658 | 2658 | ||
2659 | /* Rate Control: disable ingress rate limiting. */ | ||
2659 | if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) || | 2660 | if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) || |
2660 | mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) || | 2661 | mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) || |
2661 | mv88e6xxx_6185_family(chip) || mv88e6xxx_6095_family(chip) || | ||
2662 | mv88e6xxx_6320_family(chip)) { | 2662 | mv88e6xxx_6320_family(chip)) { |
2663 | /* Rate Control: disable ingress rate limiting. */ | ||
2664 | ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), | 2663 | ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), |
2665 | PORT_RATE_CONTROL, 0x0001); | 2664 | PORT_RATE_CONTROL, 0x0001); |
2666 | if (ret) | 2665 | if (ret) |
2667 | return ret; | 2666 | return ret; |
2667 | } else if (mv88e6xxx_6185_family(chip) || mv88e6xxx_6095_family(chip)) { | ||
2668 | ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), | ||
2669 | PORT_RATE_CONTROL, 0x0000); | ||
2670 | if (ret) | ||
2671 | return ret; | ||
2668 | } | 2672 | } |
2669 | 2673 | ||
2670 | /* Port Control 1: disable trunking, disable sending | 2674 | /* Port Control 1: disable trunking, disable sending |
@@ -3187,6 +3191,7 @@ static int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr) | |||
3187 | return err; | 3191 | return err; |
3188 | } | 3192 | } |
3189 | 3193 | ||
3194 | #ifdef CONFIG_NET_DSA_HWMON | ||
3190 | static int mv88e6xxx_mdio_page_read(struct dsa_switch *ds, int port, int page, | 3195 | static int mv88e6xxx_mdio_page_read(struct dsa_switch *ds, int port, int page, |
3191 | int reg) | 3196 | int reg) |
3192 | { | 3197 | { |
@@ -3212,6 +3217,7 @@ static int mv88e6xxx_mdio_page_write(struct dsa_switch *ds, int port, int page, | |||
3212 | 3217 | ||
3213 | return ret; | 3218 | return ret; |
3214 | } | 3219 | } |
3220 | #endif | ||
3215 | 3221 | ||
3216 | static int mv88e6xxx_port_to_mdio_addr(struct mv88e6xxx_chip *chip, int port) | 3222 | static int mv88e6xxx_port_to_mdio_addr(struct mv88e6xxx_chip *chip, int port) |
3217 | { | 3223 | { |
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index 37a0f463b8de..18bb9556dd00 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | |||
@@ -793,6 +793,8 @@ int xgene_enet_phy_connect(struct net_device *ndev) | |||
793 | netdev_err(ndev, "Could not connect to PHY\n"); | 793 | netdev_err(ndev, "Could not connect to PHY\n"); |
794 | return -ENODEV; | 794 | return -ENODEV; |
795 | } | 795 | } |
796 | #else | ||
797 | return -ENODEV; | ||
796 | #endif | 798 | #endif |
797 | } | 799 | } |
798 | 800 | ||
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index 4bff0f3040df..b0da9693f28a 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c | |||
@@ -771,8 +771,10 @@ int arc_emac_probe(struct net_device *ndev, int interface) | |||
771 | priv->dev = dev; | 771 | priv->dev = dev; |
772 | 772 | ||
773 | priv->regs = devm_ioremap_resource(dev, &res_regs); | 773 | priv->regs = devm_ioremap_resource(dev, &res_regs); |
774 | if (IS_ERR(priv->regs)) | 774 | if (IS_ERR(priv->regs)) { |
775 | return PTR_ERR(priv->regs); | 775 | err = PTR_ERR(priv->regs); |
776 | goto out_put_node; | ||
777 | } | ||
776 | 778 | ||
777 | dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs); | 779 | dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs); |
778 | 780 | ||
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 6453148d066a..4eb17daefc4f 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c | |||
@@ -1545,6 +1545,8 @@ static const struct pci_device_id alx_pci_tbl[] = { | |||
1545 | .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, | 1545 | .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, |
1546 | { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2400), | 1546 | { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2400), |
1547 | .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, | 1547 | .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, |
1548 | { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2500), | ||
1549 | .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, | ||
1548 | { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162), | 1550 | { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162), |
1549 | .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, | 1551 | .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, |
1550 | { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) }, | 1552 | { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) }, |
diff --git a/drivers/net/ethernet/atheros/alx/reg.h b/drivers/net/ethernet/atheros/alx/reg.h index 0959e6824cb6..1fc2d852249f 100644 --- a/drivers/net/ethernet/atheros/alx/reg.h +++ b/drivers/net/ethernet/atheros/alx/reg.h | |||
@@ -38,6 +38,7 @@ | |||
38 | #define ALX_DEV_ID_AR8161 0x1091 | 38 | #define ALX_DEV_ID_AR8161 0x1091 |
39 | #define ALX_DEV_ID_E2200 0xe091 | 39 | #define ALX_DEV_ID_E2200 0xe091 |
40 | #define ALX_DEV_ID_E2400 0xe0a1 | 40 | #define ALX_DEV_ID_E2400 0xe0a1 |
41 | #define ALX_DEV_ID_E2500 0xe0b1 | ||
41 | #define ALX_DEV_ID_AR8162 0x1090 | 42 | #define ALX_DEV_ID_AR8162 0x1090 |
42 | #define ALX_DEV_ID_AR8171 0x10A1 | 43 | #define ALX_DEV_ID_AR8171 0x10A1 |
43 | #define ALX_DEV_ID_AR8172 0x10A0 | 44 | #define ALX_DEV_ID_AR8172 0x10A0 |
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c index 9a9745c4047c..625235db644f 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c | |||
@@ -159,7 +159,7 @@ static int bgmac_probe(struct bcma_device *core) | |||
159 | 159 | ||
160 | if (!bgmac_is_bcm4707_family(core)) { | 160 | if (!bgmac_is_bcm4707_family(core)) { |
161 | mii_bus = bcma_mdio_mii_register(core, bgmac->phyaddr); | 161 | mii_bus = bcma_mdio_mii_register(core, bgmac->phyaddr); |
162 | if (!IS_ERR(mii_bus)) { | 162 | if (IS_ERR(mii_bus)) { |
163 | err = PTR_ERR(mii_bus); | 163 | err = PTR_ERR(mii_bus); |
164 | goto err; | 164 | goto err; |
165 | } | 165 | } |
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 8fc3f3c137f8..505ceaf451e2 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c | |||
@@ -6356,10 +6356,6 @@ bnx2_open(struct net_device *dev) | |||
6356 | struct bnx2 *bp = netdev_priv(dev); | 6356 | struct bnx2 *bp = netdev_priv(dev); |
6357 | int rc; | 6357 | int rc; |
6358 | 6358 | ||
6359 | rc = bnx2_request_firmware(bp); | ||
6360 | if (rc < 0) | ||
6361 | goto out; | ||
6362 | |||
6363 | netif_carrier_off(dev); | 6359 | netif_carrier_off(dev); |
6364 | 6360 | ||
6365 | bnx2_disable_int(bp); | 6361 | bnx2_disable_int(bp); |
@@ -6428,7 +6424,6 @@ open_err: | |||
6428 | bnx2_free_irq(bp); | 6424 | bnx2_free_irq(bp); |
6429 | bnx2_free_mem(bp); | 6425 | bnx2_free_mem(bp); |
6430 | bnx2_del_napi(bp); | 6426 | bnx2_del_napi(bp); |
6431 | bnx2_release_firmware(bp); | ||
6432 | goto out; | 6427 | goto out; |
6433 | } | 6428 | } |
6434 | 6429 | ||
@@ -8575,6 +8570,12 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
8575 | 8570 | ||
8576 | pci_set_drvdata(pdev, dev); | 8571 | pci_set_drvdata(pdev, dev); |
8577 | 8572 | ||
8573 | rc = bnx2_request_firmware(bp); | ||
8574 | if (rc < 0) | ||
8575 | goto error; | ||
8576 | |||
8577 | |||
8578 | bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); | ||
8578 | memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN); | 8579 | memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN); |
8579 | 8580 | ||
8580 | dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | | 8581 | dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | |
@@ -8607,6 +8608,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
8607 | return 0; | 8608 | return 0; |
8608 | 8609 | ||
8609 | error: | 8610 | error: |
8611 | bnx2_release_firmware(bp); | ||
8610 | pci_iounmap(pdev, bp->regview); | 8612 | pci_iounmap(pdev, bp->regview); |
8611 | pci_release_regions(pdev); | 8613 | pci_release_regions(pdev); |
8612 | pci_disable_device(pdev); | 8614 | pci_disable_device(pdev); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 97e892511666..fa3386bb14f7 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -772,6 +772,11 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl) | |||
772 | (bp->common.bc_ver & 0xff00) >> 8, | 772 | (bp->common.bc_ver & 0xff00) >> 8, |
773 | (bp->common.bc_ver & 0xff)); | 773 | (bp->common.bc_ver & 0xff)); |
774 | 774 | ||
775 | if (pci_channel_offline(bp->pdev)) { | ||
776 | BNX2X_ERR("Cannot dump MCP info while in PCI error\n"); | ||
777 | return; | ||
778 | } | ||
779 | |||
775 | val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER); | 780 | val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER); |
776 | if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER)) | 781 | if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER)) |
777 | BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val); | 782 | BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val); |
@@ -9415,10 +9420,16 @@ unload_error: | |||
9415 | /* Release IRQs */ | 9420 | /* Release IRQs */ |
9416 | bnx2x_free_irq(bp); | 9421 | bnx2x_free_irq(bp); |
9417 | 9422 | ||
9418 | /* Reset the chip */ | 9423 | /* Reset the chip, unless PCI function is offline. If we reach this |
9419 | rc = bnx2x_reset_hw(bp, reset_code); | 9424 | * point following a PCI error handling, it means device is really |
9420 | if (rc) | 9425 | * in a bad state and we're about to remove it, so reset the chip |
9421 | BNX2X_ERR("HW_RESET failed\n"); | 9426 | * is not a good idea. |
9427 | */ | ||
9428 | if (!pci_channel_offline(bp->pdev)) { | ||
9429 | rc = bnx2x_reset_hw(bp, reset_code); | ||
9430 | if (rc) | ||
9431 | BNX2X_ERR("HW_RESET failed\n"); | ||
9432 | } | ||
9422 | 9433 | ||
9423 | /* Report UNLOAD_DONE to MCP */ | 9434 | /* Report UNLOAD_DONE to MCP */ |
9424 | bnx2x_send_unload_done(bp, keep_link); | 9435 | bnx2x_send_unload_done(bp, keep_link); |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 2cf79100c9cb..228c964e709a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
@@ -353,8 +353,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
353 | push_len = (length + sizeof(*tx_push) + 7) / 8; | 353 | push_len = (length + sizeof(*tx_push) + 7) / 8; |
354 | if (push_len > 16) { | 354 | if (push_len > 16) { |
355 | __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16); | 355 | __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16); |
356 | __iowrite64_copy(txr->tx_doorbell + 4, tx_push_buf + 1, | 356 | __iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1, |
357 | push_len - 16); | 357 | (push_len - 16) << 1); |
358 | } else { | 358 | } else { |
359 | __iowrite64_copy(txr->tx_doorbell, tx_push_buf, | 359 | __iowrite64_copy(txr->tx_doorbell, tx_push_buf, |
360 | push_len); | 360 | push_len); |
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index ff300f7cf529..a2551bcd1027 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
@@ -12552,10 +12552,6 @@ static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, | |||
12552 | info->data = TG3_RSS_MAX_NUM_QS; | 12552 | info->data = TG3_RSS_MAX_NUM_QS; |
12553 | } | 12553 | } |
12554 | 12554 | ||
12555 | /* The first interrupt vector only | ||
12556 | * handles link interrupts. | ||
12557 | */ | ||
12558 | info->data -= 1; | ||
12559 | return 0; | 12555 | return 0; |
12560 | 12556 | ||
12561 | default: | 12557 | default: |
@@ -14014,7 +14010,9 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) | |||
14014 | } | 14010 | } |
14015 | 14011 | ||
14016 | if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) || | 14012 | if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) || |
14013 | (!ec->rx_coalesce_usecs) || | ||
14017 | (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) || | 14014 | (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) || |
14015 | (!ec->tx_coalesce_usecs) || | ||
14018 | (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) || | 14016 | (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) || |
14019 | (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) || | 14017 | (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) || |
14020 | (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) || | 14018 | (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) || |
@@ -14025,16 +14023,6 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) | |||
14025 | (ec->stats_block_coalesce_usecs < min_stat_coal_ticks)) | 14023 | (ec->stats_block_coalesce_usecs < min_stat_coal_ticks)) |
14026 | return -EINVAL; | 14024 | return -EINVAL; |
14027 | 14025 | ||
14028 | /* No rx interrupts will be generated if both are zero */ | ||
14029 | if ((ec->rx_coalesce_usecs == 0) && | ||
14030 | (ec->rx_max_coalesced_frames == 0)) | ||
14031 | return -EINVAL; | ||
14032 | |||
14033 | /* No tx interrupts will be generated if both are zero */ | ||
14034 | if ((ec->tx_coalesce_usecs == 0) && | ||
14035 | (ec->tx_max_coalesced_frames == 0)) | ||
14036 | return -EINVAL; | ||
14037 | |||
14038 | /* Only copy relevant parameters, ignore all others. */ | 14026 | /* Only copy relevant parameters, ignore all others. */ |
14039 | tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs; | 14027 | tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs; |
14040 | tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs; | 14028 | tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs; |
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c index 0e4fdc3dd729..31f61a744d66 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c +++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c | |||
@@ -31,15 +31,10 @@ | |||
31 | #define BNAD_NUM_TXF_COUNTERS 12 | 31 | #define BNAD_NUM_TXF_COUNTERS 12 |
32 | #define BNAD_NUM_RXF_COUNTERS 10 | 32 | #define BNAD_NUM_RXF_COUNTERS 10 |
33 | #define BNAD_NUM_CQ_COUNTERS (3 + 5) | 33 | #define BNAD_NUM_CQ_COUNTERS (3 + 5) |
34 | #define BNAD_NUM_RXQ_COUNTERS 6 | 34 | #define BNAD_NUM_RXQ_COUNTERS 7 |
35 | #define BNAD_NUM_TXQ_COUNTERS 5 | 35 | #define BNAD_NUM_TXQ_COUNTERS 5 |
36 | 36 | ||
37 | #define BNAD_ETHTOOL_STATS_NUM \ | 37 | static const char *bnad_net_stats_strings[] = { |
38 | (sizeof(struct rtnl_link_stats64) / sizeof(u64) + \ | ||
39 | sizeof(struct bnad_drv_stats) / sizeof(u64) + \ | ||
40 | offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64)) | ||
41 | |||
42 | static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = { | ||
43 | "rx_packets", | 38 | "rx_packets", |
44 | "tx_packets", | 39 | "tx_packets", |
45 | "rx_bytes", | 40 | "rx_bytes", |
@@ -50,22 +45,10 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = { | |||
50 | "tx_dropped", | 45 | "tx_dropped", |
51 | "multicast", | 46 | "multicast", |
52 | "collisions", | 47 | "collisions", |
53 | |||
54 | "rx_length_errors", | 48 | "rx_length_errors", |
55 | "rx_over_errors", | ||
56 | "rx_crc_errors", | 49 | "rx_crc_errors", |
57 | "rx_frame_errors", | 50 | "rx_frame_errors", |
58 | "rx_fifo_errors", | ||
59 | "rx_missed_errors", | ||
60 | |||
61 | "tx_aborted_errors", | ||
62 | "tx_carrier_errors", | ||
63 | "tx_fifo_errors", | 51 | "tx_fifo_errors", |
64 | "tx_heartbeat_errors", | ||
65 | "tx_window_errors", | ||
66 | |||
67 | "rx_compressed", | ||
68 | "tx_compressed", | ||
69 | 52 | ||
70 | "netif_queue_stop", | 53 | "netif_queue_stop", |
71 | "netif_queue_wakeup", | 54 | "netif_queue_wakeup", |
@@ -254,6 +237,8 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = { | |||
254 | "fc_tx_fid_parity_errors", | 237 | "fc_tx_fid_parity_errors", |
255 | }; | 238 | }; |
256 | 239 | ||
240 | #define BNAD_ETHTOOL_STATS_NUM ARRAY_SIZE(bnad_net_stats_strings) | ||
241 | |||
257 | static int | 242 | static int |
258 | bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) | 243 | bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) |
259 | { | 244 | { |
@@ -658,6 +643,8 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string) | |||
658 | string += ETH_GSTRING_LEN; | 643 | string += ETH_GSTRING_LEN; |
659 | sprintf(string, "rxq%d_allocbuf_failed", q_num); | 644 | sprintf(string, "rxq%d_allocbuf_failed", q_num); |
660 | string += ETH_GSTRING_LEN; | 645 | string += ETH_GSTRING_LEN; |
646 | sprintf(string, "rxq%d_mapbuf_failed", q_num); | ||
647 | string += ETH_GSTRING_LEN; | ||
661 | sprintf(string, "rxq%d_producer_index", q_num); | 648 | sprintf(string, "rxq%d_producer_index", q_num); |
662 | string += ETH_GSTRING_LEN; | 649 | string += ETH_GSTRING_LEN; |
663 | sprintf(string, "rxq%d_consumer_index", q_num); | 650 | sprintf(string, "rxq%d_consumer_index", q_num); |
@@ -678,6 +665,9 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string) | |||
678 | sprintf(string, "rxq%d_allocbuf_failed", | 665 | sprintf(string, "rxq%d_allocbuf_failed", |
679 | q_num); | 666 | q_num); |
680 | string += ETH_GSTRING_LEN; | 667 | string += ETH_GSTRING_LEN; |
668 | sprintf(string, "rxq%d_mapbuf_failed", | ||
669 | q_num); | ||
670 | string += ETH_GSTRING_LEN; | ||
681 | sprintf(string, "rxq%d_producer_index", | 671 | sprintf(string, "rxq%d_producer_index", |
682 | q_num); | 672 | q_num); |
683 | string += ETH_GSTRING_LEN; | 673 | string += ETH_GSTRING_LEN; |
@@ -854,9 +844,9 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, | |||
854 | u64 *buf) | 844 | u64 *buf) |
855 | { | 845 | { |
856 | struct bnad *bnad = netdev_priv(netdev); | 846 | struct bnad *bnad = netdev_priv(netdev); |
857 | int i, j, bi; | 847 | int i, j, bi = 0; |
858 | unsigned long flags; | 848 | unsigned long flags; |
859 | struct rtnl_link_stats64 *net_stats64; | 849 | struct rtnl_link_stats64 net_stats64; |
860 | u64 *stats64; | 850 | u64 *stats64; |
861 | u32 bmap; | 851 | u32 bmap; |
862 | 852 | ||
@@ -871,14 +861,25 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, | |||
871 | * under the same lock | 861 | * under the same lock |
872 | */ | 862 | */ |
873 | spin_lock_irqsave(&bnad->bna_lock, flags); | 863 | spin_lock_irqsave(&bnad->bna_lock, flags); |
874 | bi = 0; | ||
875 | memset(buf, 0, stats->n_stats * sizeof(u64)); | ||
876 | |||
877 | net_stats64 = (struct rtnl_link_stats64 *)buf; | ||
878 | bnad_netdev_qstats_fill(bnad, net_stats64); | ||
879 | bnad_netdev_hwstats_fill(bnad, net_stats64); | ||
880 | 864 | ||
881 | bi = sizeof(*net_stats64) / sizeof(u64); | 865 | memset(&net_stats64, 0, sizeof(net_stats64)); |
866 | bnad_netdev_qstats_fill(bnad, &net_stats64); | ||
867 | bnad_netdev_hwstats_fill(bnad, &net_stats64); | ||
868 | |||
869 | buf[bi++] = net_stats64.rx_packets; | ||
870 | buf[bi++] = net_stats64.tx_packets; | ||
871 | buf[bi++] = net_stats64.rx_bytes; | ||
872 | buf[bi++] = net_stats64.tx_bytes; | ||
873 | buf[bi++] = net_stats64.rx_errors; | ||
874 | buf[bi++] = net_stats64.tx_errors; | ||
875 | buf[bi++] = net_stats64.rx_dropped; | ||
876 | buf[bi++] = net_stats64.tx_dropped; | ||
877 | buf[bi++] = net_stats64.multicast; | ||
878 | buf[bi++] = net_stats64.collisions; | ||
879 | buf[bi++] = net_stats64.rx_length_errors; | ||
880 | buf[bi++] = net_stats64.rx_crc_errors; | ||
881 | buf[bi++] = net_stats64.rx_frame_errors; | ||
882 | buf[bi++] = net_stats64.tx_fifo_errors; | ||
882 | 883 | ||
883 | /* Get netif_queue_stopped from stack */ | 884 | /* Get netif_queue_stopped from stack */ |
884 | bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev); | 885 | bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev); |
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 89c0cfa9719f..d954a97b0b0b 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c | |||
@@ -1323,6 +1323,24 @@ dma_error: | |||
1323 | return 0; | 1323 | return 0; |
1324 | } | 1324 | } |
1325 | 1325 | ||
1326 | static inline int macb_clear_csum(struct sk_buff *skb) | ||
1327 | { | ||
1328 | /* no change for packets without checksum offloading */ | ||
1329 | if (skb->ip_summed != CHECKSUM_PARTIAL) | ||
1330 | return 0; | ||
1331 | |||
1332 | /* make sure we can modify the header */ | ||
1333 | if (unlikely(skb_cow_head(skb, 0))) | ||
1334 | return -1; | ||
1335 | |||
1336 | /* initialize checksum field | ||
1337 | * This is required - at least for Zynq, which otherwise calculates | ||
1338 | * wrong UDP header checksums for UDP packets with UDP data len <=2 | ||
1339 | */ | ||
1340 | *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0; | ||
1341 | return 0; | ||
1342 | } | ||
1343 | |||
1326 | static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) | 1344 | static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) |
1327 | { | 1345 | { |
1328 | u16 queue_index = skb_get_queue_mapping(skb); | 1346 | u16 queue_index = skb_get_queue_mapping(skb); |
@@ -1362,6 +1380,11 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1362 | return NETDEV_TX_BUSY; | 1380 | return NETDEV_TX_BUSY; |
1363 | } | 1381 | } |
1364 | 1382 | ||
1383 | if (macb_clear_csum(skb)) { | ||
1384 | dev_kfree_skb_any(skb); | ||
1385 | return NETDEV_TX_OK; | ||
1386 | } | ||
1387 | |||
1365 | /* Map socket buffer for DMA transfer */ | 1388 | /* Map socket buffer for DMA transfer */ |
1366 | if (!macb_tx_map(bp, queue, skb)) { | 1389 | if (!macb_tx_map(bp, queue, skb)) { |
1367 | dev_kfree_skb_any(skb); | 1390 | dev_kfree_skb_any(skb); |
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 36893d8958d4..b6fcf10621b6 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h | |||
@@ -403,11 +403,11 @@ | |||
403 | #define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII 0x00000004 | 403 | #define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII 0x00000004 |
404 | #define MACB_CAPS_NO_GIGABIT_HALF 0x00000008 | 404 | #define MACB_CAPS_NO_GIGABIT_HALF 0x00000008 |
405 | #define MACB_CAPS_USRIO_DISABLED 0x00000010 | 405 | #define MACB_CAPS_USRIO_DISABLED 0x00000010 |
406 | #define MACB_CAPS_JUMBO 0x00000020 | ||
406 | #define MACB_CAPS_FIFO_MODE 0x10000000 | 407 | #define MACB_CAPS_FIFO_MODE 0x10000000 |
407 | #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 | 408 | #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 |
408 | #define MACB_CAPS_SG_DISABLED 0x40000000 | 409 | #define MACB_CAPS_SG_DISABLED 0x40000000 |
409 | #define MACB_CAPS_MACB_IS_GEM 0x80000000 | 410 | #define MACB_CAPS_MACB_IS_GEM 0x80000000 |
410 | #define MACB_CAPS_JUMBO 0x00000010 | ||
411 | 411 | ||
412 | /* Bit manipulation macros */ | 412 | /* Bit manipulation macros */ |
413 | #define MACB_BIT(name) \ | 413 | #define MACB_BIT(name) \ |
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index 83025bb4737c..e29815d9e6f4 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h | |||
@@ -279,6 +279,7 @@ struct nicvf { | |||
279 | u8 sqs_id; | 279 | u8 sqs_id; |
280 | bool sqs_mode; | 280 | bool sqs_mode; |
281 | bool hw_tso; | 281 | bool hw_tso; |
282 | bool t88; | ||
282 | 283 | ||
283 | /* Receive buffer alloc */ | 284 | /* Receive buffer alloc */ |
284 | u32 rb_page_offset; | 285 | u32 rb_page_offset; |
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 16ed20357c5c..85cc782b9060 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c | |||
@@ -251,9 +251,14 @@ static void nic_set_tx_pkt_pad(struct nicpf *nic, int size) | |||
251 | int lmac; | 251 | int lmac; |
252 | u64 lmac_cfg; | 252 | u64 lmac_cfg; |
253 | 253 | ||
254 | /* Max value that can be set is 60 */ | 254 | /* There is an issue in HW where-in while sending GSO sized |
255 | if (size > 60) | 255 | * pkts as part of TSO, if pkt len falls below this size |
256 | size = 60; | 256 | * NIC will zero PAD packet and also updates IP total length. |
257 | * Hence set this value to less than min pkt size of MAC+IP+TCP | ||
258 | * headers, BGX will do the padding to transmit 64 byte pkt. | ||
259 | */ | ||
260 | if (size > 52) | ||
261 | size = 52; | ||
257 | 262 | ||
258 | for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) { | 263 | for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) { |
259 | lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3)); | 264 | lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3)); |
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h index afb10e326b4f..fab35a593898 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_reg.h +++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h | |||
@@ -170,7 +170,6 @@ | |||
170 | #define NIC_QSET_SQ_0_7_DOOR (0x010838) | 170 | #define NIC_QSET_SQ_0_7_DOOR (0x010838) |
171 | #define NIC_QSET_SQ_0_7_STATUS (0x010840) | 171 | #define NIC_QSET_SQ_0_7_STATUS (0x010840) |
172 | #define NIC_QSET_SQ_0_7_DEBUG (0x010848) | 172 | #define NIC_QSET_SQ_0_7_DEBUG (0x010848) |
173 | #define NIC_QSET_SQ_0_7_CNM_CHG (0x010860) | ||
174 | #define NIC_QSET_SQ_0_7_STAT_0_1 (0x010900) | 173 | #define NIC_QSET_SQ_0_7_STAT_0_1 (0x010900) |
175 | 174 | ||
176 | #define NIC_QSET_RBDR_0_1_CFG (0x010C00) | 175 | #define NIC_QSET_RBDR_0_1_CFG (0x010C00) |
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index d2d8ef270142..ad4fddb55421 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | |||
@@ -382,7 +382,10 @@ static void nicvf_get_regs(struct net_device *dev, | |||
382 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q); | 382 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q); |
383 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q); | 383 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q); |
384 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q); | 384 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q); |
385 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CNM_CHG, q); | 385 | /* Padding, was NIC_QSET_SQ_0_7_CNM_CHG, which |
386 | * produces bus errors when read | ||
387 | */ | ||
388 | p[i++] = 0; | ||
386 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q); | 389 | p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q); |
387 | reg_offset = NIC_QSET_SQ_0_7_STAT_0_1 | (1 << 3); | 390 | reg_offset = NIC_QSET_SQ_0_7_STAT_0_1 | (1 << 3); |
388 | p[i++] = nicvf_queue_reg_read(nic, reg_offset, q); | 391 | p[i++] = nicvf_queue_reg_read(nic, reg_offset, q); |
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index a19e73f11d73..3240349615bd 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c | |||
@@ -513,6 +513,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev, | |||
513 | struct nicvf *nic = netdev_priv(netdev); | 513 | struct nicvf *nic = netdev_priv(netdev); |
514 | struct snd_queue *sq; | 514 | struct snd_queue *sq; |
515 | struct sq_hdr_subdesc *hdr; | 515 | struct sq_hdr_subdesc *hdr; |
516 | struct sq_hdr_subdesc *tso_sqe; | ||
516 | 517 | ||
517 | sq = &nic->qs->sq[cqe_tx->sq_idx]; | 518 | sq = &nic->qs->sq[cqe_tx->sq_idx]; |
518 | 519 | ||
@@ -527,17 +528,21 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev, | |||
527 | 528 | ||
528 | nicvf_check_cqe_tx_errs(nic, cq, cqe_tx); | 529 | nicvf_check_cqe_tx_errs(nic, cq, cqe_tx); |
529 | skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr]; | 530 | skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr]; |
530 | /* For TSO offloaded packets only one SQE will have a valid SKB */ | ||
531 | if (skb) { | 531 | if (skb) { |
532 | /* Check for dummy descriptor used for HW TSO offload on 88xx */ | ||
533 | if (hdr->dont_send) { | ||
534 | /* Get actual TSO descriptors and free them */ | ||
535 | tso_sqe = | ||
536 | (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2); | ||
537 | nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1); | ||
538 | } | ||
532 | nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); | 539 | nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); |
533 | prefetch(skb); | 540 | prefetch(skb); |
534 | dev_consume_skb_any(skb); | 541 | dev_consume_skb_any(skb); |
535 | sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL; | 542 | sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL; |
536 | } else { | 543 | } else { |
537 | /* In case of HW TSO, HW sends a CQE for each segment of a TSO | 544 | /* In case of SW TSO on 88xx, only last segment will have |
538 | * packet instead of a single CQE for the whole TSO packet | 545 | * a SKB attached, so just free SQEs here. |
539 | * transmitted. Each of this CQE points to the same SQE, so | ||
540 | * avoid freeing same SQE multiple times. | ||
541 | */ | 546 | */ |
542 | if (!nic->hw_tso) | 547 | if (!nic->hw_tso) |
543 | nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); | 548 | nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); |
@@ -1502,6 +1507,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1502 | struct net_device *netdev; | 1507 | struct net_device *netdev; |
1503 | struct nicvf *nic; | 1508 | struct nicvf *nic; |
1504 | int err, qcount; | 1509 | int err, qcount; |
1510 | u16 sdevid; | ||
1505 | 1511 | ||
1506 | err = pci_enable_device(pdev); | 1512 | err = pci_enable_device(pdev); |
1507 | if (err) { | 1513 | if (err) { |
@@ -1575,6 +1581,10 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1575 | if (!pass1_silicon(nic->pdev)) | 1581 | if (!pass1_silicon(nic->pdev)) |
1576 | nic->hw_tso = true; | 1582 | nic->hw_tso = true; |
1577 | 1583 | ||
1584 | pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid); | ||
1585 | if (sdevid == 0xA134) | ||
1586 | nic->t88 = true; | ||
1587 | |||
1578 | /* Check if this VF is in QS only mode */ | 1588 | /* Check if this VF is in QS only mode */ |
1579 | if (nic->sqs_mode) | 1589 | if (nic->sqs_mode) |
1580 | return 0; | 1590 | return 0; |
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 0ff8e60deccb..dda3ea3f3bb6 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c | |||
@@ -938,6 +938,8 @@ static int nicvf_tso_count_subdescs(struct sk_buff *skb) | |||
938 | return num_edescs + sh->gso_segs; | 938 | return num_edescs + sh->gso_segs; |
939 | } | 939 | } |
940 | 940 | ||
941 | #define POST_CQE_DESC_COUNT 2 | ||
942 | |||
941 | /* Get the number of SQ descriptors needed to xmit this skb */ | 943 | /* Get the number of SQ descriptors needed to xmit this skb */ |
942 | static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb) | 944 | static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb) |
943 | { | 945 | { |
@@ -948,6 +950,10 @@ static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb) | |||
948 | return subdesc_cnt; | 950 | return subdesc_cnt; |
949 | } | 951 | } |
950 | 952 | ||
953 | /* Dummy descriptors to get TSO pkt completion notification */ | ||
954 | if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) | ||
955 | subdesc_cnt += POST_CQE_DESC_COUNT; | ||
956 | |||
951 | if (skb_shinfo(skb)->nr_frags) | 957 | if (skb_shinfo(skb)->nr_frags) |
952 | subdesc_cnt += skb_shinfo(skb)->nr_frags; | 958 | subdesc_cnt += skb_shinfo(skb)->nr_frags; |
953 | 959 | ||
@@ -965,14 +971,21 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry, | |||
965 | struct sq_hdr_subdesc *hdr; | 971 | struct sq_hdr_subdesc *hdr; |
966 | 972 | ||
967 | hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry); | 973 | hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry); |
968 | sq->skbuff[qentry] = (u64)skb; | ||
969 | |||
970 | memset(hdr, 0, SND_QUEUE_DESC_SIZE); | 974 | memset(hdr, 0, SND_QUEUE_DESC_SIZE); |
971 | hdr->subdesc_type = SQ_DESC_TYPE_HEADER; | 975 | hdr->subdesc_type = SQ_DESC_TYPE_HEADER; |
972 | /* Enable notification via CQE after processing SQE */ | 976 | |
973 | hdr->post_cqe = 1; | 977 | if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) { |
974 | /* No of subdescriptors following this */ | 978 | /* post_cqe = 0, to avoid HW posting a CQE for every TSO |
975 | hdr->subdesc_cnt = subdesc_cnt; | 979 | * segment transmitted on 88xx. |
980 | */ | ||
981 | hdr->subdesc_cnt = subdesc_cnt - POST_CQE_DESC_COUNT; | ||
982 | } else { | ||
983 | sq->skbuff[qentry] = (u64)skb; | ||
984 | /* Enable notification via CQE after processing SQE */ | ||
985 | hdr->post_cqe = 1; | ||
986 | /* No of subdescriptors following this */ | ||
987 | hdr->subdesc_cnt = subdesc_cnt; | ||
988 | } | ||
976 | hdr->tot_len = len; | 989 | hdr->tot_len = len; |
977 | 990 | ||
978 | /* Offload checksum calculation to HW */ | 991 | /* Offload checksum calculation to HW */ |
@@ -1023,6 +1036,37 @@ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry, | |||
1023 | gather->addr = data; | 1036 | gather->addr = data; |
1024 | } | 1037 | } |
1025 | 1038 | ||
1039 | /* Add HDR + IMMEDIATE subdescriptors right after descriptors of a TSO | ||
1040 | * packet so that a CQE is posted as a notification for transmission of | ||
1041 | * TSO packet. | ||
1042 | */ | ||
1043 | static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry, | ||
1044 | int tso_sqe, struct sk_buff *skb) | ||
1045 | { | ||
1046 | struct sq_imm_subdesc *imm; | ||
1047 | struct sq_hdr_subdesc *hdr; | ||
1048 | |||
1049 | sq->skbuff[qentry] = (u64)skb; | ||
1050 | |||
1051 | hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry); | ||
1052 | memset(hdr, 0, SND_QUEUE_DESC_SIZE); | ||
1053 | hdr->subdesc_type = SQ_DESC_TYPE_HEADER; | ||
1054 | /* Enable notification via CQE after processing SQE */ | ||
1055 | hdr->post_cqe = 1; | ||
1056 | /* There is no packet to transmit here */ | ||
1057 | hdr->dont_send = 1; | ||
1058 | hdr->subdesc_cnt = POST_CQE_DESC_COUNT - 1; | ||
1059 | hdr->tot_len = 1; | ||
1060 | /* Actual TSO header SQE index, needed for cleanup */ | ||
1061 | hdr->rsvd2 = tso_sqe; | ||
1062 | |||
1063 | qentry = nicvf_get_nxt_sqentry(sq, qentry); | ||
1064 | imm = (struct sq_imm_subdesc *)GET_SQ_DESC(sq, qentry); | ||
1065 | memset(imm, 0, SND_QUEUE_DESC_SIZE); | ||
1066 | imm->subdesc_type = SQ_DESC_TYPE_IMMEDIATE; | ||
1067 | imm->len = 1; | ||
1068 | } | ||
1069 | |||
1026 | /* Segment a TSO packet into 'gso_size' segments and append | 1070 | /* Segment a TSO packet into 'gso_size' segments and append |
1027 | * them to SQ for transfer | 1071 | * them to SQ for transfer |
1028 | */ | 1072 | */ |
@@ -1096,7 +1140,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq, | |||
1096 | int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb) | 1140 | int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb) |
1097 | { | 1141 | { |
1098 | int i, size; | 1142 | int i, size; |
1099 | int subdesc_cnt; | 1143 | int subdesc_cnt, tso_sqe = 0; |
1100 | int sq_num, qentry; | 1144 | int sq_num, qentry; |
1101 | struct queue_set *qs; | 1145 | struct queue_set *qs; |
1102 | struct snd_queue *sq; | 1146 | struct snd_queue *sq; |
@@ -1131,6 +1175,7 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb) | |||
1131 | /* Add SQ header subdesc */ | 1175 | /* Add SQ header subdesc */ |
1132 | nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1, | 1176 | nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1, |
1133 | skb, skb->len); | 1177 | skb, skb->len); |
1178 | tso_sqe = qentry; | ||
1134 | 1179 | ||
1135 | /* Add SQ gather subdescs */ | 1180 | /* Add SQ gather subdescs */ |
1136 | qentry = nicvf_get_nxt_sqentry(sq, qentry); | 1181 | qentry = nicvf_get_nxt_sqentry(sq, qentry); |
@@ -1154,6 +1199,11 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb) | |||
1154 | } | 1199 | } |
1155 | 1200 | ||
1156 | doorbell: | 1201 | doorbell: |
1202 | if (nic->t88 && skb_shinfo(skb)->gso_size) { | ||
1203 | qentry = nicvf_get_nxt_sqentry(sq, qentry); | ||
1204 | nicvf_sq_add_cqe_subdesc(sq, qentry, tso_sqe, skb); | ||
1205 | } | ||
1206 | |||
1157 | /* make sure all memory stores are done before ringing doorbell */ | 1207 | /* make sure all memory stores are done before ringing doorbell */ |
1158 | smp_wmb(); | 1208 | smp_wmb(); |
1159 | 1209 | ||
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 2e2aa9fec9bb..edd23386b47d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | |||
@@ -419,8 +419,8 @@ struct link_config { | |||
419 | unsigned short supported; /* link capabilities */ | 419 | unsigned short supported; /* link capabilities */ |
420 | unsigned short advertising; /* advertised capabilities */ | 420 | unsigned short advertising; /* advertised capabilities */ |
421 | unsigned short lp_advertising; /* peer advertised capabilities */ | 421 | unsigned short lp_advertising; /* peer advertised capabilities */ |
422 | unsigned short requested_speed; /* speed user has requested */ | 422 | unsigned int requested_speed; /* speed user has requested */ |
423 | unsigned short speed; /* actual link speed */ | 423 | unsigned int speed; /* actual link speed */ |
424 | unsigned char requested_fc; /* flow control user has requested */ | 424 | unsigned char requested_fc; /* flow control user has requested */ |
425 | unsigned char fc; /* actual link flow control */ | 425 | unsigned char fc; /* actual link flow control */ |
426 | unsigned char autoneg; /* autonegotiating? */ | 426 | unsigned char autoneg; /* autonegotiating? */ |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index c45de49dc963..3ceafb55d6da 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | |||
@@ -4305,10 +4305,17 @@ static const struct pci_error_handlers cxgb4_eeh = { | |||
4305 | .resume = eeh_resume, | 4305 | .resume = eeh_resume, |
4306 | }; | 4306 | }; |
4307 | 4307 | ||
4308 | /* Return true if the Link Configuration supports "High Speeds" (those greater | ||
4309 | * than 1Gb/s). | ||
4310 | */ | ||
4308 | static inline bool is_x_10g_port(const struct link_config *lc) | 4311 | static inline bool is_x_10g_port(const struct link_config *lc) |
4309 | { | 4312 | { |
4310 | return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 || | 4313 | unsigned int speeds, high_speeds; |
4311 | (lc->supported & FW_PORT_CAP_SPEED_40G) != 0; | 4314 | |
4315 | speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(lc->supported)); | ||
4316 | high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G); | ||
4317 | |||
4318 | return high_speeds != 0; | ||
4312 | } | 4319 | } |
4313 | 4320 | ||
4314 | static inline void init_rspq(struct adapter *adap, struct sge_rspq *q, | 4321 | static inline void init_rspq(struct adapter *adap, struct sge_rspq *q, |
@@ -4335,6 +4342,11 @@ static void cfg_queues(struct adapter *adap) | |||
4335 | #endif | 4342 | #endif |
4336 | int ciq_size; | 4343 | int ciq_size; |
4337 | 4344 | ||
4345 | /* Reduce memory usage in kdump environment, disable all offload. | ||
4346 | */ | ||
4347 | if (is_kdump_kernel()) | ||
4348 | adap->params.offload = 0; | ||
4349 | |||
4338 | for_each_port(adap, i) | 4350 | for_each_port(adap, i) |
4339 | n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); | 4351 | n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); |
4340 | #ifdef CONFIG_CHELSIO_T4_DCB | 4352 | #ifdef CONFIG_CHELSIO_T4_DCB |
@@ -4365,11 +4377,6 @@ static void cfg_queues(struct adapter *adap) | |||
4365 | if (q10g > netif_get_num_default_rss_queues()) | 4377 | if (q10g > netif_get_num_default_rss_queues()) |
4366 | q10g = netif_get_num_default_rss_queues(); | 4378 | q10g = netif_get_num_default_rss_queues(); |
4367 | 4379 | ||
4368 | /* Reduce memory usage in kdump environment, disable all offload. | ||
4369 | */ | ||
4370 | if (is_kdump_kernel()) | ||
4371 | adap->params.offload = 0; | ||
4372 | |||
4373 | for_each_port(adap, i) { | 4380 | for_each_port(adap, i) { |
4374 | struct port_info *pi = adap2pinfo(adap, i); | 4381 | struct port_info *pi = adap2pinfo(adap, i); |
4375 | 4382 | ||
@@ -4756,8 +4763,12 @@ static void print_port_info(const struct net_device *dev) | |||
4756 | bufp += sprintf(bufp, "1000/"); | 4763 | bufp += sprintf(bufp, "1000/"); |
4757 | if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) | 4764 | if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) |
4758 | bufp += sprintf(bufp, "10G/"); | 4765 | bufp += sprintf(bufp, "10G/"); |
4766 | if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G) | ||
4767 | bufp += sprintf(bufp, "25G/"); | ||
4759 | if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G) | 4768 | if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G) |
4760 | bufp += sprintf(bufp, "40G/"); | 4769 | bufp += sprintf(bufp, "40G/"); |
4770 | if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G) | ||
4771 | bufp += sprintf(bufp, "100G/"); | ||
4761 | if (bufp != buf) | 4772 | if (bufp != buf) |
4762 | --bufp; | 4773 | --bufp; |
4763 | sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type)); | 4774 | sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type)); |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index dc92c80a75f4..660204bff726 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | |||
@@ -3627,7 +3627,8 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf) | |||
3627 | } | 3627 | } |
3628 | 3628 | ||
3629 | #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ | 3629 | #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ |
3630 | FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \ | 3630 | FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \ |
3631 | FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \ | ||
3631 | FW_PORT_CAP_ANEG) | 3632 | FW_PORT_CAP_ANEG) |
3632 | 3633 | ||
3633 | /** | 3634 | /** |
@@ -7196,8 +7197,12 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) | |||
7196 | speed = 1000; | 7197 | speed = 1000; |
7197 | else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) | 7198 | else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) |
7198 | speed = 10000; | 7199 | speed = 10000; |
7200 | else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G)) | ||
7201 | speed = 25000; | ||
7199 | else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) | 7202 | else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) |
7200 | speed = 40000; | 7203 | speed = 40000; |
7204 | else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G)) | ||
7205 | speed = 100000; | ||
7201 | 7206 | ||
7202 | lc = &pi->link_cfg; | 7207 | lc = &pi->link_cfg; |
7203 | 7208 | ||
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index a89b30720e38..30507d44422c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | |||
@@ -2265,6 +2265,12 @@ enum fw_port_cap { | |||
2265 | FW_PORT_CAP_802_3_ASM_DIR = 0x8000, | 2265 | FW_PORT_CAP_802_3_ASM_DIR = 0x8000, |
2266 | }; | 2266 | }; |
2267 | 2267 | ||
2268 | #define FW_PORT_CAP_SPEED_S 0 | ||
2269 | #define FW_PORT_CAP_SPEED_M 0x3f | ||
2270 | #define FW_PORT_CAP_SPEED_V(x) ((x) << FW_PORT_CAP_SPEED_S) | ||
2271 | #define FW_PORT_CAP_SPEED_G(x) \ | ||
2272 | (((x) >> FW_PORT_CAP_SPEED_S) & FW_PORT_CAP_SPEED_M) | ||
2273 | |||
2268 | enum fw_port_mdi { | 2274 | enum fw_port_mdi { |
2269 | FW_PORT_CAP_MDI_UNCHANGED, | 2275 | FW_PORT_CAP_MDI_UNCHANGED, |
2270 | FW_PORT_CAP_MDI_AUTO, | 2276 | FW_PORT_CAP_MDI_AUTO, |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index 8ee541431e8b..17a2bbcf93f0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | |||
@@ -108,8 +108,8 @@ struct link_config { | |||
108 | unsigned int supported; /* link capabilities */ | 108 | unsigned int supported; /* link capabilities */ |
109 | unsigned int advertising; /* advertised capabilities */ | 109 | unsigned int advertising; /* advertised capabilities */ |
110 | unsigned short lp_advertising; /* peer advertised capabilities */ | 110 | unsigned short lp_advertising; /* peer advertised capabilities */ |
111 | unsigned short requested_speed; /* speed user has requested */ | 111 | unsigned int requested_speed; /* speed user has requested */ |
112 | unsigned short speed; /* actual link speed */ | 112 | unsigned int speed; /* actual link speed */ |
113 | unsigned char requested_fc; /* flow control user has requested */ | 113 | unsigned char requested_fc; /* flow control user has requested */ |
114 | unsigned char fc; /* actual link flow control */ | 114 | unsigned char fc; /* actual link flow control */ |
115 | unsigned char autoneg; /* autonegotiating? */ | 115 | unsigned char autoneg; /* autonegotiating? */ |
@@ -271,10 +271,17 @@ static inline bool is_10g_port(const struct link_config *lc) | |||
271 | return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0; | 271 | return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0; |
272 | } | 272 | } |
273 | 273 | ||
274 | /* Return true if the Link Configuration supports "High Speeds" (those greater | ||
275 | * than 1Gb/s). | ||
276 | */ | ||
274 | static inline bool is_x_10g_port(const struct link_config *lc) | 277 | static inline bool is_x_10g_port(const struct link_config *lc) |
275 | { | 278 | { |
276 | return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 || | 279 | unsigned int speeds, high_speeds; |
277 | (lc->supported & FW_PORT_CAP_SPEED_40G) != 0; | 280 | |
281 | speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(lc->supported)); | ||
282 | high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G); | ||
283 | |||
284 | return high_speeds != 0; | ||
278 | } | 285 | } |
279 | 286 | ||
280 | static inline unsigned int core_ticks_per_usec(const struct adapter *adapter) | 287 | static inline unsigned int core_ticks_per_usec(const struct adapter *adapter) |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 427bfa71388b..b5622b1689e9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | |||
@@ -314,8 +314,9 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, | |||
314 | } | 314 | } |
315 | 315 | ||
316 | #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ | 316 | #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ |
317 | FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \ | 317 | FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \ |
318 | FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG) | 318 | FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \ |
319 | FW_PORT_CAP_ANEG) | ||
319 | 320 | ||
320 | /** | 321 | /** |
321 | * init_link_config - initialize a link's SW state | 322 | * init_link_config - initialize a link's SW state |
@@ -1712,8 +1713,12 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) | |||
1712 | speed = 1000; | 1713 | speed = 1000; |
1713 | else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) | 1714 | else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) |
1714 | speed = 10000; | 1715 | speed = 10000; |
1716 | else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G)) | ||
1717 | speed = 25000; | ||
1715 | else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) | 1718 | else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) |
1716 | speed = 40000; | 1719 | speed = 40000; |
1720 | else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G)) | ||
1721 | speed = 100000; | ||
1717 | 1722 | ||
1718 | /* | 1723 | /* |
1719 | * Scan all of our "ports" (Virtual Interfaces) looking for | 1724 | * Scan all of our "ports" (Virtual Interfaces) looking for |
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 1471e16ba719..f45385f5c6e5 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c | |||
@@ -1299,6 +1299,7 @@ static int | |||
1299 | dm9000_open(struct net_device *dev) | 1299 | dm9000_open(struct net_device *dev) |
1300 | { | 1300 | { |
1301 | struct board_info *db = netdev_priv(dev); | 1301 | struct board_info *db = netdev_priv(dev); |
1302 | unsigned int irq_flags = irq_get_trigger_type(dev->irq); | ||
1302 | 1303 | ||
1303 | if (netif_msg_ifup(db)) | 1304 | if (netif_msg_ifup(db)) |
1304 | dev_dbg(db->dev, "enabling %s\n", dev->name); | 1305 | dev_dbg(db->dev, "enabling %s\n", dev->name); |
@@ -1306,9 +1307,11 @@ dm9000_open(struct net_device *dev) | |||
1306 | /* If there is no IRQ type specified, tell the user that this is a | 1307 | /* If there is no IRQ type specified, tell the user that this is a |
1307 | * problem | 1308 | * problem |
1308 | */ | 1309 | */ |
1309 | if (irq_get_trigger_type(dev->irq) == IRQF_TRIGGER_NONE) | 1310 | if (irq_flags == IRQF_TRIGGER_NONE) |
1310 | dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n"); | 1311 | dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n"); |
1311 | 1312 | ||
1313 | irq_flags |= IRQF_SHARED; | ||
1314 | |||
1312 | /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */ | 1315 | /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */ |
1313 | iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ | 1316 | iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ |
1314 | mdelay(1); /* delay needs by DM9000B */ | 1317 | mdelay(1); /* delay needs by DM9000B */ |
@@ -1316,8 +1319,7 @@ dm9000_open(struct net_device *dev) | |||
1316 | /* Initialize DM9000 board */ | 1319 | /* Initialize DM9000 board */ |
1317 | dm9000_init_dm9000(dev); | 1320 | dm9000_init_dm9000(dev); |
1318 | 1321 | ||
1319 | if (request_irq(dev->irq, dm9000_interrupt, IRQF_SHARED, | 1322 | if (request_irq(dev->irq, dm9000_interrupt, irq_flags, dev->name, dev)) |
1320 | dev->name, dev)) | ||
1321 | return -EAGAIN; | 1323 | return -EAGAIN; |
1322 | /* Now that we have an interrupt handler hooked up we can unmask | 1324 | /* Now that we have an interrupt handler hooked up we can unmask |
1323 | * our interrupts | 1325 | * our interrupts |
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index d20935dc8399..4b4f5bc0e279 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c | |||
@@ -2922,17 +2922,25 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus, | |||
2922 | { | 2922 | { |
2923 | unsigned int size = lstatus & BD_LENGTH_MASK; | 2923 | unsigned int size = lstatus & BD_LENGTH_MASK; |
2924 | struct page *page = rxb->page; | 2924 | struct page *page = rxb->page; |
2925 | bool last = !!(lstatus & BD_LFLAG(RXBD_LAST)); | ||
2925 | 2926 | ||
2926 | /* Remove the FCS from the packet length */ | 2927 | /* Remove the FCS from the packet length */ |
2927 | if (likely(lstatus & BD_LFLAG(RXBD_LAST))) | 2928 | if (last) |
2928 | size -= ETH_FCS_LEN; | 2929 | size -= ETH_FCS_LEN; |
2929 | 2930 | ||
2930 | if (likely(first)) | 2931 | if (likely(first)) { |
2931 | skb_put(skb, size); | 2932 | skb_put(skb, size); |
2932 | else | 2933 | } else { |
2933 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, | 2934 | /* the last fragments' length contains the full frame length */ |
2934 | rxb->page_offset + RXBUF_ALIGNMENT, | 2935 | if (last) |
2935 | size, GFAR_RXB_TRUESIZE); | 2936 | size -= skb->len; |
2937 | |||
2938 | /* in case the last fragment consisted only of the FCS */ | ||
2939 | if (size > 0) | ||
2940 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, | ||
2941 | rxb->page_offset + RXBUF_ALIGNMENT, | ||
2942 | size, GFAR_RXB_TRUESIZE); | ||
2943 | } | ||
2936 | 2944 | ||
2937 | /* try reuse page */ | 2945 | /* try reuse page */ |
2938 | if (unlikely(page_count(page) != 1)) | 2946 | if (unlikely(page_count(page) != 1)) |
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index 373fd094f2f3..6e8a9c8467b9 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h | |||
@@ -100,7 +100,8 @@ extern const char gfar_driver_version[]; | |||
100 | #define DEFAULT_RX_LFC_THR 16 | 100 | #define DEFAULT_RX_LFC_THR 16 |
101 | #define DEFAULT_LFC_PTVVAL 4 | 101 | #define DEFAULT_LFC_PTVVAL 4 |
102 | 102 | ||
103 | #define GFAR_RXB_SIZE 1536 | 103 | /* prevent fragmenation by HW in DSA environments */ |
104 | #define GFAR_RXB_SIZE roundup(1536 + 8, 64) | ||
104 | #define GFAR_SKBFRAG_SIZE (RXBUF_ALIGNMENT + GFAR_RXB_SIZE \ | 105 | #define GFAR_SKBFRAG_SIZE (RXBUF_ALIGNMENT + GFAR_RXB_SIZE \ |
105 | + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) | 106 | + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) |
106 | #define GFAR_RXB_TRUESIZE 2048 | 107 | #define GFAR_RXB_TRUESIZE 2048 |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c index 1235c7f2564b..1e1eb92998fb 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | |||
@@ -17,7 +17,7 @@ static const struct mac_stats_string g_gmac_stats_string[] = { | |||
17 | {"gmac_rx_octets_total_ok", MAC_STATS_FIELD_OFF(rx_good_bytes)}, | 17 | {"gmac_rx_octets_total_ok", MAC_STATS_FIELD_OFF(rx_good_bytes)}, |
18 | {"gmac_rx_octets_bad", MAC_STATS_FIELD_OFF(rx_bad_bytes)}, | 18 | {"gmac_rx_octets_bad", MAC_STATS_FIELD_OFF(rx_bad_bytes)}, |
19 | {"gmac_rx_uc_pkts", MAC_STATS_FIELD_OFF(rx_uc_pkts)}, | 19 | {"gmac_rx_uc_pkts", MAC_STATS_FIELD_OFF(rx_uc_pkts)}, |
20 | {"gamc_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)}, | 20 | {"gmac_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)}, |
21 | {"gmac_rx_bc_pkts", MAC_STATS_FIELD_OFF(rx_bc_pkts)}, | 21 | {"gmac_rx_bc_pkts", MAC_STATS_FIELD_OFF(rx_bc_pkts)}, |
22 | {"gmac_rx_pkts_64octets", MAC_STATS_FIELD_OFF(rx_64bytes)}, | 22 | {"gmac_rx_pkts_64octets", MAC_STATS_FIELD_OFF(rx_64bytes)}, |
23 | {"gmac_rx_pkts_65to127", MAC_STATS_FIELD_OFF(rx_65to127)}, | 23 | {"gmac_rx_pkts_65to127", MAC_STATS_FIELD_OFF(rx_65to127)}, |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index ff8b6a468b24..6ea872287307 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | |||
@@ -328,9 +328,10 @@ static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb) | |||
328 | static void hns_ppe_uninit_hw(struct hns_ppe_cb *ppe_cb) | 328 | static void hns_ppe_uninit_hw(struct hns_ppe_cb *ppe_cb) |
329 | { | 329 | { |
330 | u32 port; | 330 | u32 port; |
331 | struct dsaf_device *dsaf_dev = ppe_cb->ppe_common_cb->dsaf_dev; | ||
332 | 331 | ||
333 | if (ppe_cb->ppe_common_cb) { | 332 | if (ppe_cb->ppe_common_cb) { |
333 | struct dsaf_device *dsaf_dev = ppe_cb->ppe_common_cb->dsaf_dev; | ||
334 | |||
334 | port = ppe_cb->index; | 335 | port = ppe_cb->index; |
335 | dsaf_dev->misc_op->ppe_srst(dsaf_dev, port, 0); | 336 | dsaf_dev->misc_op->ppe_srst(dsaf_dev, port, 0); |
336 | } | 337 | } |
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 4c9771d57d6e..7af09cbc53f0 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c | |||
@@ -977,7 +977,37 @@ static void emac_set_multicast_list(struct net_device *ndev) | |||
977 | dev->mcast_pending = 1; | 977 | dev->mcast_pending = 1; |
978 | return; | 978 | return; |
979 | } | 979 | } |
980 | |||
981 | mutex_lock(&dev->link_lock); | ||
980 | __emac_set_multicast_list(dev); | 982 | __emac_set_multicast_list(dev); |
983 | mutex_unlock(&dev->link_lock); | ||
984 | } | ||
985 | |||
986 | static int emac_set_mac_address(struct net_device *ndev, void *sa) | ||
987 | { | ||
988 | struct emac_instance *dev = netdev_priv(ndev); | ||
989 | struct sockaddr *addr = sa; | ||
990 | struct emac_regs __iomem *p = dev->emacp; | ||
991 | |||
992 | if (!is_valid_ether_addr(addr->sa_data)) | ||
993 | return -EADDRNOTAVAIL; | ||
994 | |||
995 | mutex_lock(&dev->link_lock); | ||
996 | |||
997 | memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); | ||
998 | |||
999 | emac_rx_disable(dev); | ||
1000 | emac_tx_disable(dev); | ||
1001 | out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]); | ||
1002 | out_be32(&p->ialr, (ndev->dev_addr[2] << 24) | | ||
1003 | (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) | | ||
1004 | ndev->dev_addr[5]); | ||
1005 | emac_tx_enable(dev); | ||
1006 | emac_rx_enable(dev); | ||
1007 | |||
1008 | mutex_unlock(&dev->link_lock); | ||
1009 | |||
1010 | return 0; | ||
981 | } | 1011 | } |
982 | 1012 | ||
983 | static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu) | 1013 | static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu) |
@@ -2686,7 +2716,7 @@ static const struct net_device_ops emac_netdev_ops = { | |||
2686 | .ndo_do_ioctl = emac_ioctl, | 2716 | .ndo_do_ioctl = emac_ioctl, |
2687 | .ndo_tx_timeout = emac_tx_timeout, | 2717 | .ndo_tx_timeout = emac_tx_timeout, |
2688 | .ndo_validate_addr = eth_validate_addr, | 2718 | .ndo_validate_addr = eth_validate_addr, |
2689 | .ndo_set_mac_address = eth_mac_addr, | 2719 | .ndo_set_mac_address = emac_set_mac_address, |
2690 | .ndo_start_xmit = emac_start_xmit, | 2720 | .ndo_start_xmit = emac_start_xmit, |
2691 | .ndo_change_mtu = eth_change_mtu, | 2721 | .ndo_change_mtu = eth_change_mtu, |
2692 | }; | 2722 | }; |
@@ -2699,7 +2729,7 @@ static const struct net_device_ops emac_gige_netdev_ops = { | |||
2699 | .ndo_do_ioctl = emac_ioctl, | 2729 | .ndo_do_ioctl = emac_ioctl, |
2700 | .ndo_tx_timeout = emac_tx_timeout, | 2730 | .ndo_tx_timeout = emac_tx_timeout, |
2701 | .ndo_validate_addr = eth_validate_addr, | 2731 | .ndo_validate_addr = eth_validate_addr, |
2702 | .ndo_set_mac_address = eth_mac_addr, | 2732 | .ndo_set_mac_address = emac_set_mac_address, |
2703 | .ndo_start_xmit = emac_start_xmit_sg, | 2733 | .ndo_start_xmit = emac_start_xmit_sg, |
2704 | .ndo_change_mtu = emac_change_mtu, | 2734 | .ndo_change_mtu = emac_change_mtu, |
2705 | }; | 2735 | }; |
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index 7fd4d54599e4..6b03c8553e59 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c | |||
@@ -2032,7 +2032,8 @@ const struct e1000_info e1000_82574_info = { | |||
2032 | | FLAG2_DISABLE_ASPM_L0S | 2032 | | FLAG2_DISABLE_ASPM_L0S |
2033 | | FLAG2_DISABLE_ASPM_L1 | 2033 | | FLAG2_DISABLE_ASPM_L1 |
2034 | | FLAG2_NO_DISABLE_RX | 2034 | | FLAG2_NO_DISABLE_RX |
2035 | | FLAG2_DMA_BURST, | 2035 | | FLAG2_DMA_BURST |
2036 | | FLAG2_CHECK_SYSTIM_OVERFLOW, | ||
2036 | .pba = 32, | 2037 | .pba = 32, |
2037 | .max_hw_frame_size = DEFAULT_JUMBO, | 2038 | .max_hw_frame_size = DEFAULT_JUMBO, |
2038 | .get_variants = e1000_get_variants_82571, | 2039 | .get_variants = e1000_get_variants_82571, |
@@ -2053,7 +2054,8 @@ const struct e1000_info e1000_82583_info = { | |||
2053 | | FLAG_HAS_CTRLEXT_ON_LOAD, | 2054 | | FLAG_HAS_CTRLEXT_ON_LOAD, |
2054 | .flags2 = FLAG2_DISABLE_ASPM_L0S | 2055 | .flags2 = FLAG2_DISABLE_ASPM_L0S |
2055 | | FLAG2_DISABLE_ASPM_L1 | 2056 | | FLAG2_DISABLE_ASPM_L1 |
2056 | | FLAG2_NO_DISABLE_RX, | 2057 | | FLAG2_NO_DISABLE_RX |
2058 | | FLAG2_CHECK_SYSTIM_OVERFLOW, | ||
2057 | .pba = 32, | 2059 | .pba = 32, |
2058 | .max_hw_frame_size = DEFAULT_JUMBO, | 2060 | .max_hw_frame_size = DEFAULT_JUMBO, |
2059 | .get_variants = e1000_get_variants_82571, | 2061 | .get_variants = e1000_get_variants_82571, |
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index ef96cd11d6d2..879cca47b021 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h | |||
@@ -452,6 +452,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca); | |||
452 | #define FLAG2_PCIM2PCI_ARBITER_WA BIT(11) | 452 | #define FLAG2_PCIM2PCI_ARBITER_WA BIT(11) |
453 | #define FLAG2_DFLT_CRC_STRIPPING BIT(12) | 453 | #define FLAG2_DFLT_CRC_STRIPPING BIT(12) |
454 | #define FLAG2_CHECK_RX_HWTSTAMP BIT(13) | 454 | #define FLAG2_CHECK_RX_HWTSTAMP BIT(13) |
455 | #define FLAG2_CHECK_SYSTIM_OVERFLOW BIT(14) | ||
455 | 456 | ||
456 | #define E1000_RX_DESC_PS(R, i) \ | 457 | #define E1000_RX_DESC_PS(R, i) \ |
457 | (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) | 458 | (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) |
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 3e11322d8d58..f3aaca743ea3 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c | |||
@@ -5885,7 +5885,8 @@ const struct e1000_info e1000_pch_lpt_info = { | |||
5885 | | FLAG_HAS_JUMBO_FRAMES | 5885 | | FLAG_HAS_JUMBO_FRAMES |
5886 | | FLAG_APME_IN_WUC, | 5886 | | FLAG_APME_IN_WUC, |
5887 | .flags2 = FLAG2_HAS_PHY_STATS | 5887 | .flags2 = FLAG2_HAS_PHY_STATS |
5888 | | FLAG2_HAS_EEE, | 5888 | | FLAG2_HAS_EEE |
5889 | | FLAG2_CHECK_SYSTIM_OVERFLOW, | ||
5889 | .pba = 26, | 5890 | .pba = 26, |
5890 | .max_hw_frame_size = 9022, | 5891 | .max_hw_frame_size = 9022, |
5891 | .get_variants = e1000_get_variants_ich8lan, | 5892 | .get_variants = e1000_get_variants_ich8lan, |
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 02f443958f31..7017281ba2dc 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c | |||
@@ -4303,6 +4303,42 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter) | |||
4303 | } | 4303 | } |
4304 | 4304 | ||
4305 | /** | 4305 | /** |
4306 | * e1000e_sanitize_systim - sanitize raw cycle counter reads | ||
4307 | * @hw: pointer to the HW structure | ||
4308 | * @systim: cycle_t value read, sanitized and returned | ||
4309 | * | ||
4310 | * Errata for 82574/82583 possible bad bits read from SYSTIMH/L: | ||
4311 | * check to see that the time is incrementing at a reasonable | ||
4312 | * rate and is a multiple of incvalue. | ||
4313 | **/ | ||
4314 | static cycle_t e1000e_sanitize_systim(struct e1000_hw *hw, cycle_t systim) | ||
4315 | { | ||
4316 | u64 time_delta, rem, temp; | ||
4317 | cycle_t systim_next; | ||
4318 | u32 incvalue; | ||
4319 | int i; | ||
4320 | |||
4321 | incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK; | ||
4322 | for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) { | ||
4323 | /* latch SYSTIMH on read of SYSTIML */ | ||
4324 | systim_next = (cycle_t)er32(SYSTIML); | ||
4325 | systim_next |= (cycle_t)er32(SYSTIMH) << 32; | ||
4326 | |||
4327 | time_delta = systim_next - systim; | ||
4328 | temp = time_delta; | ||
4329 | /* VMWare users have seen incvalue of zero, don't div / 0 */ | ||
4330 | rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0); | ||
4331 | |||
4332 | systim = systim_next; | ||
4333 | |||
4334 | if ((time_delta < E1000_82574_SYSTIM_EPSILON) && (rem == 0)) | ||
4335 | break; | ||
4336 | } | ||
4337 | |||
4338 | return systim; | ||
4339 | } | ||
4340 | |||
4341 | /** | ||
4306 | * e1000e_cyclecounter_read - read raw cycle counter (used by time counter) | 4342 | * e1000e_cyclecounter_read - read raw cycle counter (used by time counter) |
4307 | * @cc: cyclecounter structure | 4343 | * @cc: cyclecounter structure |
4308 | **/ | 4344 | **/ |
@@ -4312,7 +4348,7 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc) | |||
4312 | cc); | 4348 | cc); |
4313 | struct e1000_hw *hw = &adapter->hw; | 4349 | struct e1000_hw *hw = &adapter->hw; |
4314 | u32 systimel, systimeh; | 4350 | u32 systimel, systimeh; |
4315 | cycle_t systim, systim_next; | 4351 | cycle_t systim; |
4316 | /* SYSTIMH latching upon SYSTIML read does not work well. | 4352 | /* SYSTIMH latching upon SYSTIML read does not work well. |
4317 | * This means that if SYSTIML overflows after we read it but before | 4353 | * This means that if SYSTIML overflows after we read it but before |
4318 | * we read SYSTIMH, the value of SYSTIMH has been incremented and we | 4354 | * we read SYSTIMH, the value of SYSTIMH has been incremented and we |
@@ -4335,33 +4371,9 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc) | |||
4335 | systim = (cycle_t)systimel; | 4371 | systim = (cycle_t)systimel; |
4336 | systim |= (cycle_t)systimeh << 32; | 4372 | systim |= (cycle_t)systimeh << 32; |
4337 | 4373 | ||
4338 | if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) { | 4374 | if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW) |
4339 | u64 time_delta, rem, temp; | 4375 | systim = e1000e_sanitize_systim(hw, systim); |
4340 | u32 incvalue; | ||
4341 | int i; | ||
4342 | |||
4343 | /* errata for 82574/82583 possible bad bits read from SYSTIMH/L | ||
4344 | * check to see that the time is incrementing at a reasonable | ||
4345 | * rate and is a multiple of incvalue | ||
4346 | */ | ||
4347 | incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK; | ||
4348 | for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) { | ||
4349 | /* latch SYSTIMH on read of SYSTIML */ | ||
4350 | systim_next = (cycle_t)er32(SYSTIML); | ||
4351 | systim_next |= (cycle_t)er32(SYSTIMH) << 32; | ||
4352 | |||
4353 | time_delta = systim_next - systim; | ||
4354 | temp = time_delta; | ||
4355 | /* VMWare users have seen incvalue of zero, don't div / 0 */ | ||
4356 | rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0); | ||
4357 | |||
4358 | systim = systim_next; | ||
4359 | 4376 | ||
4360 | if ((time_delta < E1000_82574_SYSTIM_EPSILON) && | ||
4361 | (rem == 0)) | ||
4362 | break; | ||
4363 | } | ||
4364 | } | ||
4365 | return systim; | 4377 | return systim; |
4366 | } | 4378 | } |
4367 | 4379 | ||
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c index e1370c556a3c..618f18436618 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c | |||
@@ -199,6 +199,7 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi) | |||
199 | void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi) | 199 | void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi) |
200 | { | 200 | { |
201 | struct i40e_client_instance *cdev; | 201 | struct i40e_client_instance *cdev; |
202 | int ret = 0; | ||
202 | 203 | ||
203 | if (!vsi) | 204 | if (!vsi) |
204 | return; | 205 | return; |
@@ -211,7 +212,14 @@ void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi) | |||
211 | "Cannot locate client instance open routine\n"); | 212 | "Cannot locate client instance open routine\n"); |
212 | continue; | 213 | continue; |
213 | } | 214 | } |
214 | cdev->client->ops->open(&cdev->lan_info, cdev->client); | 215 | if (!(test_bit(__I40E_CLIENT_INSTANCE_OPENED, |
216 | &cdev->state))) { | ||
217 | ret = cdev->client->ops->open(&cdev->lan_info, | ||
218 | cdev->client); | ||
219 | if (!ret) | ||
220 | set_bit(__I40E_CLIENT_INSTANCE_OPENED, | ||
221 | &cdev->state); | ||
222 | } | ||
215 | } | 223 | } |
216 | } | 224 | } |
217 | mutex_unlock(&i40e_client_instance_mutex); | 225 | mutex_unlock(&i40e_client_instance_mutex); |
@@ -407,12 +415,14 @@ struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf, | |||
407 | * i40e_client_add_instance - add a client instance struct to the instance list | 415 | * i40e_client_add_instance - add a client instance struct to the instance list |
408 | * @pf: pointer to the board struct | 416 | * @pf: pointer to the board struct |
409 | * @client: pointer to a client struct in the client list. | 417 | * @client: pointer to a client struct in the client list. |
418 | * @existing: if there was already an existing instance | ||
410 | * | 419 | * |
411 | * Returns cdev ptr on success, NULL on failure | 420 | * Returns cdev ptr on success or if already exists, NULL on failure |
412 | **/ | 421 | **/ |
413 | static | 422 | static |
414 | struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf, | 423 | struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf, |
415 | struct i40e_client *client) | 424 | struct i40e_client *client, |
425 | bool *existing) | ||
416 | { | 426 | { |
417 | struct i40e_client_instance *cdev; | 427 | struct i40e_client_instance *cdev; |
418 | struct netdev_hw_addr *mac = NULL; | 428 | struct netdev_hw_addr *mac = NULL; |
@@ -421,7 +431,7 @@ struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf, | |||
421 | mutex_lock(&i40e_client_instance_mutex); | 431 | mutex_lock(&i40e_client_instance_mutex); |
422 | list_for_each_entry(cdev, &i40e_client_instances, list) { | 432 | list_for_each_entry(cdev, &i40e_client_instances, list) { |
423 | if ((cdev->lan_info.pf == pf) && (cdev->client == client)) { | 433 | if ((cdev->lan_info.pf == pf) && (cdev->client == client)) { |
424 | cdev = NULL; | 434 | *existing = true; |
425 | goto out; | 435 | goto out; |
426 | } | 436 | } |
427 | } | 437 | } |
@@ -505,6 +515,7 @@ void i40e_client_subtask(struct i40e_pf *pf) | |||
505 | { | 515 | { |
506 | struct i40e_client_instance *cdev; | 516 | struct i40e_client_instance *cdev; |
507 | struct i40e_client *client; | 517 | struct i40e_client *client; |
518 | bool existing = false; | ||
508 | int ret = 0; | 519 | int ret = 0; |
509 | 520 | ||
510 | if (!(pf->flags & I40E_FLAG_SERVICE_CLIENT_REQUESTED)) | 521 | if (!(pf->flags & I40E_FLAG_SERVICE_CLIENT_REQUESTED)) |
@@ -528,18 +539,25 @@ void i40e_client_subtask(struct i40e_pf *pf) | |||
528 | /* check if L2 VSI is up, if not we are not ready */ | 539 | /* check if L2 VSI is up, if not we are not ready */ |
529 | if (test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state)) | 540 | if (test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state)) |
530 | continue; | 541 | continue; |
542 | } else { | ||
543 | dev_warn(&pf->pdev->dev, "This client %s is being instanciated at probe\n", | ||
544 | client->name); | ||
531 | } | 545 | } |
532 | 546 | ||
533 | /* Add the client instance to the instance list */ | 547 | /* Add the client instance to the instance list */ |
534 | cdev = i40e_client_add_instance(pf, client); | 548 | cdev = i40e_client_add_instance(pf, client, &existing); |
535 | if (!cdev) | 549 | if (!cdev) |
536 | continue; | 550 | continue; |
537 | 551 | ||
538 | /* Also up the ref_cnt of no. of instances of this client */ | 552 | if (!existing) { |
539 | atomic_inc(&client->ref_cnt); | 553 | /* Also up the ref_cnt for no. of instances of this |
540 | dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n", | 554 | * client. |
541 | client->name, pf->hw.pf_id, | 555 | */ |
542 | pf->hw.bus.device, pf->hw.bus.func); | 556 | atomic_inc(&client->ref_cnt); |
557 | dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n", | ||
558 | client->name, pf->hw.pf_id, | ||
559 | pf->hw.bus.device, pf->hw.bus.func); | ||
560 | } | ||
543 | 561 | ||
544 | /* Send an Open request to the client */ | 562 | /* Send an Open request to the client */ |
545 | atomic_inc(&cdev->ref_cnt); | 563 | atomic_inc(&cdev->ref_cnt); |
@@ -588,7 +606,8 @@ int i40e_lan_add_device(struct i40e_pf *pf) | |||
588 | pf->hw.pf_id, pf->hw.bus.device, pf->hw.bus.func); | 606 | pf->hw.pf_id, pf->hw.bus.device, pf->hw.bus.func); |
589 | 607 | ||
590 | /* Since in some cases register may have happened before a device gets | 608 | /* Since in some cases register may have happened before a device gets |
591 | * added, we can schedule a subtask to go initiate the clients. | 609 | * added, we can schedule a subtask to go initiate the clients if |
610 | * they can be launched at probe time. | ||
592 | */ | 611 | */ |
593 | pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED; | 612 | pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED; |
594 | i40e_service_event_schedule(pf); | 613 | i40e_service_event_schedule(pf); |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 81c99e1be708..d0b3a1bb82ca 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
@@ -4554,23 +4554,38 @@ static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf) | |||
4554 | **/ | 4554 | **/ |
4555 | static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg) | 4555 | static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg) |
4556 | { | 4556 | { |
4557 | int i, tc_unused = 0; | ||
4557 | u8 num_tc = 0; | 4558 | u8 num_tc = 0; |
4558 | int i; | 4559 | u8 ret = 0; |
4559 | 4560 | ||
4560 | /* Scan the ETS Config Priority Table to find | 4561 | /* Scan the ETS Config Priority Table to find |
4561 | * traffic class enabled for a given priority | 4562 | * traffic class enabled for a given priority |
4562 | * and use the traffic class index to get the | 4563 | * and create a bitmask of enabled TCs |
4563 | * number of traffic classes enabled | ||
4564 | */ | 4564 | */ |
4565 | for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { | 4565 | for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) |
4566 | if (dcbcfg->etscfg.prioritytable[i] > num_tc) | 4566 | num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]); |
4567 | num_tc = dcbcfg->etscfg.prioritytable[i]; | ||
4568 | } | ||
4569 | 4567 | ||
4570 | /* Traffic class index starts from zero so | 4568 | /* Now scan the bitmask to check for |
4571 | * increment to return the actual count | 4569 | * contiguous TCs starting with TC0 |
4572 | */ | 4570 | */ |
4573 | return num_tc + 1; | 4571 | for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { |
4572 | if (num_tc & BIT(i)) { | ||
4573 | if (!tc_unused) { | ||
4574 | ret++; | ||
4575 | } else { | ||
4576 | pr_err("Non-contiguous TC - Disabling DCB\n"); | ||
4577 | return 1; | ||
4578 | } | ||
4579 | } else { | ||
4580 | tc_unused = 1; | ||
4581 | } | ||
4582 | } | ||
4583 | |||
4584 | /* There is always at least TC0 */ | ||
4585 | if (!ret) | ||
4586 | ret = 1; | ||
4587 | |||
4588 | return ret; | ||
4574 | } | 4589 | } |
4575 | 4590 | ||
4576 | /** | 4591 | /** |
@@ -5098,9 +5113,13 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) | |||
5098 | DCB_CAP_DCBX_VER_IEEE; | 5113 | DCB_CAP_DCBX_VER_IEEE; |
5099 | 5114 | ||
5100 | pf->flags |= I40E_FLAG_DCB_CAPABLE; | 5115 | pf->flags |= I40E_FLAG_DCB_CAPABLE; |
5101 | /* Enable DCB tagging only when more than one TC */ | 5116 | /* Enable DCB tagging only when more than one TC |
5117 | * or explicitly disable if only one TC | ||
5118 | */ | ||
5102 | if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) | 5119 | if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) |
5103 | pf->flags |= I40E_FLAG_DCB_ENABLED; | 5120 | pf->flags |= I40E_FLAG_DCB_ENABLED; |
5121 | else | ||
5122 | pf->flags &= ~I40E_FLAG_DCB_ENABLED; | ||
5104 | dev_dbg(&pf->pdev->dev, | 5123 | dev_dbg(&pf->pdev->dev, |
5105 | "DCBX offload is supported for this PF.\n"); | 5124 | "DCBX offload is supported for this PF.\n"); |
5106 | } | 5125 | } |
@@ -5416,7 +5435,6 @@ int i40e_open(struct net_device *netdev) | |||
5416 | wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16); | 5435 | wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16); |
5417 | 5436 | ||
5418 | udp_tunnel_get_rx_info(netdev); | 5437 | udp_tunnel_get_rx_info(netdev); |
5419 | i40e_notify_client_of_netdev_open(vsi); | ||
5420 | 5438 | ||
5421 | return 0; | 5439 | return 0; |
5422 | } | 5440 | } |
@@ -5702,7 +5720,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, | |||
5702 | u8 type; | 5720 | u8 type; |
5703 | 5721 | ||
5704 | /* Not DCB capable or capability disabled */ | 5722 | /* Not DCB capable or capability disabled */ |
5705 | if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) | 5723 | if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) |
5706 | return ret; | 5724 | return ret; |
5707 | 5725 | ||
5708 | /* Ignore if event is not for Nearest Bridge */ | 5726 | /* Ignore if event is not for Nearest Bridge */ |
@@ -7882,6 +7900,7 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf) | |||
7882 | #endif | 7900 | #endif |
7883 | I40E_FLAG_RSS_ENABLED | | 7901 | I40E_FLAG_RSS_ENABLED | |
7884 | I40E_FLAG_DCB_CAPABLE | | 7902 | I40E_FLAG_DCB_CAPABLE | |
7903 | I40E_FLAG_DCB_ENABLED | | ||
7885 | I40E_FLAG_SRIOV_ENABLED | | 7904 | I40E_FLAG_SRIOV_ENABLED | |
7886 | I40E_FLAG_FD_SB_ENABLED | | 7905 | I40E_FLAG_FD_SB_ENABLED | |
7887 | I40E_FLAG_FD_ATR_ENABLED | | 7906 | I40E_FLAG_FD_ATR_ENABLED | |
@@ -10488,6 +10507,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) | |||
10488 | I40E_FLAG_FD_SB_ENABLED | | 10507 | I40E_FLAG_FD_SB_ENABLED | |
10489 | I40E_FLAG_FD_ATR_ENABLED | | 10508 | I40E_FLAG_FD_ATR_ENABLED | |
10490 | I40E_FLAG_DCB_CAPABLE | | 10509 | I40E_FLAG_DCB_CAPABLE | |
10510 | I40E_FLAG_DCB_ENABLED | | ||
10491 | I40E_FLAG_SRIOV_ENABLED | | 10511 | I40E_FLAG_SRIOV_ENABLED | |
10492 | I40E_FLAG_VMDQ_ENABLED); | 10512 | I40E_FLAG_VMDQ_ENABLED); |
10493 | } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED | | 10513 | } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED | |
@@ -10511,7 +10531,8 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) | |||
10511 | /* Not enough queues for all TCs */ | 10531 | /* Not enough queues for all TCs */ |
10512 | if ((pf->flags & I40E_FLAG_DCB_CAPABLE) && | 10532 | if ((pf->flags & I40E_FLAG_DCB_CAPABLE) && |
10513 | (queues_left < I40E_MAX_TRAFFIC_CLASS)) { | 10533 | (queues_left < I40E_MAX_TRAFFIC_CLASS)) { |
10514 | pf->flags &= ~I40E_FLAG_DCB_CAPABLE; | 10534 | pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | |
10535 | I40E_FLAG_DCB_ENABLED); | ||
10515 | dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); | 10536 | dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); |
10516 | } | 10537 | } |
10517 | pf->num_lan_qps = max_t(int, pf->rss_size_max, | 10538 | pf->num_lan_qps = max_t(int, pf->rss_size_max, |
@@ -10908,7 +10929,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
10908 | err = i40e_init_pf_dcb(pf); | 10929 | err = i40e_init_pf_dcb(pf); |
10909 | if (err) { | 10930 | if (err) { |
10910 | dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err); | 10931 | dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err); |
10911 | pf->flags &= ~I40E_FLAG_DCB_CAPABLE; | 10932 | pf->flags &= ~(I40E_FLAG_DCB_CAPABLE & I40E_FLAG_DCB_ENABLED); |
10912 | /* Continue without DCB enabled */ | 10933 | /* Continue without DCB enabled */ |
10913 | } | 10934 | } |
10914 | #endif /* CONFIG_I40E_DCB */ | 10935 | #endif /* CONFIG_I40E_DCB */ |
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index e61b647f5f2a..336c103ae374 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c | |||
@@ -744,7 +744,8 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) | |||
744 | } | 744 | } |
745 | } | 745 | } |
746 | 746 | ||
747 | shhwtstamps.hwtstamp = ktime_sub_ns(shhwtstamps.hwtstamp, adjust); | 747 | shhwtstamps.hwtstamp = |
748 | ktime_add_ns(shhwtstamps.hwtstamp, adjust); | ||
748 | 749 | ||
749 | skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); | 750 | skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); |
750 | dev_kfree_skb_any(adapter->ptp_tx_skb); | 751 | dev_kfree_skb_any(adapter->ptp_tx_skb); |
@@ -767,13 +768,32 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, | |||
767 | struct sk_buff *skb) | 768 | struct sk_buff *skb) |
768 | { | 769 | { |
769 | __le64 *regval = (__le64 *)va; | 770 | __le64 *regval = (__le64 *)va; |
771 | struct igb_adapter *adapter = q_vector->adapter; | ||
772 | int adjust = 0; | ||
770 | 773 | ||
771 | /* The timestamp is recorded in little endian format. | 774 | /* The timestamp is recorded in little endian format. |
772 | * DWORD: 0 1 2 3 | 775 | * DWORD: 0 1 2 3 |
773 | * Field: Reserved Reserved SYSTIML SYSTIMH | 776 | * Field: Reserved Reserved SYSTIML SYSTIMH |
774 | */ | 777 | */ |
775 | igb_ptp_systim_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb), | 778 | igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), |
776 | le64_to_cpu(regval[1])); | 779 | le64_to_cpu(regval[1])); |
780 | |||
781 | /* adjust timestamp for the RX latency based on link speed */ | ||
782 | if (adapter->hw.mac.type == e1000_i210) { | ||
783 | switch (adapter->link_speed) { | ||
784 | case SPEED_10: | ||
785 | adjust = IGB_I210_RX_LATENCY_10; | ||
786 | break; | ||
787 | case SPEED_100: | ||
788 | adjust = IGB_I210_RX_LATENCY_100; | ||
789 | break; | ||
790 | case SPEED_1000: | ||
791 | adjust = IGB_I210_RX_LATENCY_1000; | ||
792 | break; | ||
793 | } | ||
794 | } | ||
795 | skb_hwtstamps(skb)->hwtstamp = | ||
796 | ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust); | ||
777 | } | 797 | } |
778 | 798 | ||
779 | /** | 799 | /** |
@@ -825,7 +845,7 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, | |||
825 | } | 845 | } |
826 | } | 846 | } |
827 | skb_hwtstamps(skb)->hwtstamp = | 847 | skb_hwtstamps(skb)->hwtstamp = |
828 | ktime_add_ns(skb_hwtstamps(skb)->hwtstamp, adjust); | 848 | ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust); |
829 | 849 | ||
830 | /* Update the last_rx_timestamp timer in order to enable watchdog check | 850 | /* Update the last_rx_timestamp timer in order to enable watchdog check |
831 | * for error case of latched timestamp on a dropped packet. | 851 | * for error case of latched timestamp on a dropped packet. |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index b4217f30e89c..c47b605e8651 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | |||
@@ -2958,8 +2958,10 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) | |||
2958 | } | 2958 | } |
2959 | 2959 | ||
2960 | /* was that the last pool using this rar? */ | 2960 | /* was that the last pool using this rar? */ |
2961 | if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0) | 2961 | if (mpsar_lo == 0 && mpsar_hi == 0 && |
2962 | rar != 0 && rar != hw->mac.san_mac_rar_index) | ||
2962 | hw->mac.ops.clear_rar(hw, rar); | 2963 | hw->mac.ops.clear_rar(hw, rar); |
2964 | |||
2963 | return 0; | 2965 | return 0; |
2964 | } | 2966 | } |
2965 | 2967 | ||
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 5418c69a7463..b4f03748adc0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -4100,6 +4100,8 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) | |||
4100 | struct ixgbe_hw *hw = &adapter->hw; | 4100 | struct ixgbe_hw *hw = &adapter->hw; |
4101 | u32 vlnctrl, i; | 4101 | u32 vlnctrl, i; |
4102 | 4102 | ||
4103 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); | ||
4104 | |||
4103 | switch (hw->mac.type) { | 4105 | switch (hw->mac.type) { |
4104 | case ixgbe_mac_82599EB: | 4106 | case ixgbe_mac_82599EB: |
4105 | case ixgbe_mac_X540: | 4107 | case ixgbe_mac_X540: |
@@ -4112,8 +4114,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) | |||
4112 | /* fall through */ | 4114 | /* fall through */ |
4113 | case ixgbe_mac_82598EB: | 4115 | case ixgbe_mac_82598EB: |
4114 | /* legacy case, we can just disable VLAN filtering */ | 4116 | /* legacy case, we can just disable VLAN filtering */ |
4115 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); | 4117 | vlnctrl &= ~IXGBE_VLNCTRL_VFE; |
4116 | vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); | ||
4117 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); | 4118 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); |
4118 | return; | 4119 | return; |
4119 | } | 4120 | } |
@@ -4125,6 +4126,10 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) | |||
4125 | /* Set flag so we don't redo unnecessary work */ | 4126 | /* Set flag so we don't redo unnecessary work */ |
4126 | adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC; | 4127 | adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC; |
4127 | 4128 | ||
4129 | /* For VMDq and SR-IOV we must leave VLAN filtering enabled */ | ||
4130 | vlnctrl |= IXGBE_VLNCTRL_VFE; | ||
4131 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); | ||
4132 | |||
4128 | /* Add PF to all active pools */ | 4133 | /* Add PF to all active pools */ |
4129 | for (i = IXGBE_VLVF_ENTRIES; --i;) { | 4134 | for (i = IXGBE_VLVF_ENTRIES; --i;) { |
4130 | u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32); | 4135 | u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32); |
@@ -4191,6 +4196,11 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter) | |||
4191 | struct ixgbe_hw *hw = &adapter->hw; | 4196 | struct ixgbe_hw *hw = &adapter->hw; |
4192 | u32 vlnctrl, i; | 4197 | u32 vlnctrl, i; |
4193 | 4198 | ||
4199 | /* Set VLAN filtering to enabled */ | ||
4200 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); | ||
4201 | vlnctrl |= IXGBE_VLNCTRL_VFE; | ||
4202 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); | ||
4203 | |||
4194 | switch (hw->mac.type) { | 4204 | switch (hw->mac.type) { |
4195 | case ixgbe_mac_82599EB: | 4205 | case ixgbe_mac_82599EB: |
4196 | case ixgbe_mac_X540: | 4206 | case ixgbe_mac_X540: |
@@ -4202,10 +4212,6 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter) | |||
4202 | break; | 4212 | break; |
4203 | /* fall through */ | 4213 | /* fall through */ |
4204 | case ixgbe_mac_82598EB: | 4214 | case ixgbe_mac_82598EB: |
4205 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); | ||
4206 | vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; | ||
4207 | vlnctrl |= IXGBE_VLNCTRL_VFE; | ||
4208 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); | ||
4209 | return; | 4215 | return; |
4210 | } | 4216 | } |
4211 | 4217 | ||
@@ -8390,12 +8396,14 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter, | |||
8390 | struct tcf_exts *exts, u64 *action, u8 *queue) | 8396 | struct tcf_exts *exts, u64 *action, u8 *queue) |
8391 | { | 8397 | { |
8392 | const struct tc_action *a; | 8398 | const struct tc_action *a; |
8399 | LIST_HEAD(actions); | ||
8393 | int err; | 8400 | int err; |
8394 | 8401 | ||
8395 | if (tc_no_actions(exts)) | 8402 | if (tc_no_actions(exts)) |
8396 | return -EINVAL; | 8403 | return -EINVAL; |
8397 | 8404 | ||
8398 | tc_for_each_action(a, exts) { | 8405 | tcf_exts_to_list(exts, &actions); |
8406 | list_for_each_entry(a, &actions, list) { | ||
8399 | 8407 | ||
8400 | /* Drop action */ | 8408 | /* Drop action */ |
8401 | if (is_tcf_gact_shot(a)) { | 8409 | if (is_tcf_gact_shot(a)) { |
@@ -9517,6 +9525,7 @@ skip_sriov: | |||
9517 | 9525 | ||
9518 | /* copy netdev features into list of user selectable features */ | 9526 | /* copy netdev features into list of user selectable features */ |
9519 | netdev->hw_features |= netdev->features | | 9527 | netdev->hw_features |= netdev->features | |
9528 | NETIF_F_HW_VLAN_CTAG_FILTER | | ||
9520 | NETIF_F_HW_VLAN_CTAG_RX | | 9529 | NETIF_F_HW_VLAN_CTAG_RX | |
9521 | NETIF_F_HW_VLAN_CTAG_TX | | 9530 | NETIF_F_HW_VLAN_CTAG_TX | |
9522 | NETIF_F_RXALL | | 9531 | NETIF_F_RXALL | |
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index b57ae3afb994..3743af8f1ded 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c | |||
@@ -50,6 +50,10 @@ static const struct mtk_ethtool_stats { | |||
50 | MTK_ETHTOOL_STAT(rx_flow_control_packets), | 50 | MTK_ETHTOOL_STAT(rx_flow_control_packets), |
51 | }; | 51 | }; |
52 | 52 | ||
53 | static const char * const mtk_clks_source_name[] = { | ||
54 | "ethif", "esw", "gp1", "gp2" | ||
55 | }; | ||
56 | |||
53 | void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg) | 57 | void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg) |
54 | { | 58 | { |
55 | __raw_writel(val, eth->base + reg); | 59 | __raw_writel(val, eth->base + reg); |
@@ -245,12 +249,16 @@ static int mtk_phy_connect(struct mtk_mac *mac) | |||
245 | case PHY_INTERFACE_MODE_MII: | 249 | case PHY_INTERFACE_MODE_MII: |
246 | ge_mode = 1; | 250 | ge_mode = 1; |
247 | break; | 251 | break; |
248 | case PHY_INTERFACE_MODE_RMII: | 252 | case PHY_INTERFACE_MODE_REVMII: |
249 | ge_mode = 2; | 253 | ge_mode = 2; |
250 | break; | 254 | break; |
255 | case PHY_INTERFACE_MODE_RMII: | ||
256 | if (!mac->id) | ||
257 | goto err_phy; | ||
258 | ge_mode = 3; | ||
259 | break; | ||
251 | default: | 260 | default: |
252 | dev_err(eth->dev, "invalid phy_mode\n"); | 261 | goto err_phy; |
253 | return -1; | ||
254 | } | 262 | } |
255 | 263 | ||
256 | /* put the gmac into the right mode */ | 264 | /* put the gmac into the right mode */ |
@@ -263,19 +271,31 @@ static int mtk_phy_connect(struct mtk_mac *mac) | |||
263 | mac->phy_dev->autoneg = AUTONEG_ENABLE; | 271 | mac->phy_dev->autoneg = AUTONEG_ENABLE; |
264 | mac->phy_dev->speed = 0; | 272 | mac->phy_dev->speed = 0; |
265 | mac->phy_dev->duplex = 0; | 273 | mac->phy_dev->duplex = 0; |
274 | |||
275 | if (of_phy_is_fixed_link(mac->of_node)) | ||
276 | mac->phy_dev->supported |= | ||
277 | SUPPORTED_Pause | SUPPORTED_Asym_Pause; | ||
278 | |||
266 | mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause | | 279 | mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause | |
267 | SUPPORTED_Asym_Pause; | 280 | SUPPORTED_Asym_Pause; |
268 | mac->phy_dev->advertising = mac->phy_dev->supported | | 281 | mac->phy_dev->advertising = mac->phy_dev->supported | |
269 | ADVERTISED_Autoneg; | 282 | ADVERTISED_Autoneg; |
270 | phy_start_aneg(mac->phy_dev); | 283 | phy_start_aneg(mac->phy_dev); |
271 | 284 | ||
285 | of_node_put(np); | ||
286 | |||
272 | return 0; | 287 | return 0; |
288 | |||
289 | err_phy: | ||
290 | of_node_put(np); | ||
291 | dev_err(eth->dev, "invalid phy_mode\n"); | ||
292 | return -EINVAL; | ||
273 | } | 293 | } |
274 | 294 | ||
275 | static int mtk_mdio_init(struct mtk_eth *eth) | 295 | static int mtk_mdio_init(struct mtk_eth *eth) |
276 | { | 296 | { |
277 | struct device_node *mii_np; | 297 | struct device_node *mii_np; |
278 | int err; | 298 | int ret; |
279 | 299 | ||
280 | mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus"); | 300 | mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus"); |
281 | if (!mii_np) { | 301 | if (!mii_np) { |
@@ -284,13 +304,13 @@ static int mtk_mdio_init(struct mtk_eth *eth) | |||
284 | } | 304 | } |
285 | 305 | ||
286 | if (!of_device_is_available(mii_np)) { | 306 | if (!of_device_is_available(mii_np)) { |
287 | err = 0; | 307 | ret = -ENODEV; |
288 | goto err_put_node; | 308 | goto err_put_node; |
289 | } | 309 | } |
290 | 310 | ||
291 | eth->mii_bus = mdiobus_alloc(); | 311 | eth->mii_bus = devm_mdiobus_alloc(eth->dev); |
292 | if (!eth->mii_bus) { | 312 | if (!eth->mii_bus) { |
293 | err = -ENOMEM; | 313 | ret = -ENOMEM; |
294 | goto err_put_node; | 314 | goto err_put_node; |
295 | } | 315 | } |
296 | 316 | ||
@@ -301,19 +321,11 @@ static int mtk_mdio_init(struct mtk_eth *eth) | |||
301 | eth->mii_bus->parent = eth->dev; | 321 | eth->mii_bus->parent = eth->dev; |
302 | 322 | ||
303 | snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name); | 323 | snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name); |
304 | err = of_mdiobus_register(eth->mii_bus, mii_np); | 324 | ret = of_mdiobus_register(eth->mii_bus, mii_np); |
305 | if (err) | ||
306 | goto err_free_bus; | ||
307 | |||
308 | return 0; | ||
309 | |||
310 | err_free_bus: | ||
311 | mdiobus_free(eth->mii_bus); | ||
312 | 325 | ||
313 | err_put_node: | 326 | err_put_node: |
314 | of_node_put(mii_np); | 327 | of_node_put(mii_np); |
315 | eth->mii_bus = NULL; | 328 | return ret; |
316 | return err; | ||
317 | } | 329 | } |
318 | 330 | ||
319 | static void mtk_mdio_cleanup(struct mtk_eth *eth) | 331 | static void mtk_mdio_cleanup(struct mtk_eth *eth) |
@@ -322,8 +334,6 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth) | |||
322 | return; | 334 | return; |
323 | 335 | ||
324 | mdiobus_unregister(eth->mii_bus); | 336 | mdiobus_unregister(eth->mii_bus); |
325 | of_node_put(eth->mii_bus->dev.of_node); | ||
326 | mdiobus_free(eth->mii_bus); | ||
327 | } | 337 | } |
328 | 338 | ||
329 | static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask) | 339 | static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask) |
@@ -542,15 +552,15 @@ static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring, | |||
542 | return &ring->buf[idx]; | 552 | return &ring->buf[idx]; |
543 | } | 553 | } |
544 | 554 | ||
545 | static void mtk_tx_unmap(struct device *dev, struct mtk_tx_buf *tx_buf) | 555 | static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf) |
546 | { | 556 | { |
547 | if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { | 557 | if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { |
548 | dma_unmap_single(dev, | 558 | dma_unmap_single(eth->dev, |
549 | dma_unmap_addr(tx_buf, dma_addr0), | 559 | dma_unmap_addr(tx_buf, dma_addr0), |
550 | dma_unmap_len(tx_buf, dma_len0), | 560 | dma_unmap_len(tx_buf, dma_len0), |
551 | DMA_TO_DEVICE); | 561 | DMA_TO_DEVICE); |
552 | } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) { | 562 | } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) { |
553 | dma_unmap_page(dev, | 563 | dma_unmap_page(eth->dev, |
554 | dma_unmap_addr(tx_buf, dma_addr0), | 564 | dma_unmap_addr(tx_buf, dma_addr0), |
555 | dma_unmap_len(tx_buf, dma_len0), | 565 | dma_unmap_len(tx_buf, dma_len0), |
556 | DMA_TO_DEVICE); | 566 | DMA_TO_DEVICE); |
@@ -572,14 +582,15 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, | |||
572 | dma_addr_t mapped_addr; | 582 | dma_addr_t mapped_addr; |
573 | unsigned int nr_frags; | 583 | unsigned int nr_frags; |
574 | int i, n_desc = 1; | 584 | int i, n_desc = 1; |
575 | u32 txd4 = 0; | 585 | u32 txd4 = 0, fport; |
576 | 586 | ||
577 | itxd = ring->next_free; | 587 | itxd = ring->next_free; |
578 | if (itxd == ring->last_free) | 588 | if (itxd == ring->last_free) |
579 | return -ENOMEM; | 589 | return -ENOMEM; |
580 | 590 | ||
581 | /* set the forward port */ | 591 | /* set the forward port */ |
582 | txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT; | 592 | fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT; |
593 | txd4 |= fport; | ||
583 | 594 | ||
584 | tx_buf = mtk_desc_to_tx_buf(ring, itxd); | 595 | tx_buf = mtk_desc_to_tx_buf(ring, itxd); |
585 | memset(tx_buf, 0, sizeof(*tx_buf)); | 596 | memset(tx_buf, 0, sizeof(*tx_buf)); |
@@ -595,9 +606,9 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, | |||
595 | if (skb_vlan_tag_present(skb)) | 606 | if (skb_vlan_tag_present(skb)) |
596 | txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb); | 607 | txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb); |
597 | 608 | ||
598 | mapped_addr = dma_map_single(&dev->dev, skb->data, | 609 | mapped_addr = dma_map_single(eth->dev, skb->data, |
599 | skb_headlen(skb), DMA_TO_DEVICE); | 610 | skb_headlen(skb), DMA_TO_DEVICE); |
600 | if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) | 611 | if (unlikely(dma_mapping_error(eth->dev, mapped_addr))) |
601 | return -ENOMEM; | 612 | return -ENOMEM; |
602 | 613 | ||
603 | WRITE_ONCE(itxd->txd1, mapped_addr); | 614 | WRITE_ONCE(itxd->txd1, mapped_addr); |
@@ -623,10 +634,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, | |||
623 | 634 | ||
624 | n_desc++; | 635 | n_desc++; |
625 | frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN); | 636 | frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN); |
626 | mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset, | 637 | mapped_addr = skb_frag_dma_map(eth->dev, frag, offset, |
627 | frag_map_size, | 638 | frag_map_size, |
628 | DMA_TO_DEVICE); | 639 | DMA_TO_DEVICE); |
629 | if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) | 640 | if (unlikely(dma_mapping_error(eth->dev, mapped_addr))) |
630 | goto err_dma; | 641 | goto err_dma; |
631 | 642 | ||
632 | if (i == nr_frags - 1 && | 643 | if (i == nr_frags - 1 && |
@@ -637,7 +648,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, | |||
637 | WRITE_ONCE(txd->txd3, (TX_DMA_SWC | | 648 | WRITE_ONCE(txd->txd3, (TX_DMA_SWC | |
638 | TX_DMA_PLEN0(frag_map_size) | | 649 | TX_DMA_PLEN0(frag_map_size) | |
639 | last_frag * TX_DMA_LS0)); | 650 | last_frag * TX_DMA_LS0)); |
640 | WRITE_ONCE(txd->txd4, 0); | 651 | WRITE_ONCE(txd->txd4, fport); |
641 | 652 | ||
642 | tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC; | 653 | tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC; |
643 | tx_buf = mtk_desc_to_tx_buf(ring, txd); | 654 | tx_buf = mtk_desc_to_tx_buf(ring, txd); |
@@ -679,7 +690,7 @@ err_dma: | |||
679 | tx_buf = mtk_desc_to_tx_buf(ring, itxd); | 690 | tx_buf = mtk_desc_to_tx_buf(ring, itxd); |
680 | 691 | ||
681 | /* unmap dma */ | 692 | /* unmap dma */ |
682 | mtk_tx_unmap(&dev->dev, tx_buf); | 693 | mtk_tx_unmap(eth, tx_buf); |
683 | 694 | ||
684 | itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; | 695 | itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; |
685 | itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2); | 696 | itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2); |
@@ -836,11 +847,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, | |||
836 | netdev->stats.rx_dropped++; | 847 | netdev->stats.rx_dropped++; |
837 | goto release_desc; | 848 | goto release_desc; |
838 | } | 849 | } |
839 | dma_addr = dma_map_single(ð->netdev[mac]->dev, | 850 | dma_addr = dma_map_single(eth->dev, |
840 | new_data + NET_SKB_PAD, | 851 | new_data + NET_SKB_PAD, |
841 | ring->buf_size, | 852 | ring->buf_size, |
842 | DMA_FROM_DEVICE); | 853 | DMA_FROM_DEVICE); |
843 | if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) { | 854 | if (unlikely(dma_mapping_error(eth->dev, dma_addr))) { |
844 | skb_free_frag(new_data); | 855 | skb_free_frag(new_data); |
845 | netdev->stats.rx_dropped++; | 856 | netdev->stats.rx_dropped++; |
846 | goto release_desc; | 857 | goto release_desc; |
@@ -849,13 +860,13 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, | |||
849 | /* receive data */ | 860 | /* receive data */ |
850 | skb = build_skb(data, ring->frag_size); | 861 | skb = build_skb(data, ring->frag_size); |
851 | if (unlikely(!skb)) { | 862 | if (unlikely(!skb)) { |
852 | put_page(virt_to_head_page(new_data)); | 863 | skb_free_frag(new_data); |
853 | netdev->stats.rx_dropped++; | 864 | netdev->stats.rx_dropped++; |
854 | goto release_desc; | 865 | goto release_desc; |
855 | } | 866 | } |
856 | skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); | 867 | skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); |
857 | 868 | ||
858 | dma_unmap_single(&netdev->dev, trxd.rxd1, | 869 | dma_unmap_single(eth->dev, trxd.rxd1, |
859 | ring->buf_size, DMA_FROM_DEVICE); | 870 | ring->buf_size, DMA_FROM_DEVICE); |
860 | pktlen = RX_DMA_GET_PLEN0(trxd.rxd2); | 871 | pktlen = RX_DMA_GET_PLEN0(trxd.rxd2); |
861 | skb->dev = netdev; | 872 | skb->dev = netdev; |
@@ -937,7 +948,7 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget) | |||
937 | done[mac]++; | 948 | done[mac]++; |
938 | budget--; | 949 | budget--; |
939 | } | 950 | } |
940 | mtk_tx_unmap(eth->dev, tx_buf); | 951 | mtk_tx_unmap(eth, tx_buf); |
941 | 952 | ||
942 | ring->last_free = desc; | 953 | ring->last_free = desc; |
943 | atomic_inc(&ring->free_count); | 954 | atomic_inc(&ring->free_count); |
@@ -1092,7 +1103,7 @@ static void mtk_tx_clean(struct mtk_eth *eth) | |||
1092 | 1103 | ||
1093 | if (ring->buf) { | 1104 | if (ring->buf) { |
1094 | for (i = 0; i < MTK_DMA_SIZE; i++) | 1105 | for (i = 0; i < MTK_DMA_SIZE; i++) |
1095 | mtk_tx_unmap(eth->dev, &ring->buf[i]); | 1106 | mtk_tx_unmap(eth, &ring->buf[i]); |
1096 | kfree(ring->buf); | 1107 | kfree(ring->buf); |
1097 | ring->buf = NULL; | 1108 | ring->buf = NULL; |
1098 | } | 1109 | } |
@@ -1490,10 +1501,7 @@ static void mtk_uninit(struct net_device *dev) | |||
1490 | struct mtk_eth *eth = mac->hw; | 1501 | struct mtk_eth *eth = mac->hw; |
1491 | 1502 | ||
1492 | phy_disconnect(mac->phy_dev); | 1503 | phy_disconnect(mac->phy_dev); |
1493 | mtk_mdio_cleanup(eth); | ||
1494 | mtk_irq_disable(eth, ~0); | 1504 | mtk_irq_disable(eth, ~0); |
1495 | free_irq(eth->irq[1], dev); | ||
1496 | free_irq(eth->irq[2], dev); | ||
1497 | } | 1505 | } |
1498 | 1506 | ||
1499 | static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | 1507 | static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
@@ -1751,6 +1759,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) | |||
1751 | goto free_netdev; | 1759 | goto free_netdev; |
1752 | } | 1760 | } |
1753 | spin_lock_init(&mac->hw_stats->stats_lock); | 1761 | spin_lock_init(&mac->hw_stats->stats_lock); |
1762 | u64_stats_init(&mac->hw_stats->syncp); | ||
1754 | mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET; | 1763 | mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET; |
1755 | 1764 | ||
1756 | SET_NETDEV_DEV(eth->netdev[id], eth->dev); | 1765 | SET_NETDEV_DEV(eth->netdev[id], eth->dev); |
@@ -1796,6 +1805,7 @@ static int mtk_probe(struct platform_device *pdev) | |||
1796 | if (!eth) | 1805 | if (!eth) |
1797 | return -ENOMEM; | 1806 | return -ENOMEM; |
1798 | 1807 | ||
1808 | eth->dev = &pdev->dev; | ||
1799 | eth->base = devm_ioremap_resource(&pdev->dev, res); | 1809 | eth->base = devm_ioremap_resource(&pdev->dev, res); |
1800 | if (IS_ERR(eth->base)) | 1810 | if (IS_ERR(eth->base)) |
1801 | return PTR_ERR(eth->base); | 1811 | return PTR_ERR(eth->base); |
@@ -1830,21 +1840,21 @@ static int mtk_probe(struct platform_device *pdev) | |||
1830 | return -ENXIO; | 1840 | return -ENXIO; |
1831 | } | 1841 | } |
1832 | } | 1842 | } |
1843 | for (i = 0; i < ARRAY_SIZE(eth->clks); i++) { | ||
1844 | eth->clks[i] = devm_clk_get(eth->dev, | ||
1845 | mtk_clks_source_name[i]); | ||
1846 | if (IS_ERR(eth->clks[i])) { | ||
1847 | if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) | ||
1848 | return -EPROBE_DEFER; | ||
1849 | return -ENODEV; | ||
1850 | } | ||
1851 | } | ||
1833 | 1852 | ||
1834 | eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif"); | 1853 | clk_prepare_enable(eth->clks[MTK_CLK_ETHIF]); |
1835 | eth->clk_esw = devm_clk_get(&pdev->dev, "esw"); | 1854 | clk_prepare_enable(eth->clks[MTK_CLK_ESW]); |
1836 | eth->clk_gp1 = devm_clk_get(&pdev->dev, "gp1"); | 1855 | clk_prepare_enable(eth->clks[MTK_CLK_GP1]); |
1837 | eth->clk_gp2 = devm_clk_get(&pdev->dev, "gp2"); | 1856 | clk_prepare_enable(eth->clks[MTK_CLK_GP2]); |
1838 | if (IS_ERR(eth->clk_esw) || IS_ERR(eth->clk_gp1) || | ||
1839 | IS_ERR(eth->clk_gp2) || IS_ERR(eth->clk_ethif)) | ||
1840 | return -ENODEV; | ||
1841 | |||
1842 | clk_prepare_enable(eth->clk_ethif); | ||
1843 | clk_prepare_enable(eth->clk_esw); | ||
1844 | clk_prepare_enable(eth->clk_gp1); | ||
1845 | clk_prepare_enable(eth->clk_gp2); | ||
1846 | 1857 | ||
1847 | eth->dev = &pdev->dev; | ||
1848 | eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE); | 1858 | eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE); |
1849 | INIT_WORK(ð->pending_work, mtk_pending_work); | 1859 | INIT_WORK(ð->pending_work, mtk_pending_work); |
1850 | 1860 | ||
@@ -1886,15 +1896,24 @@ err_free_dev: | |||
1886 | static int mtk_remove(struct platform_device *pdev) | 1896 | static int mtk_remove(struct platform_device *pdev) |
1887 | { | 1897 | { |
1888 | struct mtk_eth *eth = platform_get_drvdata(pdev); | 1898 | struct mtk_eth *eth = platform_get_drvdata(pdev); |
1899 | int i; | ||
1889 | 1900 | ||
1890 | clk_disable_unprepare(eth->clk_ethif); | 1901 | /* stop all devices to make sure that dma is properly shut down */ |
1891 | clk_disable_unprepare(eth->clk_esw); | 1902 | for (i = 0; i < MTK_MAC_COUNT; i++) { |
1892 | clk_disable_unprepare(eth->clk_gp1); | 1903 | if (!eth->netdev[i]) |
1893 | clk_disable_unprepare(eth->clk_gp2); | 1904 | continue; |
1905 | mtk_stop(eth->netdev[i]); | ||
1906 | } | ||
1907 | |||
1908 | clk_disable_unprepare(eth->clks[MTK_CLK_ETHIF]); | ||
1909 | clk_disable_unprepare(eth->clks[MTK_CLK_ESW]); | ||
1910 | clk_disable_unprepare(eth->clks[MTK_CLK_GP1]); | ||
1911 | clk_disable_unprepare(eth->clks[MTK_CLK_GP2]); | ||
1894 | 1912 | ||
1895 | netif_napi_del(ð->tx_napi); | 1913 | netif_napi_del(ð->tx_napi); |
1896 | netif_napi_del(ð->rx_napi); | 1914 | netif_napi_del(ð->rx_napi); |
1897 | mtk_cleanup(eth); | 1915 | mtk_cleanup(eth); |
1916 | mtk_mdio_cleanup(eth); | ||
1898 | platform_set_drvdata(pdev, NULL); | 1917 | platform_set_drvdata(pdev, NULL); |
1899 | 1918 | ||
1900 | return 0; | 1919 | return 0; |
@@ -1904,6 +1923,7 @@ const struct of_device_id of_mtk_match[] = { | |||
1904 | { .compatible = "mediatek,mt7623-eth" }, | 1923 | { .compatible = "mediatek,mt7623-eth" }, |
1905 | {}, | 1924 | {}, |
1906 | }; | 1925 | }; |
1926 | MODULE_DEVICE_TABLE(of, of_mtk_match); | ||
1907 | 1927 | ||
1908 | static struct platform_driver mtk_driver = { | 1928 | static struct platform_driver mtk_driver = { |
1909 | .probe = mtk_probe, | 1929 | .probe = mtk_probe, |
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index f82e3acb947b..6e1ade7a25c5 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h | |||
@@ -290,6 +290,17 @@ enum mtk_tx_flags { | |||
290 | MTK_TX_FLAGS_PAGE0 = 0x02, | 290 | MTK_TX_FLAGS_PAGE0 = 0x02, |
291 | }; | 291 | }; |
292 | 292 | ||
293 | /* This enum allows us to identify how the clock is defined on the array of the | ||
294 | * clock in the order | ||
295 | */ | ||
296 | enum mtk_clks_map { | ||
297 | MTK_CLK_ETHIF, | ||
298 | MTK_CLK_ESW, | ||
299 | MTK_CLK_GP1, | ||
300 | MTK_CLK_GP2, | ||
301 | MTK_CLK_MAX | ||
302 | }; | ||
303 | |||
293 | /* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at | 304 | /* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at |
294 | * by the TX descriptor s | 305 | * by the TX descriptor s |
295 | * @skb: The SKB pointer of the packet being sent | 306 | * @skb: The SKB pointer of the packet being sent |
@@ -370,10 +381,7 @@ struct mtk_rx_ring { | |||
370 | * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring | 381 | * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring |
371 | * @phy_scratch_ring: physical address of scratch_ring | 382 | * @phy_scratch_ring: physical address of scratch_ring |
372 | * @scratch_head: The scratch memory that scratch_ring points to. | 383 | * @scratch_head: The scratch memory that scratch_ring points to. |
373 | * @clk_ethif: The ethif clock | 384 | * @clks: clock array for all clocks required |
374 | * @clk_esw: The switch clock | ||
375 | * @clk_gp1: The gmac1 clock | ||
376 | * @clk_gp2: The gmac2 clock | ||
377 | * @mii_bus: If there is a bus we need to create an instance for it | 385 | * @mii_bus: If there is a bus we need to create an instance for it |
378 | * @pending_work: The workqueue used to reset the dma ring | 386 | * @pending_work: The workqueue used to reset the dma ring |
379 | */ | 387 | */ |
@@ -400,10 +408,8 @@ struct mtk_eth { | |||
400 | struct mtk_tx_dma *scratch_ring; | 408 | struct mtk_tx_dma *scratch_ring; |
401 | dma_addr_t phy_scratch_ring; | 409 | dma_addr_t phy_scratch_ring; |
402 | void *scratch_head; | 410 | void *scratch_head; |
403 | struct clk *clk_ethif; | 411 | struct clk *clks[MTK_CLK_MAX]; |
404 | struct clk *clk_esw; | 412 | |
405 | struct clk *clk_gp1; | ||
406 | struct clk *clk_gp2; | ||
407 | struct mii_bus *mii_bus; | 413 | struct mii_bus *mii_bus; |
408 | struct work_struct pending_work; | 414 | struct work_struct pending_work; |
409 | }; | 415 | }; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c index 99c6bbdff501..b04760a5034b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c | |||
@@ -94,7 +94,7 @@ static u8 mlx4_en_dcbnl_getcap(struct net_device *dev, int capid, u8 *cap) | |||
94 | *cap = true; | 94 | *cap = true; |
95 | break; | 95 | break; |
96 | case DCB_CAP_ATTR_DCBX: | 96 | case DCB_CAP_ATTR_DCBX: |
97 | *cap = priv->cee_params.dcbx_cap; | 97 | *cap = priv->dcbx_cap; |
98 | break; | 98 | break; |
99 | case DCB_CAP_ATTR_PFC_TCS: | 99 | case DCB_CAP_ATTR_PFC_TCS: |
100 | *cap = 1 << mlx4_max_tc(priv->mdev->dev); | 100 | *cap = 1 << mlx4_max_tc(priv->mdev->dev); |
@@ -111,14 +111,14 @@ static u8 mlx4_en_dcbnl_getpfcstate(struct net_device *netdev) | |||
111 | { | 111 | { |
112 | struct mlx4_en_priv *priv = netdev_priv(netdev); | 112 | struct mlx4_en_priv *priv = netdev_priv(netdev); |
113 | 113 | ||
114 | return priv->cee_params.dcb_cfg.pfc_state; | 114 | return priv->cee_config.pfc_state; |
115 | } | 115 | } |
116 | 116 | ||
117 | static void mlx4_en_dcbnl_setpfcstate(struct net_device *netdev, u8 state) | 117 | static void mlx4_en_dcbnl_setpfcstate(struct net_device *netdev, u8 state) |
118 | { | 118 | { |
119 | struct mlx4_en_priv *priv = netdev_priv(netdev); | 119 | struct mlx4_en_priv *priv = netdev_priv(netdev); |
120 | 120 | ||
121 | priv->cee_params.dcb_cfg.pfc_state = state; | 121 | priv->cee_config.pfc_state = state; |
122 | } | 122 | } |
123 | 123 | ||
124 | static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, | 124 | static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, |
@@ -126,7 +126,7 @@ static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, | |||
126 | { | 126 | { |
127 | struct mlx4_en_priv *priv = netdev_priv(netdev); | 127 | struct mlx4_en_priv *priv = netdev_priv(netdev); |
128 | 128 | ||
129 | *setting = priv->cee_params.dcb_cfg.tc_config[priority].dcb_pfc; | 129 | *setting = priv->cee_config.dcb_pfc[priority]; |
130 | } | 130 | } |
131 | 131 | ||
132 | static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority, | 132 | static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority, |
@@ -134,8 +134,8 @@ static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority, | |||
134 | { | 134 | { |
135 | struct mlx4_en_priv *priv = netdev_priv(netdev); | 135 | struct mlx4_en_priv *priv = netdev_priv(netdev); |
136 | 136 | ||
137 | priv->cee_params.dcb_cfg.tc_config[priority].dcb_pfc = setting; | 137 | priv->cee_config.dcb_pfc[priority] = setting; |
138 | priv->cee_params.dcb_cfg.pfc_state = true; | 138 | priv->cee_config.pfc_state = true; |
139 | } | 139 | } |
140 | 140 | ||
141 | static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) | 141 | static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) |
@@ -157,13 +157,11 @@ static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev) | |||
157 | { | 157 | { |
158 | struct mlx4_en_priv *priv = netdev_priv(netdev); | 158 | struct mlx4_en_priv *priv = netdev_priv(netdev); |
159 | struct mlx4_en_dev *mdev = priv->mdev; | 159 | struct mlx4_en_dev *mdev = priv->mdev; |
160 | struct mlx4_en_cee_config *dcb_cfg = &priv->cee_params.dcb_cfg; | ||
161 | int err = 0; | ||
162 | 160 | ||
163 | if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE)) | 161 | if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) |
164 | return -EINVAL; | 162 | return 1; |
165 | 163 | ||
166 | if (dcb_cfg->pfc_state) { | 164 | if (priv->cee_config.pfc_state) { |
167 | int tc; | 165 | int tc; |
168 | 166 | ||
169 | priv->prof->rx_pause = 0; | 167 | priv->prof->rx_pause = 0; |
@@ -171,7 +169,7 @@ static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev) | |||
171 | for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) { | 169 | for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) { |
172 | u8 tc_mask = 1 << tc; | 170 | u8 tc_mask = 1 << tc; |
173 | 171 | ||
174 | switch (dcb_cfg->tc_config[tc].dcb_pfc) { | 172 | switch (priv->cee_config.dcb_pfc[tc]) { |
175 | case pfc_disabled: | 173 | case pfc_disabled: |
176 | priv->prof->tx_ppp &= ~tc_mask; | 174 | priv->prof->tx_ppp &= ~tc_mask; |
177 | priv->prof->rx_ppp &= ~tc_mask; | 175 | priv->prof->rx_ppp &= ~tc_mask; |
@@ -199,15 +197,17 @@ static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev) | |||
199 | en_dbg(DRV, priv, "Set pfc off\n"); | 197 | en_dbg(DRV, priv, "Set pfc off\n"); |
200 | } | 198 | } |
201 | 199 | ||
202 | err = mlx4_SET_PORT_general(mdev->dev, priv->port, | 200 | if (mlx4_SET_PORT_general(mdev->dev, priv->port, |
203 | priv->rx_skb_size + ETH_FCS_LEN, | 201 | priv->rx_skb_size + ETH_FCS_LEN, |
204 | priv->prof->tx_pause, | 202 | priv->prof->tx_pause, |
205 | priv->prof->tx_ppp, | 203 | priv->prof->tx_ppp, |
206 | priv->prof->rx_pause, | 204 | priv->prof->rx_pause, |
207 | priv->prof->rx_ppp); | 205 | priv->prof->rx_ppp)) { |
208 | if (err) | ||
209 | en_err(priv, "Failed setting pause params\n"); | 206 | en_err(priv, "Failed setting pause params\n"); |
210 | return err; | 207 | return 1; |
208 | } | ||
209 | |||
210 | return 0; | ||
211 | } | 211 | } |
212 | 212 | ||
213 | static u8 mlx4_en_dcbnl_get_state(struct net_device *dev) | 213 | static u8 mlx4_en_dcbnl_get_state(struct net_device *dev) |
@@ -225,7 +225,7 @@ static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state) | |||
225 | struct mlx4_en_priv *priv = netdev_priv(dev); | 225 | struct mlx4_en_priv *priv = netdev_priv(dev); |
226 | int num_tcs = 0; | 226 | int num_tcs = 0; |
227 | 227 | ||
228 | if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE)) | 228 | if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) |
229 | return 1; | 229 | return 1; |
230 | 230 | ||
231 | if (!!(state) == !!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED)) | 231 | if (!!(state) == !!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED)) |
@@ -238,7 +238,10 @@ static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state) | |||
238 | priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED; | 238 | priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED; |
239 | } | 239 | } |
240 | 240 | ||
241 | return mlx4_en_setup_tc(dev, num_tcs); | 241 | if (mlx4_en_setup_tc(dev, num_tcs)) |
242 | return 1; | ||
243 | |||
244 | return 0; | ||
242 | } | 245 | } |
243 | 246 | ||
244 | /* On success returns a non-zero 802.1p user priority bitmap | 247 | /* On success returns a non-zero 802.1p user priority bitmap |
@@ -252,7 +255,7 @@ static int mlx4_en_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) | |||
252 | .selector = idtype, | 255 | .selector = idtype, |
253 | .protocol = id, | 256 | .protocol = id, |
254 | }; | 257 | }; |
255 | if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE)) | 258 | if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) |
256 | return 0; | 259 | return 0; |
257 | 260 | ||
258 | return dcb_getapp(netdev, &app); | 261 | return dcb_getapp(netdev, &app); |
@@ -264,7 +267,7 @@ static int mlx4_en_dcbnl_setapp(struct net_device *netdev, u8 idtype, | |||
264 | struct mlx4_en_priv *priv = netdev_priv(netdev); | 267 | struct mlx4_en_priv *priv = netdev_priv(netdev); |
265 | struct dcb_app app; | 268 | struct dcb_app app; |
266 | 269 | ||
267 | if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE)) | 270 | if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) |
268 | return -EINVAL; | 271 | return -EINVAL; |
269 | 272 | ||
270 | memset(&app, 0, sizeof(struct dcb_app)); | 273 | memset(&app, 0, sizeof(struct dcb_app)); |
@@ -433,7 +436,7 @@ static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev) | |||
433 | { | 436 | { |
434 | struct mlx4_en_priv *priv = netdev_priv(dev); | 437 | struct mlx4_en_priv *priv = netdev_priv(dev); |
435 | 438 | ||
436 | return priv->cee_params.dcbx_cap; | 439 | return priv->dcbx_cap; |
437 | } | 440 | } |
438 | 441 | ||
439 | static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode) | 442 | static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode) |
@@ -442,7 +445,7 @@ static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode) | |||
442 | struct ieee_ets ets = {0}; | 445 | struct ieee_ets ets = {0}; |
443 | struct ieee_pfc pfc = {0}; | 446 | struct ieee_pfc pfc = {0}; |
444 | 447 | ||
445 | if (mode == priv->cee_params.dcbx_cap) | 448 | if (mode == priv->dcbx_cap) |
446 | return 0; | 449 | return 0; |
447 | 450 | ||
448 | if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || | 451 | if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || |
@@ -451,7 +454,7 @@ static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode) | |||
451 | !(mode & DCB_CAP_DCBX_HOST)) | 454 | !(mode & DCB_CAP_DCBX_HOST)) |
452 | goto err; | 455 | goto err; |
453 | 456 | ||
454 | priv->cee_params.dcbx_cap = mode; | 457 | priv->dcbx_cap = mode; |
455 | 458 | ||
456 | ets.ets_cap = IEEE_8021QAZ_MAX_TCS; | 459 | ets.ets_cap = IEEE_8021QAZ_MAX_TCS; |
457 | pfc.pfc_cap = IEEE_8021QAZ_MAX_TCS; | 460 | pfc.pfc_cap = IEEE_8021QAZ_MAX_TCS; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 4198e9bf89d0..fedb829276f4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
@@ -71,10 +71,11 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up) | |||
71 | #ifdef CONFIG_MLX4_EN_DCB | 71 | #ifdef CONFIG_MLX4_EN_DCB |
72 | if (!mlx4_is_slave(priv->mdev->dev)) { | 72 | if (!mlx4_is_slave(priv->mdev->dev)) { |
73 | if (up) { | 73 | if (up) { |
74 | priv->flags |= MLX4_EN_FLAG_DCB_ENABLED; | 74 | if (priv->dcbx_cap) |
75 | priv->flags |= MLX4_EN_FLAG_DCB_ENABLED; | ||
75 | } else { | 76 | } else { |
76 | priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED; | 77 | priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED; |
77 | priv->cee_params.dcb_cfg.pfc_state = false; | 78 | priv->cee_config.pfc_state = false; |
78 | } | 79 | } |
79 | } | 80 | } |
80 | #endif /* CONFIG_MLX4_EN_DCB */ | 81 | #endif /* CONFIG_MLX4_EN_DCB */ |
@@ -3048,9 +3049,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
3048 | struct mlx4_en_priv *priv; | 3049 | struct mlx4_en_priv *priv; |
3049 | int i; | 3050 | int i; |
3050 | int err; | 3051 | int err; |
3051 | #ifdef CONFIG_MLX4_EN_DCB | ||
3052 | struct tc_configuration *tc; | ||
3053 | #endif | ||
3054 | 3052 | ||
3055 | dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv), | 3053 | dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv), |
3056 | MAX_TX_RINGS, MAX_RX_RINGS); | 3054 | MAX_TX_RINGS, MAX_RX_RINGS); |
@@ -3117,16 +3115,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
3117 | priv->msg_enable = MLX4_EN_MSG_LEVEL; | 3115 | priv->msg_enable = MLX4_EN_MSG_LEVEL; |
3118 | #ifdef CONFIG_MLX4_EN_DCB | 3116 | #ifdef CONFIG_MLX4_EN_DCB |
3119 | if (!mlx4_is_slave(priv->mdev->dev)) { | 3117 | if (!mlx4_is_slave(priv->mdev->dev)) { |
3120 | priv->cee_params.dcbx_cap = DCB_CAP_DCBX_VER_CEE | | 3118 | priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST | |
3121 | DCB_CAP_DCBX_HOST | | 3119 | DCB_CAP_DCBX_VER_IEEE; |
3122 | DCB_CAP_DCBX_VER_IEEE; | ||
3123 | priv->flags |= MLX4_EN_DCB_ENABLED; | 3120 | priv->flags |= MLX4_EN_DCB_ENABLED; |
3124 | priv->cee_params.dcb_cfg.pfc_state = false; | 3121 | priv->cee_config.pfc_state = false; |
3125 | 3122 | ||
3126 | for (i = 0; i < MLX4_EN_NUM_UP; i++) { | 3123 | for (i = 0; i < MLX4_EN_NUM_UP; i++) |
3127 | tc = &priv->cee_params.dcb_cfg.tc_config[i]; | 3124 | priv->cee_config.dcb_pfc[i] = pfc_disabled; |
3128 | tc->dcb_pfc = pfc_disabled; | ||
3129 | } | ||
3130 | 3125 | ||
3131 | if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) { | 3126 | if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) { |
3132 | dev->dcbnl_ops = &mlx4_en_dcbnl_ops; | 3127 | dev->dcbnl_ops = &mlx4_en_dcbnl_ops; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 9df87ca0515a..e2509bba3e7c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c | |||
@@ -818,7 +818,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | |||
818 | real_size = get_real_size(skb, shinfo, dev, &lso_header_size, | 818 | real_size = get_real_size(skb, shinfo, dev, &lso_header_size, |
819 | &inline_ok, &fragptr); | 819 | &inline_ok, &fragptr); |
820 | if (unlikely(!real_size)) | 820 | if (unlikely(!real_size)) |
821 | goto tx_drop; | 821 | goto tx_drop_count; |
822 | 822 | ||
823 | /* Align descriptor to TXBB size */ | 823 | /* Align descriptor to TXBB size */ |
824 | desc_size = ALIGN(real_size, TXBB_SIZE); | 824 | desc_size = ALIGN(real_size, TXBB_SIZE); |
@@ -826,7 +826,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | |||
826 | if (unlikely(nr_txbb > MAX_DESC_TXBBS)) { | 826 | if (unlikely(nr_txbb > MAX_DESC_TXBBS)) { |
827 | if (netif_msg_tx_err(priv)) | 827 | if (netif_msg_tx_err(priv)) |
828 | en_warn(priv, "Oversized header or SG list\n"); | 828 | en_warn(priv, "Oversized header or SG list\n"); |
829 | goto tx_drop; | 829 | goto tx_drop_count; |
830 | } | 830 | } |
831 | 831 | ||
832 | bf_ok = ring->bf_enabled; | 832 | bf_ok = ring->bf_enabled; |
@@ -1071,9 +1071,10 @@ tx_drop_unmap: | |||
1071 | PCI_DMA_TODEVICE); | 1071 | PCI_DMA_TODEVICE); |
1072 | } | 1072 | } |
1073 | 1073 | ||
1074 | tx_drop_count: | ||
1075 | ring->tx_dropped++; | ||
1074 | tx_drop: | 1076 | tx_drop: |
1075 | dev_kfree_skb_any(skb); | 1077 | dev_kfree_skb_any(skb); |
1076 | ring->tx_dropped++; | ||
1077 | return NETDEV_TX_OK; | 1078 | return NETDEV_TX_OK; |
1078 | } | 1079 | } |
1079 | 1080 | ||
@@ -1106,7 +1107,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame, | |||
1106 | goto tx_drop; | 1107 | goto tx_drop; |
1107 | 1108 | ||
1108 | if (mlx4_en_is_tx_ring_full(ring)) | 1109 | if (mlx4_en_is_tx_ring_full(ring)) |
1109 | goto tx_drop; | 1110 | goto tx_drop_count; |
1110 | 1111 | ||
1111 | /* fetch ring->cons far ahead before needing it to avoid stall */ | 1112 | /* fetch ring->cons far ahead before needing it to avoid stall */ |
1112 | ring_cons = READ_ONCE(ring->cons); | 1113 | ring_cons = READ_ONCE(ring->cons); |
@@ -1176,7 +1177,8 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame, | |||
1176 | 1177 | ||
1177 | return NETDEV_TX_OK; | 1178 | return NETDEV_TX_OK; |
1178 | 1179 | ||
1179 | tx_drop: | 1180 | tx_drop_count: |
1180 | ring->tx_dropped++; | 1181 | ring->tx_dropped++; |
1182 | tx_drop: | ||
1181 | return NETDEV_TX_BUSY; | 1183 | return NETDEV_TX_BUSY; |
1182 | } | 1184 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index f613977455e0..cf8f8a72a801 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c | |||
@@ -1305,8 +1305,8 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) | |||
1305 | return 0; | 1305 | return 0; |
1306 | 1306 | ||
1307 | err_out_unmap: | 1307 | err_out_unmap: |
1308 | while (i >= 0) | 1308 | while (i > 0) |
1309 | mlx4_free_eq(dev, &priv->eq_table.eq[i--]); | 1309 | mlx4_free_eq(dev, &priv->eq_table.eq[--i]); |
1310 | #ifdef CONFIG_RFS_ACCEL | 1310 | #ifdef CONFIG_RFS_ACCEL |
1311 | for (i = 1; i <= dev->caps.num_ports; i++) { | 1311 | for (i = 1; i <= dev->caps.num_ports; i++) { |
1312 | if (mlx4_priv(dev)->port[i].rmap) { | 1312 | if (mlx4_priv(dev)->port[i].rmap) { |
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 75dd2e3d3059..7183ac4135d2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c | |||
@@ -2970,6 +2970,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port) | |||
2970 | mlx4_err(dev, "Failed to create mtu file for port %d\n", port); | 2970 | mlx4_err(dev, "Failed to create mtu file for port %d\n", port); |
2971 | device_remove_file(&info->dev->persist->pdev->dev, | 2971 | device_remove_file(&info->dev->persist->pdev->dev, |
2972 | &info->port_attr); | 2972 | &info->port_attr); |
2973 | devlink_port_unregister(&info->devlink_port); | ||
2973 | info->port = -1; | 2974 | info->port = -1; |
2974 | } | 2975 | } |
2975 | 2976 | ||
@@ -2984,6 +2985,8 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info) | |||
2984 | device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr); | 2985 | device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr); |
2985 | device_remove_file(&info->dev->persist->pdev->dev, | 2986 | device_remove_file(&info->dev->persist->pdev->dev, |
2986 | &info->port_mtu_attr); | 2987 | &info->port_mtu_attr); |
2988 | devlink_port_unregister(&info->devlink_port); | ||
2989 | |||
2987 | #ifdef CONFIG_RFS_ACCEL | 2990 | #ifdef CONFIG_RFS_ACCEL |
2988 | free_irq_cpu_rmap(info->rmap); | 2991 | free_irq_cpu_rmap(info->rmap); |
2989 | info->rmap = NULL; | 2992 | info->rmap = NULL; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 2c2913dcae98..9099dbd04951 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | |||
@@ -482,20 +482,10 @@ enum dcb_pfc_type { | |||
482 | pfc_enabled_rx | 482 | pfc_enabled_rx |
483 | }; | 483 | }; |
484 | 484 | ||
485 | struct tc_configuration { | ||
486 | enum dcb_pfc_type dcb_pfc; | ||
487 | }; | ||
488 | |||
489 | struct mlx4_en_cee_config { | 485 | struct mlx4_en_cee_config { |
490 | bool pfc_state; | 486 | bool pfc_state; |
491 | struct tc_configuration tc_config[MLX4_EN_NUM_UP]; | 487 | enum dcb_pfc_type dcb_pfc[MLX4_EN_NUM_UP]; |
492 | }; | 488 | }; |
493 | |||
494 | struct mlx4_en_cee_params { | ||
495 | u8 dcbx_cap; | ||
496 | struct mlx4_en_cee_config dcb_cfg; | ||
497 | }; | ||
498 | |||
499 | #endif | 489 | #endif |
500 | 490 | ||
501 | struct ethtool_flow_id { | 491 | struct ethtool_flow_id { |
@@ -624,7 +614,8 @@ struct mlx4_en_priv { | |||
624 | struct ieee_ets ets; | 614 | struct ieee_ets ets; |
625 | u16 maxrate[IEEE_8021QAZ_MAX_TCS]; | 615 | u16 maxrate[IEEE_8021QAZ_MAX_TCS]; |
626 | enum dcbnl_cndd_states cndd_state[IEEE_8021QAZ_MAX_TCS]; | 616 | enum dcbnl_cndd_states cndd_state[IEEE_8021QAZ_MAX_TCS]; |
627 | struct mlx4_en_cee_params cee_params; | 617 | struct mlx4_en_cee_config cee_config; |
618 | u8 dcbx_cap; | ||
628 | #endif | 619 | #endif |
629 | #ifdef CONFIG_RFS_ACCEL | 620 | #ifdef CONFIG_RFS_ACCEL |
630 | spinlock_t filters_lock; | 621 | spinlock_t filters_lock; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 3d2095e5c61c..c5b2064297a1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c | |||
@@ -52,7 +52,7 @@ | |||
52 | 52 | ||
53 | #define MLX4_FLAG_V_IGNORE_FCS_MASK 0x2 | 53 | #define MLX4_FLAG_V_IGNORE_FCS_MASK 0x2 |
54 | #define MLX4_IGNORE_FCS_MASK 0x1 | 54 | #define MLX4_IGNORE_FCS_MASK 0x1 |
55 | #define MLNX4_TX_MAX_NUMBER 8 | 55 | #define MLX4_TC_MAX_NUMBER 8 |
56 | 56 | ||
57 | void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table) | 57 | void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table) |
58 | { | 58 | { |
@@ -2022,7 +2022,7 @@ int mlx4_max_tc(struct mlx4_dev *dev) | |||
2022 | u8 num_tc = dev->caps.max_tc_eth; | 2022 | u8 num_tc = dev->caps.max_tc_eth; |
2023 | 2023 | ||
2024 | if (!num_tc) | 2024 | if (!num_tc) |
2025 | num_tc = MLNX4_TX_MAX_NUMBER; | 2025 | num_tc = MLX4_TC_MAX_NUMBER; |
2026 | 2026 | ||
2027 | return num_tc; | 2027 | return num_tc; |
2028 | } | 2028 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index d6e2a1cae19a..c2ec01a22d55 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c | |||
@@ -143,13 +143,14 @@ static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx) | |||
143 | return cmd->cmd_buf + (idx << cmd->log_stride); | 143 | return cmd->cmd_buf + (idx << cmd->log_stride); |
144 | } | 144 | } |
145 | 145 | ||
146 | static u8 xor8_buf(void *buf, int len) | 146 | static u8 xor8_buf(void *buf, size_t offset, int len) |
147 | { | 147 | { |
148 | u8 *ptr = buf; | 148 | u8 *ptr = buf; |
149 | u8 sum = 0; | 149 | u8 sum = 0; |
150 | int i; | 150 | int i; |
151 | int end = len + offset; | ||
151 | 152 | ||
152 | for (i = 0; i < len; i++) | 153 | for (i = offset; i < end; i++) |
153 | sum ^= ptr[i]; | 154 | sum ^= ptr[i]; |
154 | 155 | ||
155 | return sum; | 156 | return sum; |
@@ -157,41 +158,49 @@ static u8 xor8_buf(void *buf, int len) | |||
157 | 158 | ||
158 | static int verify_block_sig(struct mlx5_cmd_prot_block *block) | 159 | static int verify_block_sig(struct mlx5_cmd_prot_block *block) |
159 | { | 160 | { |
160 | if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff) | 161 | size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0); |
162 | int xor_len = sizeof(*block) - sizeof(block->data) - 1; | ||
163 | |||
164 | if (xor8_buf(block, rsvd0_off, xor_len) != 0xff) | ||
161 | return -EINVAL; | 165 | return -EINVAL; |
162 | 166 | ||
163 | if (xor8_buf(block, sizeof(*block)) != 0xff) | 167 | if (xor8_buf(block, 0, sizeof(*block)) != 0xff) |
164 | return -EINVAL; | 168 | return -EINVAL; |
165 | 169 | ||
166 | return 0; | 170 | return 0; |
167 | } | 171 | } |
168 | 172 | ||
169 | static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token, | 173 | static void calc_block_sig(struct mlx5_cmd_prot_block *block) |
170 | int csum) | ||
171 | { | 174 | { |
172 | block->token = token; | 175 | int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2; |
173 | if (csum) { | 176 | size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0); |
174 | block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - | 177 | |
175 | sizeof(block->data) - 2); | 178 | block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len); |
176 | block->sig = ~xor8_buf(block, sizeof(*block) - 1); | 179 | block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1); |
177 | } | ||
178 | } | 180 | } |
179 | 181 | ||
180 | static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum) | 182 | static void calc_chain_sig(struct mlx5_cmd_msg *msg) |
181 | { | 183 | { |
182 | struct mlx5_cmd_mailbox *next = msg->next; | 184 | struct mlx5_cmd_mailbox *next = msg->next; |
183 | 185 | int size = msg->len; | |
184 | while (next) { | 186 | int blen = size - min_t(int, sizeof(msg->first.data), size); |
185 | calc_block_sig(next->buf, token, csum); | 187 | int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) |
188 | / MLX5_CMD_DATA_BLOCK_SIZE; | ||
189 | int i = 0; | ||
190 | |||
191 | for (i = 0; i < n && next; i++) { | ||
192 | calc_block_sig(next->buf); | ||
186 | next = next->next; | 193 | next = next->next; |
187 | } | 194 | } |
188 | } | 195 | } |
189 | 196 | ||
190 | static void set_signature(struct mlx5_cmd_work_ent *ent, int csum) | 197 | static void set_signature(struct mlx5_cmd_work_ent *ent, int csum) |
191 | { | 198 | { |
192 | ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay)); | 199 | ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay)); |
193 | calc_chain_sig(ent->in, ent->token, csum); | 200 | if (csum) { |
194 | calc_chain_sig(ent->out, ent->token, csum); | 201 | calc_chain_sig(ent->in); |
202 | calc_chain_sig(ent->out); | ||
203 | } | ||
195 | } | 204 | } |
196 | 205 | ||
197 | static void poll_timeout(struct mlx5_cmd_work_ent *ent) | 206 | static void poll_timeout(struct mlx5_cmd_work_ent *ent) |
@@ -222,12 +231,17 @@ static int verify_signature(struct mlx5_cmd_work_ent *ent) | |||
222 | struct mlx5_cmd_mailbox *next = ent->out->next; | 231 | struct mlx5_cmd_mailbox *next = ent->out->next; |
223 | int err; | 232 | int err; |
224 | u8 sig; | 233 | u8 sig; |
234 | int size = ent->out->len; | ||
235 | int blen = size - min_t(int, sizeof(ent->out->first.data), size); | ||
236 | int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) | ||
237 | / MLX5_CMD_DATA_BLOCK_SIZE; | ||
238 | int i = 0; | ||
225 | 239 | ||
226 | sig = xor8_buf(ent->lay, sizeof(*ent->lay)); | 240 | sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay)); |
227 | if (sig != 0xff) | 241 | if (sig != 0xff) |
228 | return -EINVAL; | 242 | return -EINVAL; |
229 | 243 | ||
230 | while (next) { | 244 | for (i = 0; i < n && next; i++) { |
231 | err = verify_block_sig(next->buf); | 245 | err = verify_block_sig(next->buf); |
232 | if (err) | 246 | if (err) |
233 | return err; | 247 | return err; |
@@ -656,7 +670,6 @@ static void cmd_work_handler(struct work_struct *work) | |||
656 | spin_unlock_irqrestore(&cmd->alloc_lock, flags); | 670 | spin_unlock_irqrestore(&cmd->alloc_lock, flags); |
657 | } | 671 | } |
658 | 672 | ||
659 | ent->token = alloc_token(cmd); | ||
660 | cmd->ent_arr[ent->idx] = ent; | 673 | cmd->ent_arr[ent->idx] = ent; |
661 | lay = get_inst(cmd, ent->idx); | 674 | lay = get_inst(cmd, ent->idx); |
662 | ent->lay = lay; | 675 | ent->lay = lay; |
@@ -766,7 +779,8 @@ static u8 *get_status_ptr(struct mlx5_outbox_hdr *out) | |||
766 | static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, | 779 | static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, |
767 | struct mlx5_cmd_msg *out, void *uout, int uout_size, | 780 | struct mlx5_cmd_msg *out, void *uout, int uout_size, |
768 | mlx5_cmd_cbk_t callback, | 781 | mlx5_cmd_cbk_t callback, |
769 | void *context, int page_queue, u8 *status) | 782 | void *context, int page_queue, u8 *status, |
783 | u8 token) | ||
770 | { | 784 | { |
771 | struct mlx5_cmd *cmd = &dev->cmd; | 785 | struct mlx5_cmd *cmd = &dev->cmd; |
772 | struct mlx5_cmd_work_ent *ent; | 786 | struct mlx5_cmd_work_ent *ent; |
@@ -783,6 +797,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, | |||
783 | if (IS_ERR(ent)) | 797 | if (IS_ERR(ent)) |
784 | return PTR_ERR(ent); | 798 | return PTR_ERR(ent); |
785 | 799 | ||
800 | ent->token = token; | ||
801 | |||
786 | if (!callback) | 802 | if (!callback) |
787 | init_completion(&ent->done); | 803 | init_completion(&ent->done); |
788 | 804 | ||
@@ -854,7 +870,8 @@ static const struct file_operations fops = { | |||
854 | .write = dbg_write, | 870 | .write = dbg_write, |
855 | }; | 871 | }; |
856 | 872 | ||
857 | static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size) | 873 | static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size, |
874 | u8 token) | ||
858 | { | 875 | { |
859 | struct mlx5_cmd_prot_block *block; | 876 | struct mlx5_cmd_prot_block *block; |
860 | struct mlx5_cmd_mailbox *next; | 877 | struct mlx5_cmd_mailbox *next; |
@@ -880,6 +897,7 @@ static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size) | |||
880 | memcpy(block->data, from, copy); | 897 | memcpy(block->data, from, copy); |
881 | from += copy; | 898 | from += copy; |
882 | size -= copy; | 899 | size -= copy; |
900 | block->token = token; | ||
883 | next = next->next; | 901 | next = next->next; |
884 | } | 902 | } |
885 | 903 | ||
@@ -949,7 +967,8 @@ static void free_cmd_box(struct mlx5_core_dev *dev, | |||
949 | } | 967 | } |
950 | 968 | ||
951 | static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, | 969 | static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, |
952 | gfp_t flags, int size) | 970 | gfp_t flags, int size, |
971 | u8 token) | ||
953 | { | 972 | { |
954 | struct mlx5_cmd_mailbox *tmp, *head = NULL; | 973 | struct mlx5_cmd_mailbox *tmp, *head = NULL; |
955 | struct mlx5_cmd_prot_block *block; | 974 | struct mlx5_cmd_prot_block *block; |
@@ -978,6 +997,7 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, | |||
978 | tmp->next = head; | 997 | tmp->next = head; |
979 | block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0); | 998 | block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0); |
980 | block->block_num = cpu_to_be32(n - i - 1); | 999 | block->block_num = cpu_to_be32(n - i - 1); |
1000 | block->token = token; | ||
981 | head = tmp; | 1001 | head = tmp; |
982 | } | 1002 | } |
983 | msg->next = head; | 1003 | msg->next = head; |
@@ -1352,7 +1372,7 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size, | |||
1352 | } | 1372 | } |
1353 | 1373 | ||
1354 | if (IS_ERR(msg)) | 1374 | if (IS_ERR(msg)) |
1355 | msg = mlx5_alloc_cmd_msg(dev, gfp, in_size); | 1375 | msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0); |
1356 | 1376 | ||
1357 | return msg; | 1377 | return msg; |
1358 | } | 1378 | } |
@@ -1377,6 +1397,7 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, | |||
1377 | int err; | 1397 | int err; |
1378 | u8 status = 0; | 1398 | u8 status = 0; |
1379 | u32 drv_synd; | 1399 | u32 drv_synd; |
1400 | u8 token; | ||
1380 | 1401 | ||
1381 | if (pci_channel_offline(dev->pdev) || | 1402 | if (pci_channel_offline(dev->pdev) || |
1382 | dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { | 1403 | dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { |
@@ -1395,20 +1416,22 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, | |||
1395 | return err; | 1416 | return err; |
1396 | } | 1417 | } |
1397 | 1418 | ||
1398 | err = mlx5_copy_to_msg(inb, in, in_size); | 1419 | token = alloc_token(&dev->cmd); |
1420 | |||
1421 | err = mlx5_copy_to_msg(inb, in, in_size, token); | ||
1399 | if (err) { | 1422 | if (err) { |
1400 | mlx5_core_warn(dev, "err %d\n", err); | 1423 | mlx5_core_warn(dev, "err %d\n", err); |
1401 | goto out_in; | 1424 | goto out_in; |
1402 | } | 1425 | } |
1403 | 1426 | ||
1404 | outb = mlx5_alloc_cmd_msg(dev, gfp, out_size); | 1427 | outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token); |
1405 | if (IS_ERR(outb)) { | 1428 | if (IS_ERR(outb)) { |
1406 | err = PTR_ERR(outb); | 1429 | err = PTR_ERR(outb); |
1407 | goto out_in; | 1430 | goto out_in; |
1408 | } | 1431 | } |
1409 | 1432 | ||
1410 | err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context, | 1433 | err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context, |
1411 | pages_queue, &status); | 1434 | pages_queue, &status, token); |
1412 | if (err) | 1435 | if (err) |
1413 | goto out_out; | 1436 | goto out_out; |
1414 | 1437 | ||
@@ -1476,7 +1499,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev) | |||
1476 | INIT_LIST_HEAD(&cmd->cache.med.head); | 1499 | INIT_LIST_HEAD(&cmd->cache.med.head); |
1477 | 1500 | ||
1478 | for (i = 0; i < NUM_LONG_LISTS; i++) { | 1501 | for (i = 0; i < NUM_LONG_LISTS; i++) { |
1479 | msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE); | 1502 | msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE, 0); |
1480 | if (IS_ERR(msg)) { | 1503 | if (IS_ERR(msg)) { |
1481 | err = PTR_ERR(msg); | 1504 | err = PTR_ERR(msg); |
1482 | goto ex_err; | 1505 | goto ex_err; |
@@ -1486,7 +1509,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev) | |||
1486 | } | 1509 | } |
1487 | 1510 | ||
1488 | for (i = 0; i < NUM_MED_LISTS; i++) { | 1511 | for (i = 0; i < NUM_MED_LISTS; i++) { |
1489 | msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE); | 1512 | msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE, 0); |
1490 | if (IS_ERR(msg)) { | 1513 | if (IS_ERR(msg)) { |
1491 | err = PTR_ERR(msg); | 1514 | err = PTR_ERR(msg); |
1492 | goto ex_err; | 1515 | goto ex_err; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 1b495efa7490..bf722aa88cf0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h | |||
@@ -73,8 +73,12 @@ | |||
73 | #define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER) | 73 | #define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER) |
74 | #define MLX5_MPWRQ_STRIDES_PER_PAGE (MLX5_MPWRQ_NUM_STRIDES >> \ | 74 | #define MLX5_MPWRQ_STRIDES_PER_PAGE (MLX5_MPWRQ_NUM_STRIDES >> \ |
75 | MLX5_MPWRQ_WQE_PAGE_ORDER) | 75 | MLX5_MPWRQ_WQE_PAGE_ORDER) |
76 | #define MLX5_CHANNEL_MAX_NUM_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8) * \ | 76 | |
77 | BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW)) | 77 | #define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2) |
78 | #define MLX5E_REQUIRED_MTTS(rqs, wqes)\ | ||
79 | (rqs * wqes * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8)) | ||
80 | #define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) <= U16_MAX) | ||
81 | |||
78 | #define MLX5_UMR_ALIGN (2048) | 82 | #define MLX5_UMR_ALIGN (2048) |
79 | #define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128) | 83 | #define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128) |
80 | 84 | ||
@@ -219,9 +223,8 @@ struct mlx5e_tstamp { | |||
219 | }; | 223 | }; |
220 | 224 | ||
221 | enum { | 225 | enum { |
222 | MLX5E_RQ_STATE_POST_WQES_ENABLE, | 226 | MLX5E_RQ_STATE_FLUSH, |
223 | MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, | 227 | MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, |
224 | MLX5E_RQ_STATE_FLUSH_TIMEOUT, | ||
225 | MLX5E_RQ_STATE_AM, | 228 | MLX5E_RQ_STATE_AM, |
226 | }; | 229 | }; |
227 | 230 | ||
@@ -304,6 +307,7 @@ struct mlx5e_rq { | |||
304 | 307 | ||
305 | unsigned long state; | 308 | unsigned long state; |
306 | int ix; | 309 | int ix; |
310 | u32 mpwqe_mtt_offset; | ||
307 | 311 | ||
308 | struct mlx5e_rx_am am; /* Adaptive Moderation */ | 312 | struct mlx5e_rx_am am; /* Adaptive Moderation */ |
309 | 313 | ||
@@ -365,9 +369,8 @@ struct mlx5e_sq_dma { | |||
365 | }; | 369 | }; |
366 | 370 | ||
367 | enum { | 371 | enum { |
368 | MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, | 372 | MLX5E_SQ_STATE_FLUSH, |
369 | MLX5E_SQ_STATE_BF_ENABLE, | 373 | MLX5E_SQ_STATE_BF_ENABLE, |
370 | MLX5E_SQ_STATE_TX_TIMEOUT, | ||
371 | }; | 374 | }; |
372 | 375 | ||
373 | struct mlx5e_ico_wqe_info { | 376 | struct mlx5e_ico_wqe_info { |
@@ -698,7 +701,6 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget); | |||
698 | bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget); | 701 | bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget); |
699 | int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); | 702 | int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); |
700 | void mlx5e_free_tx_descs(struct mlx5e_sq *sq); | 703 | void mlx5e_free_tx_descs(struct mlx5e_sq *sq); |
701 | void mlx5e_free_rx_descs(struct mlx5e_rq *rq); | ||
702 | 704 | ||
703 | void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); | 705 | void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); |
704 | void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); | 706 | void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); |
@@ -814,11 +816,6 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) | |||
814 | MLX5E_MAX_NUM_CHANNELS); | 816 | MLX5E_MAX_NUM_CHANNELS); |
815 | } | 817 | } |
816 | 818 | ||
817 | static inline int mlx5e_get_mtt_octw(int npages) | ||
818 | { | ||
819 | return ALIGN(npages, 8) / 2; | ||
820 | } | ||
821 | |||
822 | extern const struct ethtool_ops mlx5e_ethtool_ops; | 819 | extern const struct ethtool_ops mlx5e_ethtool_ops; |
823 | #ifdef CONFIG_MLX5_CORE_EN_DCB | 820 | #ifdef CONFIG_MLX5_CORE_EN_DCB |
824 | extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops; | 821 | extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index 673043ccd76c..9cce153e1035 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c | |||
@@ -139,7 +139,7 @@ int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev) | |||
139 | struct mlx5e_tir *tir; | 139 | struct mlx5e_tir *tir; |
140 | void *in; | 140 | void *in; |
141 | int inlen; | 141 | int inlen; |
142 | int err; | 142 | int err = 0; |
143 | 143 | ||
144 | inlen = MLX5_ST_SZ_BYTES(modify_tir_in); | 144 | inlen = MLX5_ST_SZ_BYTES(modify_tir_in); |
145 | in = mlx5_vzalloc(inlen); | 145 | in = mlx5_vzalloc(inlen); |
@@ -151,10 +151,11 @@ int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev) | |||
151 | list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) { | 151 | list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) { |
152 | err = mlx5_core_modify_tir(mdev, tir->tirn, in, inlen); | 152 | err = mlx5_core_modify_tir(mdev, tir->tirn, in, inlen); |
153 | if (err) | 153 | if (err) |
154 | return err; | 154 | goto out; |
155 | } | 155 | } |
156 | 156 | ||
157 | out: | ||
157 | kvfree(in); | 158 | kvfree(in); |
158 | 159 | ||
159 | return 0; | 160 | return err; |
160 | } | 161 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index caa9a3ccc3f3..762af16ed021 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | |||
@@ -127,29 +127,40 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets) | |||
127 | return mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw); | 127 | return mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw); |
128 | } | 128 | } |
129 | 129 | ||
130 | static int mlx5e_dbcnl_validate_ets(struct ieee_ets *ets) | 130 | static int mlx5e_dbcnl_validate_ets(struct net_device *netdev, |
131 | struct ieee_ets *ets) | ||
131 | { | 132 | { |
132 | int bw_sum = 0; | 133 | int bw_sum = 0; |
133 | int i; | 134 | int i; |
134 | 135 | ||
135 | /* Validate Priority */ | 136 | /* Validate Priority */ |
136 | for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { | 137 | for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { |
137 | if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) | 138 | if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) { |
139 | netdev_err(netdev, | ||
140 | "Failed to validate ETS: priority value greater than max(%d)\n", | ||
141 | MLX5E_MAX_PRIORITY); | ||
138 | return -EINVAL; | 142 | return -EINVAL; |
143 | } | ||
139 | } | 144 | } |
140 | 145 | ||
141 | /* Validate Bandwidth Sum */ | 146 | /* Validate Bandwidth Sum */ |
142 | for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { | 147 | for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { |
143 | if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) { | 148 | if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) { |
144 | if (!ets->tc_tx_bw[i]) | 149 | if (!ets->tc_tx_bw[i]) { |
150 | netdev_err(netdev, | ||
151 | "Failed to validate ETS: BW 0 is illegal\n"); | ||
145 | return -EINVAL; | 152 | return -EINVAL; |
153 | } | ||
146 | 154 | ||
147 | bw_sum += ets->tc_tx_bw[i]; | 155 | bw_sum += ets->tc_tx_bw[i]; |
148 | } | 156 | } |
149 | } | 157 | } |
150 | 158 | ||
151 | if (bw_sum != 0 && bw_sum != 100) | 159 | if (bw_sum != 0 && bw_sum != 100) { |
160 | netdev_err(netdev, | ||
161 | "Failed to validate ETS: BW sum is illegal\n"); | ||
152 | return -EINVAL; | 162 | return -EINVAL; |
163 | } | ||
153 | return 0; | 164 | return 0; |
154 | } | 165 | } |
155 | 166 | ||
@@ -159,7 +170,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev, | |||
159 | struct mlx5e_priv *priv = netdev_priv(netdev); | 170 | struct mlx5e_priv *priv = netdev_priv(netdev); |
160 | int err; | 171 | int err; |
161 | 172 | ||
162 | err = mlx5e_dbcnl_validate_ets(ets); | 173 | err = mlx5e_dbcnl_validate_ets(netdev, ets); |
163 | if (err) | 174 | if (err) |
164 | return err; | 175 | return err; |
165 | 176 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 4a3757e60441..7a346bb2ed00 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | |||
@@ -331,7 +331,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev, | |||
331 | if (mlx5e_query_global_pause_combined(priv)) { | 331 | if (mlx5e_query_global_pause_combined(priv)) { |
332 | for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) { | 332 | for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) { |
333 | data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0], | 333 | data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0], |
334 | pport_per_prio_pfc_stats_desc, 0); | 334 | pport_per_prio_pfc_stats_desc, i); |
335 | } | 335 | } |
336 | } | 336 | } |
337 | 337 | ||
@@ -352,15 +352,61 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev, | |||
352 | sq_stats_desc, j); | 352 | sq_stats_desc, j); |
353 | } | 353 | } |
354 | 354 | ||
355 | static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type, | ||
356 | int num_wqe) | ||
357 | { | ||
358 | int packets_per_wqe; | ||
359 | int stride_size; | ||
360 | int num_strides; | ||
361 | int wqe_size; | ||
362 | |||
363 | if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) | ||
364 | return num_wqe; | ||
365 | |||
366 | stride_size = 1 << priv->params.mpwqe_log_stride_sz; | ||
367 | num_strides = 1 << priv->params.mpwqe_log_num_strides; | ||
368 | wqe_size = stride_size * num_strides; | ||
369 | |||
370 | packets_per_wqe = wqe_size / | ||
371 | ALIGN(ETH_DATA_LEN, stride_size); | ||
372 | return (1 << (order_base_2(num_wqe * packets_per_wqe) - 1)); | ||
373 | } | ||
374 | |||
375 | static u32 mlx5e_packets_to_rx_wqes(struct mlx5e_priv *priv, int rq_wq_type, | ||
376 | int num_packets) | ||
377 | { | ||
378 | int packets_per_wqe; | ||
379 | int stride_size; | ||
380 | int num_strides; | ||
381 | int wqe_size; | ||
382 | int num_wqes; | ||
383 | |||
384 | if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) | ||
385 | return num_packets; | ||
386 | |||
387 | stride_size = 1 << priv->params.mpwqe_log_stride_sz; | ||
388 | num_strides = 1 << priv->params.mpwqe_log_num_strides; | ||
389 | wqe_size = stride_size * num_strides; | ||
390 | |||
391 | num_packets = (1 << order_base_2(num_packets)); | ||
392 | |||
393 | packets_per_wqe = wqe_size / | ||
394 | ALIGN(ETH_DATA_LEN, stride_size); | ||
395 | num_wqes = DIV_ROUND_UP(num_packets, packets_per_wqe); | ||
396 | return 1 << (order_base_2(num_wqes)); | ||
397 | } | ||
398 | |||
355 | static void mlx5e_get_ringparam(struct net_device *dev, | 399 | static void mlx5e_get_ringparam(struct net_device *dev, |
356 | struct ethtool_ringparam *param) | 400 | struct ethtool_ringparam *param) |
357 | { | 401 | { |
358 | struct mlx5e_priv *priv = netdev_priv(dev); | 402 | struct mlx5e_priv *priv = netdev_priv(dev); |
359 | int rq_wq_type = priv->params.rq_wq_type; | 403 | int rq_wq_type = priv->params.rq_wq_type; |
360 | 404 | ||
361 | param->rx_max_pending = 1 << mlx5_max_log_rq_size(rq_wq_type); | 405 | param->rx_max_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type, |
406 | 1 << mlx5_max_log_rq_size(rq_wq_type)); | ||
362 | param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE; | 407 | param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE; |
363 | param->rx_pending = 1 << priv->params.log_rq_size; | 408 | param->rx_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type, |
409 | 1 << priv->params.log_rq_size); | ||
364 | param->tx_pending = 1 << priv->params.log_sq_size; | 410 | param->tx_pending = 1 << priv->params.log_sq_size; |
365 | } | 411 | } |
366 | 412 | ||
@@ -370,9 +416,13 @@ static int mlx5e_set_ringparam(struct net_device *dev, | |||
370 | struct mlx5e_priv *priv = netdev_priv(dev); | 416 | struct mlx5e_priv *priv = netdev_priv(dev); |
371 | bool was_opened; | 417 | bool was_opened; |
372 | int rq_wq_type = priv->params.rq_wq_type; | 418 | int rq_wq_type = priv->params.rq_wq_type; |
419 | u32 rx_pending_wqes; | ||
420 | u32 min_rq_size; | ||
421 | u32 max_rq_size; | ||
373 | u16 min_rx_wqes; | 422 | u16 min_rx_wqes; |
374 | u8 log_rq_size; | 423 | u8 log_rq_size; |
375 | u8 log_sq_size; | 424 | u8 log_sq_size; |
425 | u32 num_mtts; | ||
376 | int err = 0; | 426 | int err = 0; |
377 | 427 | ||
378 | if (param->rx_jumbo_pending) { | 428 | if (param->rx_jumbo_pending) { |
@@ -385,18 +435,36 @@ static int mlx5e_set_ringparam(struct net_device *dev, | |||
385 | __func__); | 435 | __func__); |
386 | return -EINVAL; | 436 | return -EINVAL; |
387 | } | 437 | } |
388 | if (param->rx_pending < (1 << mlx5_min_log_rq_size(rq_wq_type))) { | 438 | |
439 | min_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type, | ||
440 | 1 << mlx5_min_log_rq_size(rq_wq_type)); | ||
441 | max_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type, | ||
442 | 1 << mlx5_max_log_rq_size(rq_wq_type)); | ||
443 | rx_pending_wqes = mlx5e_packets_to_rx_wqes(priv, rq_wq_type, | ||
444 | param->rx_pending); | ||
445 | |||
446 | if (param->rx_pending < min_rq_size) { | ||
389 | netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n", | 447 | netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n", |
390 | __func__, param->rx_pending, | 448 | __func__, param->rx_pending, |
391 | 1 << mlx5_min_log_rq_size(rq_wq_type)); | 449 | min_rq_size); |
392 | return -EINVAL; | 450 | return -EINVAL; |
393 | } | 451 | } |
394 | if (param->rx_pending > (1 << mlx5_max_log_rq_size(rq_wq_type))) { | 452 | if (param->rx_pending > max_rq_size) { |
395 | netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n", | 453 | netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n", |
396 | __func__, param->rx_pending, | 454 | __func__, param->rx_pending, |
397 | 1 << mlx5_max_log_rq_size(rq_wq_type)); | 455 | max_rq_size); |
398 | return -EINVAL; | 456 | return -EINVAL; |
399 | } | 457 | } |
458 | |||
459 | num_mtts = MLX5E_REQUIRED_MTTS(priv->params.num_channels, | ||
460 | rx_pending_wqes); | ||
461 | if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ && | ||
462 | !MLX5E_VALID_NUM_MTTS(num_mtts)) { | ||
463 | netdev_info(dev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n", | ||
464 | __func__, param->rx_pending); | ||
465 | return -EINVAL; | ||
466 | } | ||
467 | |||
400 | if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) { | 468 | if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) { |
401 | netdev_info(dev, "%s: tx_pending (%d) < min (%d)\n", | 469 | netdev_info(dev, "%s: tx_pending (%d) < min (%d)\n", |
402 | __func__, param->tx_pending, | 470 | __func__, param->tx_pending, |
@@ -410,9 +478,9 @@ static int mlx5e_set_ringparam(struct net_device *dev, | |||
410 | return -EINVAL; | 478 | return -EINVAL; |
411 | } | 479 | } |
412 | 480 | ||
413 | log_rq_size = order_base_2(param->rx_pending); | 481 | log_rq_size = order_base_2(rx_pending_wqes); |
414 | log_sq_size = order_base_2(param->tx_pending); | 482 | log_sq_size = order_base_2(param->tx_pending); |
415 | min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, param->rx_pending); | 483 | min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, rx_pending_wqes); |
416 | 484 | ||
417 | if (log_rq_size == priv->params.log_rq_size && | 485 | if (log_rq_size == priv->params.log_rq_size && |
418 | log_sq_size == priv->params.log_sq_size && | 486 | log_sq_size == priv->params.log_sq_size && |
@@ -454,6 +522,7 @@ static int mlx5e_set_channels(struct net_device *dev, | |||
454 | unsigned int count = ch->combined_count; | 522 | unsigned int count = ch->combined_count; |
455 | bool arfs_enabled; | 523 | bool arfs_enabled; |
456 | bool was_opened; | 524 | bool was_opened; |
525 | u32 num_mtts; | ||
457 | int err = 0; | 526 | int err = 0; |
458 | 527 | ||
459 | if (!count) { | 528 | if (!count) { |
@@ -472,6 +541,14 @@ static int mlx5e_set_channels(struct net_device *dev, | |||
472 | return -EINVAL; | 541 | return -EINVAL; |
473 | } | 542 | } |
474 | 543 | ||
544 | num_mtts = MLX5E_REQUIRED_MTTS(count, BIT(priv->params.log_rq_size)); | ||
545 | if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ && | ||
546 | !MLX5E_VALID_NUM_MTTS(num_mtts)) { | ||
547 | netdev_info(dev, "%s: rx count (%d) request can't be satisfied, try to reduce.\n", | ||
548 | __func__, count); | ||
549 | return -EINVAL; | ||
550 | } | ||
551 | |||
475 | if (priv->params.num_channels == count) | 552 | if (priv->params.num_channels == count) |
476 | return 0; | 553 | return 0; |
477 | 554 | ||
@@ -582,9 +659,10 @@ out: | |||
582 | static void ptys2ethtool_supported_link(unsigned long *supported_modes, | 659 | static void ptys2ethtool_supported_link(unsigned long *supported_modes, |
583 | u32 eth_proto_cap) | 660 | u32 eth_proto_cap) |
584 | { | 661 | { |
662 | unsigned long proto_cap = eth_proto_cap; | ||
585 | int proto; | 663 | int proto; |
586 | 664 | ||
587 | for_each_set_bit(proto, (unsigned long *)ð_proto_cap, MLX5E_LINK_MODES_NUMBER) | 665 | for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER) |
588 | bitmap_or(supported_modes, supported_modes, | 666 | bitmap_or(supported_modes, supported_modes, |
589 | ptys2ethtool_table[proto].supported, | 667 | ptys2ethtool_table[proto].supported, |
590 | __ETHTOOL_LINK_MODE_MASK_NBITS); | 668 | __ETHTOOL_LINK_MODE_MASK_NBITS); |
@@ -593,9 +671,10 @@ static void ptys2ethtool_supported_link(unsigned long *supported_modes, | |||
593 | static void ptys2ethtool_adver_link(unsigned long *advertising_modes, | 671 | static void ptys2ethtool_adver_link(unsigned long *advertising_modes, |
594 | u32 eth_proto_cap) | 672 | u32 eth_proto_cap) |
595 | { | 673 | { |
674 | unsigned long proto_cap = eth_proto_cap; | ||
596 | int proto; | 675 | int proto; |
597 | 676 | ||
598 | for_each_set_bit(proto, (unsigned long *)ð_proto_cap, MLX5E_LINK_MODES_NUMBER) | 677 | for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER) |
599 | bitmap_or(advertising_modes, advertising_modes, | 678 | bitmap_or(advertising_modes, advertising_modes, |
600 | ptys2ethtool_table[proto].advertised, | 679 | ptys2ethtool_table[proto].advertised, |
601 | __ETHTOOL_LINK_MODE_MASK_NBITS); | 680 | __ETHTOOL_LINK_MODE_MASK_NBITS); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 870bea37c57c..2459c7f3db8d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |||
@@ -39,13 +39,6 @@ | |||
39 | #include "eswitch.h" | 39 | #include "eswitch.h" |
40 | #include "vxlan.h" | 40 | #include "vxlan.h" |
41 | 41 | ||
42 | enum { | ||
43 | MLX5_EN_QP_FLUSH_TIMEOUT_MS = 5000, | ||
44 | MLX5_EN_QP_FLUSH_MSLEEP_QUANT = 20, | ||
45 | MLX5_EN_QP_FLUSH_MAX_ITER = MLX5_EN_QP_FLUSH_TIMEOUT_MS / | ||
46 | MLX5_EN_QP_FLUSH_MSLEEP_QUANT, | ||
47 | }; | ||
48 | |||
49 | struct mlx5e_rq_param { | 42 | struct mlx5e_rq_param { |
50 | u32 rqc[MLX5_ST_SZ_DW(rqc)]; | 43 | u32 rqc[MLX5_ST_SZ_DW(rqc)]; |
51 | struct mlx5_wq_param wq; | 44 | struct mlx5_wq_param wq; |
@@ -162,6 +155,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv) | |||
162 | s->tx_queue_stopped += sq_stats->stopped; | 155 | s->tx_queue_stopped += sq_stats->stopped; |
163 | s->tx_queue_wake += sq_stats->wake; | 156 | s->tx_queue_wake += sq_stats->wake; |
164 | s->tx_queue_dropped += sq_stats->dropped; | 157 | s->tx_queue_dropped += sq_stats->dropped; |
158 | s->tx_xmit_more += sq_stats->xmit_more; | ||
165 | s->tx_csum_partial_inner += sq_stats->csum_partial_inner; | 159 | s->tx_csum_partial_inner += sq_stats->csum_partial_inner; |
166 | tx_offload_none += sq_stats->csum_none; | 160 | tx_offload_none += sq_stats->csum_none; |
167 | } | 161 | } |
@@ -340,6 +334,9 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, | |||
340 | rq->alloc_wqe = mlx5e_alloc_rx_mpwqe; | 334 | rq->alloc_wqe = mlx5e_alloc_rx_mpwqe; |
341 | rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe; | 335 | rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe; |
342 | 336 | ||
337 | rq->mpwqe_mtt_offset = c->ix * | ||
338 | MLX5E_REQUIRED_MTTS(1, BIT(priv->params.log_rq_size)); | ||
339 | |||
343 | rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz); | 340 | rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz); |
344 | rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides); | 341 | rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides); |
345 | rq->wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides; | 342 | rq->wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides; |
@@ -428,7 +425,6 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param) | |||
428 | 425 | ||
429 | MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn); | 426 | MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn); |
430 | MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST); | 427 | MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST); |
431 | MLX5_SET(rqc, rqc, flush_in_error_en, 1); | ||
432 | MLX5_SET(rqc, rqc, vsd, priv->params.vlan_strip_disable); | 428 | MLX5_SET(rqc, rqc, vsd, priv->params.vlan_strip_disable); |
433 | MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift - | 429 | MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift - |
434 | MLX5_ADAPTER_PAGE_SHIFT); | 430 | MLX5_ADAPTER_PAGE_SHIFT); |
@@ -525,6 +521,27 @@ static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq) | |||
525 | return -ETIMEDOUT; | 521 | return -ETIMEDOUT; |
526 | } | 522 | } |
527 | 523 | ||
524 | static void mlx5e_free_rx_descs(struct mlx5e_rq *rq) | ||
525 | { | ||
526 | struct mlx5_wq_ll *wq = &rq->wq; | ||
527 | struct mlx5e_rx_wqe *wqe; | ||
528 | __be16 wqe_ix_be; | ||
529 | u16 wqe_ix; | ||
530 | |||
531 | /* UMR WQE (if in progress) is always at wq->head */ | ||
532 | if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state)) | ||
533 | mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]); | ||
534 | |||
535 | while (!mlx5_wq_ll_is_empty(wq)) { | ||
536 | wqe_ix_be = *wq->tail_next; | ||
537 | wqe_ix = be16_to_cpu(wqe_ix_be); | ||
538 | wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix); | ||
539 | rq->dealloc_wqe(rq, wqe_ix); | ||
540 | mlx5_wq_ll_pop(&rq->wq, wqe_ix_be, | ||
541 | &wqe->next.next_wqe_index); | ||
542 | } | ||
543 | } | ||
544 | |||
528 | static int mlx5e_open_rq(struct mlx5e_channel *c, | 545 | static int mlx5e_open_rq(struct mlx5e_channel *c, |
529 | struct mlx5e_rq_param *param, | 546 | struct mlx5e_rq_param *param, |
530 | struct mlx5e_rq *rq) | 547 | struct mlx5e_rq *rq) |
@@ -548,8 +565,6 @@ static int mlx5e_open_rq(struct mlx5e_channel *c, | |||
548 | if (param->am_enabled) | 565 | if (param->am_enabled) |
549 | set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); | 566 | set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); |
550 | 567 | ||
551 | set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state); | ||
552 | |||
553 | sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP; | 568 | sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP; |
554 | sq->ico_wqe_info[pi].num_wqebbs = 1; | 569 | sq->ico_wqe_info[pi].num_wqebbs = 1; |
555 | mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */ | 570 | mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */ |
@@ -566,23 +581,8 @@ err_destroy_rq: | |||
566 | 581 | ||
567 | static void mlx5e_close_rq(struct mlx5e_rq *rq) | 582 | static void mlx5e_close_rq(struct mlx5e_rq *rq) |
568 | { | 583 | { |
569 | int tout = 0; | 584 | set_bit(MLX5E_RQ_STATE_FLUSH, &rq->state); |
570 | int err; | ||
571 | |||
572 | clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state); | ||
573 | napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */ | 585 | napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */ |
574 | |||
575 | err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR); | ||
576 | while (!mlx5_wq_ll_is_empty(&rq->wq) && !err && | ||
577 | tout++ < MLX5_EN_QP_FLUSH_MAX_ITER) | ||
578 | msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT); | ||
579 | |||
580 | if (err || tout == MLX5_EN_QP_FLUSH_MAX_ITER) | ||
581 | set_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state); | ||
582 | |||
583 | /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */ | ||
584 | napi_synchronize(&rq->channel->napi); | ||
585 | |||
586 | cancel_work_sync(&rq->am.work); | 586 | cancel_work_sync(&rq->am.work); |
587 | 587 | ||
588 | mlx5e_disable_rq(rq); | 588 | mlx5e_disable_rq(rq); |
@@ -821,7 +821,6 @@ static int mlx5e_open_sq(struct mlx5e_channel *c, | |||
821 | goto err_disable_sq; | 821 | goto err_disable_sq; |
822 | 822 | ||
823 | if (sq->txq) { | 823 | if (sq->txq) { |
824 | set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state); | ||
825 | netdev_tx_reset_queue(sq->txq); | 824 | netdev_tx_reset_queue(sq->txq); |
826 | netif_tx_start_queue(sq->txq); | 825 | netif_tx_start_queue(sq->txq); |
827 | } | 826 | } |
@@ -845,38 +844,20 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq) | |||
845 | 844 | ||
846 | static void mlx5e_close_sq(struct mlx5e_sq *sq) | 845 | static void mlx5e_close_sq(struct mlx5e_sq *sq) |
847 | { | 846 | { |
848 | int tout = 0; | 847 | set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state); |
849 | int err; | 848 | /* prevent netif_tx_wake_queue */ |
849 | napi_synchronize(&sq->channel->napi); | ||
850 | 850 | ||
851 | if (sq->txq) { | 851 | if (sq->txq) { |
852 | clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state); | ||
853 | /* prevent netif_tx_wake_queue */ | ||
854 | napi_synchronize(&sq->channel->napi); | ||
855 | netif_tx_disable_queue(sq->txq); | 852 | netif_tx_disable_queue(sq->txq); |
856 | 853 | ||
857 | /* ensure hw is notified of all pending wqes */ | 854 | /* last doorbell out, godspeed .. */ |
858 | if (mlx5e_sq_has_room_for(sq, 1)) | 855 | if (mlx5e_sq_has_room_for(sq, 1)) |
859 | mlx5e_send_nop(sq, true); | 856 | mlx5e_send_nop(sq, true); |
860 | |||
861 | err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, | ||
862 | MLX5_SQC_STATE_ERR, false, 0); | ||
863 | if (err) | ||
864 | set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state); | ||
865 | } | ||
866 | |||
867 | /* wait till sq is empty, unless a TX timeout occurred on this SQ */ | ||
868 | while (sq->cc != sq->pc && | ||
869 | !test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)) { | ||
870 | msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT); | ||
871 | if (tout++ > MLX5_EN_QP_FLUSH_MAX_ITER) | ||
872 | set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state); | ||
873 | } | 857 | } |
874 | 858 | ||
875 | /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */ | ||
876 | napi_synchronize(&sq->channel->napi); | ||
877 | |||
878 | mlx5e_free_tx_descs(sq); | ||
879 | mlx5e_disable_sq(sq); | 859 | mlx5e_disable_sq(sq); |
860 | mlx5e_free_tx_descs(sq); | ||
880 | mlx5e_destroy_sq(sq); | 861 | mlx5e_destroy_sq(sq); |
881 | } | 862 | } |
882 | 863 | ||
@@ -1826,10 +1807,6 @@ int mlx5e_open_locked(struct net_device *netdev) | |||
1826 | netif_set_real_num_tx_queues(netdev, num_txqs); | 1807 | netif_set_real_num_tx_queues(netdev, num_txqs); |
1827 | netif_set_real_num_rx_queues(netdev, priv->params.num_channels); | 1808 | netif_set_real_num_rx_queues(netdev, priv->params.num_channels); |
1828 | 1809 | ||
1829 | err = mlx5e_set_dev_port_mtu(netdev); | ||
1830 | if (err) | ||
1831 | goto err_clear_state_opened_flag; | ||
1832 | |||
1833 | err = mlx5e_open_channels(priv); | 1810 | err = mlx5e_open_channels(priv); |
1834 | if (err) { | 1811 | if (err) { |
1835 | netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n", | 1812 | netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n", |
@@ -2573,6 +2550,7 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) | |||
2573 | u16 max_mtu; | 2550 | u16 max_mtu; |
2574 | u16 min_mtu; | 2551 | u16 min_mtu; |
2575 | int err = 0; | 2552 | int err = 0; |
2553 | bool reset; | ||
2576 | 2554 | ||
2577 | mlx5_query_port_max_mtu(mdev, &max_mtu, 1); | 2555 | mlx5_query_port_max_mtu(mdev, &max_mtu, 1); |
2578 | 2556 | ||
@@ -2588,13 +2566,18 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) | |||
2588 | 2566 | ||
2589 | mutex_lock(&priv->state_lock); | 2567 | mutex_lock(&priv->state_lock); |
2590 | 2568 | ||
2569 | reset = !priv->params.lro_en && | ||
2570 | (priv->params.rq_wq_type != | ||
2571 | MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ); | ||
2572 | |||
2591 | was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); | 2573 | was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); |
2592 | if (was_opened) | 2574 | if (was_opened && reset) |
2593 | mlx5e_close_locked(netdev); | 2575 | mlx5e_close_locked(netdev); |
2594 | 2576 | ||
2595 | netdev->mtu = new_mtu; | 2577 | netdev->mtu = new_mtu; |
2578 | mlx5e_set_dev_port_mtu(netdev); | ||
2596 | 2579 | ||
2597 | if (was_opened) | 2580 | if (was_opened && reset) |
2598 | err = mlx5e_open_locked(netdev); | 2581 | err = mlx5e_open_locked(netdev); |
2599 | 2582 | ||
2600 | mutex_unlock(&priv->state_lock); | 2583 | mutex_unlock(&priv->state_lock); |
@@ -2794,7 +2777,7 @@ static void mlx5e_tx_timeout(struct net_device *dev) | |||
2794 | if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i))) | 2777 | if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i))) |
2795 | continue; | 2778 | continue; |
2796 | sched_work = true; | 2779 | sched_work = true; |
2797 | set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state); | 2780 | set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state); |
2798 | netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n", | 2781 | netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n", |
2799 | i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc); | 2782 | i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc); |
2800 | } | 2783 | } |
@@ -3231,8 +3214,8 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv) | |||
3231 | struct mlx5_create_mkey_mbox_in *in; | 3214 | struct mlx5_create_mkey_mbox_in *in; |
3232 | struct mlx5_mkey_seg *mkc; | 3215 | struct mlx5_mkey_seg *mkc; |
3233 | int inlen = sizeof(*in); | 3216 | int inlen = sizeof(*in); |
3234 | u64 npages = | 3217 | u64 npages = MLX5E_REQUIRED_MTTS(priv->profile->max_nch(mdev), |
3235 | priv->profile->max_nch(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS; | 3218 | BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW)); |
3236 | int err; | 3219 | int err; |
3237 | 3220 | ||
3238 | in = mlx5_vzalloc(inlen); | 3221 | in = mlx5_vzalloc(inlen); |
@@ -3246,10 +3229,12 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv) | |||
3246 | MLX5_PERM_LOCAL_WRITE | | 3229 | MLX5_PERM_LOCAL_WRITE | |
3247 | MLX5_ACCESS_MODE_MTT; | 3230 | MLX5_ACCESS_MODE_MTT; |
3248 | 3231 | ||
3232 | npages = min_t(u32, ALIGN(U16_MAX, 4) * 2, npages); | ||
3233 | |||
3249 | mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); | 3234 | mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); |
3250 | mkc->flags_pd = cpu_to_be32(mdev->mlx5e_res.pdn); | 3235 | mkc->flags_pd = cpu_to_be32(mdev->mlx5e_res.pdn); |
3251 | mkc->len = cpu_to_be64(npages << PAGE_SHIFT); | 3236 | mkc->len = cpu_to_be64(npages << PAGE_SHIFT); |
3252 | mkc->xlt_oct_size = cpu_to_be32(mlx5e_get_mtt_octw(npages)); | 3237 | mkc->xlt_oct_size = cpu_to_be32(MLX5_MTT_OCTW(npages)); |
3253 | mkc->log2_page_size = PAGE_SHIFT; | 3238 | mkc->log2_page_size = PAGE_SHIFT; |
3254 | 3239 | ||
3255 | err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen, NULL, | 3240 | err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen, NULL, |
@@ -3385,6 +3370,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) | |||
3385 | queue_work(priv->wq, &priv->set_rx_mode_work); | 3370 | queue_work(priv->wq, &priv->set_rx_mode_work); |
3386 | 3371 | ||
3387 | if (MLX5_CAP_GEN(mdev, vport_group_manager)) { | 3372 | if (MLX5_CAP_GEN(mdev, vport_group_manager)) { |
3373 | mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id); | ||
3388 | rep.load = mlx5e_nic_rep_load; | 3374 | rep.load = mlx5e_nic_rep_load; |
3389 | rep.unload = mlx5e_nic_rep_unload; | 3375 | rep.unload = mlx5e_nic_rep_unload; |
3390 | rep.vport = 0; | 3376 | rep.vport = 0; |
@@ -3463,6 +3449,8 @@ void *mlx5e_create_netdev(struct mlx5_core_dev *mdev, | |||
3463 | 3449 | ||
3464 | mlx5e_init_l2_addr(priv); | 3450 | mlx5e_init_l2_addr(priv); |
3465 | 3451 | ||
3452 | mlx5e_set_dev_port_mtu(netdev); | ||
3453 | |||
3466 | err = register_netdev(netdev); | 3454 | err = register_netdev(netdev); |
3467 | if (err) { | 3455 | if (err) { |
3468 | mlx5_core_err(mdev, "register_netdev failed, %d\n", err); | 3456 | mlx5_core_err(mdev, "register_netdev failed, %d\n", err); |
@@ -3501,16 +3489,20 @@ static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev) | |||
3501 | struct mlx5_eswitch *esw = mdev->priv.eswitch; | 3489 | struct mlx5_eswitch *esw = mdev->priv.eswitch; |
3502 | int total_vfs = MLX5_TOTAL_VPORTS(mdev); | 3490 | int total_vfs = MLX5_TOTAL_VPORTS(mdev); |
3503 | int vport; | 3491 | int vport; |
3492 | u8 mac[ETH_ALEN]; | ||
3504 | 3493 | ||
3505 | if (!MLX5_CAP_GEN(mdev, vport_group_manager)) | 3494 | if (!MLX5_CAP_GEN(mdev, vport_group_manager)) |
3506 | return; | 3495 | return; |
3507 | 3496 | ||
3497 | mlx5_query_nic_vport_mac_address(mdev, 0, mac); | ||
3498 | |||
3508 | for (vport = 1; vport < total_vfs; vport++) { | 3499 | for (vport = 1; vport < total_vfs; vport++) { |
3509 | struct mlx5_eswitch_rep rep; | 3500 | struct mlx5_eswitch_rep rep; |
3510 | 3501 | ||
3511 | rep.load = mlx5e_vport_rep_load; | 3502 | rep.load = mlx5e_vport_rep_load; |
3512 | rep.unload = mlx5e_vport_rep_unload; | 3503 | rep.unload = mlx5e_vport_rep_unload; |
3513 | rep.vport = vport; | 3504 | rep.vport = vport; |
3505 | ether_addr_copy(rep.hw_id, mac); | ||
3514 | mlx5_eswitch_register_vport_rep(esw, &rep); | 3506 | mlx5_eswitch_register_vport_rep(esw, &rep); |
3515 | } | 3507 | } |
3516 | } | 3508 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 1c7d8b8314bf..134de4a11f1d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | |||
@@ -135,17 +135,16 @@ static const struct ethtool_ops mlx5e_rep_ethtool_ops = { | |||
135 | int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr) | 135 | int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr) |
136 | { | 136 | { |
137 | struct mlx5e_priv *priv = netdev_priv(dev); | 137 | struct mlx5e_priv *priv = netdev_priv(dev); |
138 | struct mlx5_eswitch_rep *rep = priv->ppriv; | ||
138 | struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; | 139 | struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; |
139 | u8 mac[ETH_ALEN]; | ||
140 | 140 | ||
141 | if (esw->mode == SRIOV_NONE) | 141 | if (esw->mode == SRIOV_NONE) |
142 | return -EOPNOTSUPP; | 142 | return -EOPNOTSUPP; |
143 | 143 | ||
144 | switch (attr->id) { | 144 | switch (attr->id) { |
145 | case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: | 145 | case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: |
146 | mlx5_query_nic_vport_mac_address(priv->mdev, 0, mac); | ||
147 | attr->u.ppid.id_len = ETH_ALEN; | 146 | attr->u.ppid.id_len = ETH_ALEN; |
148 | memcpy(&attr->u.ppid.id, &mac, ETH_ALEN); | 147 | ether_addr_copy(attr->u.ppid.id, rep->hw_id); |
149 | break; | 148 | break; |
150 | default: | 149 | default: |
151 | return -EOPNOTSUPP; | 150 | return -EOPNOTSUPP; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 9f2a16a507e0..e7c969df3dad 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | |||
@@ -324,9 +324,9 @@ mlx5e_copy_skb_header_fragmented_mpwqe(struct device *pdev, | |||
324 | } | 324 | } |
325 | } | 325 | } |
326 | 326 | ||
327 | static u16 mlx5e_get_wqe_mtt_offset(u16 rq_ix, u16 wqe_ix) | 327 | static u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix) |
328 | { | 328 | { |
329 | return rq_ix * MLX5_CHANNEL_MAX_NUM_MTTS + | 329 | return rq->mpwqe_mtt_offset + |
330 | wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8); | 330 | wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8); |
331 | } | 331 | } |
332 | 332 | ||
@@ -340,7 +340,7 @@ static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, | |||
340 | struct mlx5_wqe_data_seg *dseg = &wqe->data; | 340 | struct mlx5_wqe_data_seg *dseg = &wqe->data; |
341 | struct mlx5e_mpw_info *wi = &rq->wqe_info[ix]; | 341 | struct mlx5e_mpw_info *wi = &rq->wqe_info[ix]; |
342 | u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS); | 342 | u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS); |
343 | u16 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix); | 343 | u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix); |
344 | 344 | ||
345 | memset(wqe, 0, sizeof(*wqe)); | 345 | memset(wqe, 0, sizeof(*wqe)); |
346 | cseg->opmod_idx_opcode = | 346 | cseg->opmod_idx_opcode = |
@@ -353,9 +353,9 @@ static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, | |||
353 | 353 | ||
354 | ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN; | 354 | ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN; |
355 | ucseg->klm_octowords = | 355 | ucseg->klm_octowords = |
356 | cpu_to_be16(mlx5e_get_mtt_octw(MLX5_MPWRQ_PAGES_PER_WQE)); | 356 | cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE)); |
357 | ucseg->bsf_octowords = | 357 | ucseg->bsf_octowords = |
358 | cpu_to_be16(mlx5e_get_mtt_octw(umr_wqe_mtt_offset)); | 358 | cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset)); |
359 | ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); | 359 | ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); |
360 | 360 | ||
361 | dseg->lkey = sq->mkey_be; | 361 | dseg->lkey = sq->mkey_be; |
@@ -423,7 +423,7 @@ static int mlx5e_alloc_rx_fragmented_mpwqe(struct mlx5e_rq *rq, | |||
423 | { | 423 | { |
424 | struct mlx5e_mpw_info *wi = &rq->wqe_info[ix]; | 424 | struct mlx5e_mpw_info *wi = &rq->wqe_info[ix]; |
425 | int mtt_sz = mlx5e_get_wqe_mtt_sz(); | 425 | int mtt_sz = mlx5e_get_wqe_mtt_sz(); |
426 | u32 dma_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix) << PAGE_SHIFT; | 426 | u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, ix) << PAGE_SHIFT; |
427 | int i; | 427 | int i; |
428 | 428 | ||
429 | wi->umr.dma_info = kmalloc(sizeof(*wi->umr.dma_info) * | 429 | wi->umr.dma_info = kmalloc(sizeof(*wi->umr.dma_info) * |
@@ -506,6 +506,12 @@ void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq) | |||
506 | struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head); | 506 | struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head); |
507 | 507 | ||
508 | clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state); | 508 | clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state); |
509 | |||
510 | if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state))) { | ||
511 | mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]); | ||
512 | return; | ||
513 | } | ||
514 | |||
509 | mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index)); | 515 | mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index)); |
510 | rq->stats.mpwqe_frag++; | 516 | rq->stats.mpwqe_frag++; |
511 | 517 | ||
@@ -595,26 +601,9 @@ void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) | |||
595 | wi->free_wqe(rq, wi); | 601 | wi->free_wqe(rq, wi); |
596 | } | 602 | } |
597 | 603 | ||
598 | void mlx5e_free_rx_descs(struct mlx5e_rq *rq) | ||
599 | { | ||
600 | struct mlx5_wq_ll *wq = &rq->wq; | ||
601 | struct mlx5e_rx_wqe *wqe; | ||
602 | __be16 wqe_ix_be; | ||
603 | u16 wqe_ix; | ||
604 | |||
605 | while (!mlx5_wq_ll_is_empty(wq)) { | ||
606 | wqe_ix_be = *wq->tail_next; | ||
607 | wqe_ix = be16_to_cpu(wqe_ix_be); | ||
608 | wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix); | ||
609 | rq->dealloc_wqe(rq, wqe_ix); | ||
610 | mlx5_wq_ll_pop(&rq->wq, wqe_ix_be, | ||
611 | &wqe->next.next_wqe_index); | ||
612 | } | ||
613 | } | ||
614 | |||
615 | #define RQ_CANNOT_POST(rq) \ | 604 | #define RQ_CANNOT_POST(rq) \ |
616 | (!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \ | 605 | (test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state) || \ |
617 | test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state)) | 606 | test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state)) |
618 | 607 | ||
619 | bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) | 608 | bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) |
620 | { | 609 | { |
@@ -648,24 +637,32 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) | |||
648 | static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, | 637 | static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, |
649 | u32 cqe_bcnt) | 638 | u32 cqe_bcnt) |
650 | { | 639 | { |
651 | struct ethhdr *eth = (struct ethhdr *)(skb->data); | 640 | struct ethhdr *eth = (struct ethhdr *)(skb->data); |
652 | struct iphdr *ipv4 = (struct iphdr *)(skb->data + ETH_HLEN); | 641 | struct iphdr *ipv4; |
653 | struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + ETH_HLEN); | 642 | struct ipv6hdr *ipv6; |
654 | struct tcphdr *tcp; | 643 | struct tcphdr *tcp; |
644 | int network_depth = 0; | ||
645 | __be16 proto; | ||
646 | u16 tot_len; | ||
655 | 647 | ||
656 | u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe); | 648 | u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe); |
657 | int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) || | 649 | int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) || |
658 | (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type)); | 650 | (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type)); |
659 | 651 | ||
660 | u16 tot_len = cqe_bcnt - ETH_HLEN; | 652 | skb->mac_len = ETH_HLEN; |
653 | proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth); | ||
654 | |||
655 | ipv4 = (struct iphdr *)(skb->data + network_depth); | ||
656 | ipv6 = (struct ipv6hdr *)(skb->data + network_depth); | ||
657 | tot_len = cqe_bcnt - network_depth; | ||
661 | 658 | ||
662 | if (eth->h_proto == htons(ETH_P_IP)) { | 659 | if (proto == htons(ETH_P_IP)) { |
663 | tcp = (struct tcphdr *)(skb->data + ETH_HLEN + | 660 | tcp = (struct tcphdr *)(skb->data + network_depth + |
664 | sizeof(struct iphdr)); | 661 | sizeof(struct iphdr)); |
665 | ipv6 = NULL; | 662 | ipv6 = NULL; |
666 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; | 663 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; |
667 | } else { | 664 | } else { |
668 | tcp = (struct tcphdr *)(skb->data + ETH_HLEN + | 665 | tcp = (struct tcphdr *)(skb->data + network_depth + |
669 | sizeof(struct ipv6hdr)); | 666 | sizeof(struct ipv6hdr)); |
670 | ipv4 = NULL; | 667 | ipv4 = NULL; |
671 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; | 668 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; |
@@ -916,7 +913,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) | |||
916 | struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); | 913 | struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); |
917 | int work_done = 0; | 914 | int work_done = 0; |
918 | 915 | ||
919 | if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state))) | 916 | if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state))) |
920 | return 0; | 917 | return 0; |
921 | 918 | ||
922 | if (cq->decmprs_left) | 919 | if (cq->decmprs_left) |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 7b9d8a989b52..499487ce3b53 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | |||
@@ -70,6 +70,7 @@ struct mlx5e_sw_stats { | |||
70 | u64 tx_queue_stopped; | 70 | u64 tx_queue_stopped; |
71 | u64 tx_queue_wake; | 71 | u64 tx_queue_wake; |
72 | u64 tx_queue_dropped; | 72 | u64 tx_queue_dropped; |
73 | u64 tx_xmit_more; | ||
73 | u64 rx_wqe_err; | 74 | u64 rx_wqe_err; |
74 | u64 rx_mpwqe_filler; | 75 | u64 rx_mpwqe_filler; |
75 | u64 rx_mpwqe_frag; | 76 | u64 rx_mpwqe_frag; |
@@ -101,6 +102,7 @@ static const struct counter_desc sw_stats_desc[] = { | |||
101 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) }, | 102 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) }, |
102 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) }, | 103 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) }, |
103 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) }, | 104 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) }, |
105 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) }, | ||
104 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) }, | 106 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) }, |
105 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) }, | 107 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) }, |
106 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_frag) }, | 108 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_frag) }, |
@@ -298,6 +300,7 @@ struct mlx5e_sq_stats { | |||
298 | /* commonly accessed in data path */ | 300 | /* commonly accessed in data path */ |
299 | u64 packets; | 301 | u64 packets; |
300 | u64 bytes; | 302 | u64 bytes; |
303 | u64 xmit_more; | ||
301 | u64 tso_packets; | 304 | u64 tso_packets; |
302 | u64 tso_bytes; | 305 | u64 tso_bytes; |
303 | u64 tso_inner_packets; | 306 | u64 tso_inner_packets; |
@@ -324,6 +327,7 @@ static const struct counter_desc sq_stats_desc[] = { | |||
324 | { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) }, | 327 | { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) }, |
325 | { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) }, | 328 | { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) }, |
326 | { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) }, | 329 | { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) }, |
330 | { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) }, | ||
327 | }; | 331 | }; |
328 | 332 | ||
329 | #define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc) | 333 | #define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc) |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 0f19b01e3fff..22cfc4ac1837 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |||
@@ -170,7 +170,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec | |||
170 | if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { | 170 | if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { |
171 | struct flow_dissector_key_control *key = | 171 | struct flow_dissector_key_control *key = |
172 | skb_flow_dissector_target(f->dissector, | 172 | skb_flow_dissector_target(f->dissector, |
173 | FLOW_DISSECTOR_KEY_BASIC, | 173 | FLOW_DISSECTOR_KEY_CONTROL, |
174 | f->key); | 174 | f->key); |
175 | addr_type = key->addr_type; | 175 | addr_type = key->addr_type; |
176 | } | 176 | } |
@@ -318,6 +318,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | |||
318 | u32 *action, u32 *flow_tag) | 318 | u32 *action, u32 *flow_tag) |
319 | { | 319 | { |
320 | const struct tc_action *a; | 320 | const struct tc_action *a; |
321 | LIST_HEAD(actions); | ||
321 | 322 | ||
322 | if (tc_no_actions(exts)) | 323 | if (tc_no_actions(exts)) |
323 | return -EINVAL; | 324 | return -EINVAL; |
@@ -325,7 +326,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | |||
325 | *flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; | 326 | *flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; |
326 | *action = 0; | 327 | *action = 0; |
327 | 328 | ||
328 | tc_for_each_action(a, exts) { | 329 | tcf_exts_to_list(exts, &actions); |
330 | list_for_each_entry(a, &actions, list) { | ||
329 | /* Only support a single action per rule */ | 331 | /* Only support a single action per rule */ |
330 | if (*action) | 332 | if (*action) |
331 | return -EINVAL; | 333 | return -EINVAL; |
@@ -362,13 +364,15 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | |||
362 | u32 *action, u32 *dest_vport) | 364 | u32 *action, u32 *dest_vport) |
363 | { | 365 | { |
364 | const struct tc_action *a; | 366 | const struct tc_action *a; |
367 | LIST_HEAD(actions); | ||
365 | 368 | ||
366 | if (tc_no_actions(exts)) | 369 | if (tc_no_actions(exts)) |
367 | return -EINVAL; | 370 | return -EINVAL; |
368 | 371 | ||
369 | *action = 0; | 372 | *action = 0; |
370 | 373 | ||
371 | tc_for_each_action(a, exts) { | 374 | tcf_exts_to_list(exts, &actions); |
375 | list_for_each_entry(a, &actions, list) { | ||
372 | /* Only support a single action per rule */ | 376 | /* Only support a single action per rule */ |
373 | if (*action) | 377 | if (*action) |
374 | return -EINVAL; | 378 | return -EINVAL; |
@@ -503,6 +507,7 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv, | |||
503 | struct mlx5e_tc_flow *flow; | 507 | struct mlx5e_tc_flow *flow; |
504 | struct tc_action *a; | 508 | struct tc_action *a; |
505 | struct mlx5_fc *counter; | 509 | struct mlx5_fc *counter; |
510 | LIST_HEAD(actions); | ||
506 | u64 bytes; | 511 | u64 bytes; |
507 | u64 packets; | 512 | u64 packets; |
508 | u64 lastuse; | 513 | u64 lastuse; |
@@ -518,7 +523,8 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv, | |||
518 | 523 | ||
519 | mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); | 524 | mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); |
520 | 525 | ||
521 | tc_for_each_action(a, f->exts) | 526 | tcf_exts_to_list(f->exts, &actions); |
527 | list_for_each_entry(a, &actions, list) | ||
522 | tcf_action_stats_update(a, bytes, packets, lastuse); | 528 | tcf_action_stats_update(a, bytes, packets, lastuse); |
523 | 529 | ||
524 | return 0; | 530 | return 0; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index e073bf59890d..eb0e72537f10 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | |||
@@ -356,6 +356,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) | |||
356 | sq->stats.stopped++; | 356 | sq->stats.stopped++; |
357 | } | 357 | } |
358 | 358 | ||
359 | sq->stats.xmit_more += skb->xmit_more; | ||
359 | if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) { | 360 | if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) { |
360 | int bf_sz = 0; | 361 | int bf_sz = 0; |
361 | 362 | ||
@@ -394,35 +395,6 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev) | |||
394 | return mlx5e_sq_xmit(sq, skb); | 395 | return mlx5e_sq_xmit(sq, skb); |
395 | } | 396 | } |
396 | 397 | ||
397 | void mlx5e_free_tx_descs(struct mlx5e_sq *sq) | ||
398 | { | ||
399 | struct mlx5e_tx_wqe_info *wi; | ||
400 | struct sk_buff *skb; | ||
401 | u16 ci; | ||
402 | int i; | ||
403 | |||
404 | while (sq->cc != sq->pc) { | ||
405 | ci = sq->cc & sq->wq.sz_m1; | ||
406 | skb = sq->skb[ci]; | ||
407 | wi = &sq->wqe_info[ci]; | ||
408 | |||
409 | if (!skb) { /* nop */ | ||
410 | sq->cc++; | ||
411 | continue; | ||
412 | } | ||
413 | |||
414 | for (i = 0; i < wi->num_dma; i++) { | ||
415 | struct mlx5e_sq_dma *dma = | ||
416 | mlx5e_dma_get(sq, sq->dma_fifo_cc++); | ||
417 | |||
418 | mlx5e_tx_dma_unmap(sq->pdev, dma); | ||
419 | } | ||
420 | |||
421 | dev_kfree_skb_any(skb); | ||
422 | sq->cc += wi->num_wqebbs; | ||
423 | } | ||
424 | } | ||
425 | |||
426 | bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) | 398 | bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) |
427 | { | 399 | { |
428 | struct mlx5e_sq *sq; | 400 | struct mlx5e_sq *sq; |
@@ -434,7 +406,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) | |||
434 | 406 | ||
435 | sq = container_of(cq, struct mlx5e_sq, cq); | 407 | sq = container_of(cq, struct mlx5e_sq, cq); |
436 | 408 | ||
437 | if (unlikely(test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state))) | 409 | if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state))) |
438 | return false; | 410 | return false; |
439 | 411 | ||
440 | npkts = 0; | 412 | npkts = 0; |
@@ -512,11 +484,39 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) | |||
512 | netdev_tx_completed_queue(sq->txq, npkts, nbytes); | 484 | netdev_tx_completed_queue(sq->txq, npkts, nbytes); |
513 | 485 | ||
514 | if (netif_tx_queue_stopped(sq->txq) && | 486 | if (netif_tx_queue_stopped(sq->txq) && |
515 | mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM) && | 487 | mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM)) { |
516 | likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) { | 488 | netif_tx_wake_queue(sq->txq); |
517 | netif_tx_wake_queue(sq->txq); | 489 | sq->stats.wake++; |
518 | sq->stats.wake++; | ||
519 | } | 490 | } |
520 | 491 | ||
521 | return (i == MLX5E_TX_CQ_POLL_BUDGET); | 492 | return (i == MLX5E_TX_CQ_POLL_BUDGET); |
522 | } | 493 | } |
494 | |||
495 | void mlx5e_free_tx_descs(struct mlx5e_sq *sq) | ||
496 | { | ||
497 | struct mlx5e_tx_wqe_info *wi; | ||
498 | struct sk_buff *skb; | ||
499 | u16 ci; | ||
500 | int i; | ||
501 | |||
502 | while (sq->cc != sq->pc) { | ||
503 | ci = sq->cc & sq->wq.sz_m1; | ||
504 | skb = sq->skb[ci]; | ||
505 | wi = &sq->wqe_info[ci]; | ||
506 | |||
507 | if (!skb) { /* nop */ | ||
508 | sq->cc++; | ||
509 | continue; | ||
510 | } | ||
511 | |||
512 | for (i = 0; i < wi->num_dma; i++) { | ||
513 | struct mlx5e_sq_dma *dma = | ||
514 | mlx5e_dma_get(sq, sq->dma_fifo_cc++); | ||
515 | |||
516 | mlx5e_tx_dma_unmap(sq->pdev, dma); | ||
517 | } | ||
518 | |||
519 | dev_kfree_skb_any(skb); | ||
520 | sq->cc += wi->num_wqebbs; | ||
521 | } | ||
522 | } | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index 64ae2e800daa..9bf33bb69210 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | |||
@@ -51,16 +51,18 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq) | |||
51 | 51 | ||
52 | static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) | 52 | static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) |
53 | { | 53 | { |
54 | struct mlx5e_sq *sq = container_of(cq, struct mlx5e_sq, cq); | ||
54 | struct mlx5_wq_cyc *wq; | 55 | struct mlx5_wq_cyc *wq; |
55 | struct mlx5_cqe64 *cqe; | 56 | struct mlx5_cqe64 *cqe; |
56 | struct mlx5e_sq *sq; | ||
57 | u16 sqcc; | 57 | u16 sqcc; |
58 | 58 | ||
59 | if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state))) | ||
60 | return; | ||
61 | |||
59 | cqe = mlx5e_get_cqe(cq); | 62 | cqe = mlx5e_get_cqe(cq); |
60 | if (likely(!cqe)) | 63 | if (likely(!cqe)) |
61 | return; | 64 | return; |
62 | 65 | ||
63 | sq = container_of(cq, struct mlx5e_sq, cq); | ||
64 | wq = &sq->wq; | 66 | wq = &sq->wq; |
65 | 67 | ||
66 | /* sq->cc must be updated only after mlx5_cqwq_update_db_record(), | 68 | /* sq->cc must be updated only after mlx5_cqwq_update_db_record(), |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index f6d667797ee1..b247949df135 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | |||
@@ -1451,7 +1451,8 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, | |||
1451 | 1451 | ||
1452 | esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num); | 1452 | esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num); |
1453 | 1453 | ||
1454 | if (vport_num) { /* Only VFs need ACLs for VST and spoofchk filtering */ | 1454 | /* Only VFs need ACLs for VST and spoofchk filtering */ |
1455 | if (vport_num && esw->mode == SRIOV_LEGACY) { | ||
1455 | esw_vport_ingress_config(esw, vport); | 1456 | esw_vport_ingress_config(esw, vport); |
1456 | esw_vport_egress_config(esw, vport); | 1457 | esw_vport_egress_config(esw, vport); |
1457 | } | 1458 | } |
@@ -1502,7 +1503,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) | |||
1502 | */ | 1503 | */ |
1503 | esw_vport_change_handle_locked(vport); | 1504 | esw_vport_change_handle_locked(vport); |
1504 | vport->enabled_events = 0; | 1505 | vport->enabled_events = 0; |
1505 | if (vport_num) { | 1506 | if (vport_num && esw->mode == SRIOV_LEGACY) { |
1506 | esw_vport_disable_egress_acl(esw, vport); | 1507 | esw_vport_disable_egress_acl(esw, vport); |
1507 | esw_vport_disable_ingress_acl(esw, vport); | 1508 | esw_vport_disable_ingress_acl(esw, vport); |
1508 | } | 1509 | } |
@@ -1553,6 +1554,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) | |||
1553 | 1554 | ||
1554 | abort: | 1555 | abort: |
1555 | esw_enable_vport(esw, 0, UC_ADDR_CHANGE); | 1556 | esw_enable_vport(esw, 0, UC_ADDR_CHANGE); |
1557 | esw->mode = SRIOV_NONE; | ||
1556 | return err; | 1558 | return err; |
1557 | } | 1559 | } |
1558 | 1560 | ||
@@ -1767,7 +1769,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, | |||
1767 | vport, err); | 1769 | vport, err); |
1768 | 1770 | ||
1769 | mutex_lock(&esw->state_lock); | 1771 | mutex_lock(&esw->state_lock); |
1770 | if (evport->enabled) | 1772 | if (evport->enabled && esw->mode == SRIOV_LEGACY) |
1771 | err = esw_vport_ingress_config(esw, evport); | 1773 | err = esw_vport_ingress_config(esw, evport); |
1772 | mutex_unlock(&esw->state_lock); | 1774 | mutex_unlock(&esw->state_lock); |
1773 | return err; | 1775 | return err; |
@@ -1839,7 +1841,7 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, | |||
1839 | mutex_lock(&esw->state_lock); | 1841 | mutex_lock(&esw->state_lock); |
1840 | evport->vlan = vlan; | 1842 | evport->vlan = vlan; |
1841 | evport->qos = qos; | 1843 | evport->qos = qos; |
1842 | if (evport->enabled) { | 1844 | if (evport->enabled && esw->mode == SRIOV_LEGACY) { |
1843 | err = esw_vport_ingress_config(esw, evport); | 1845 | err = esw_vport_ingress_config(esw, evport); |
1844 | if (err) | 1846 | if (err) |
1845 | goto out; | 1847 | goto out; |
@@ -1868,10 +1870,11 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, | |||
1868 | mutex_lock(&esw->state_lock); | 1870 | mutex_lock(&esw->state_lock); |
1869 | pschk = evport->spoofchk; | 1871 | pschk = evport->spoofchk; |
1870 | evport->spoofchk = spoofchk; | 1872 | evport->spoofchk = spoofchk; |
1871 | if (evport->enabled) | 1873 | if (evport->enabled && esw->mode == SRIOV_LEGACY) { |
1872 | err = esw_vport_ingress_config(esw, evport); | 1874 | err = esw_vport_ingress_config(esw, evport); |
1873 | if (err) | 1875 | if (err) |
1874 | evport->spoofchk = pschk; | 1876 | evport->spoofchk = pschk; |
1877 | } | ||
1875 | mutex_unlock(&esw->state_lock); | 1878 | mutex_unlock(&esw->state_lock); |
1876 | 1879 | ||
1877 | return err; | 1880 | return err; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index c0b05603fc31..a96140971d77 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | |||
@@ -174,6 +174,7 @@ struct mlx5_eswitch_rep { | |||
174 | void *priv_data; | 174 | void *priv_data; |
175 | struct list_head vport_sqs_list; | 175 | struct list_head vport_sqs_list; |
176 | bool valid; | 176 | bool valid; |
177 | u8 hw_id[ETH_ALEN]; | ||
177 | }; | 178 | }; |
178 | 179 | ||
179 | struct mlx5_esw_offload { | 180 | struct mlx5_esw_offload { |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index a357e8eeeed8..7de40e6b0c25 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | |||
@@ -113,7 +113,7 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn | |||
113 | dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; | 113 | dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; |
114 | dest.vport_num = vport; | 114 | dest.vport_num = vport; |
115 | 115 | ||
116 | flow_rule = mlx5_add_flow_rule(esw->fdb_table.fdb, spec, | 116 | flow_rule = mlx5_add_flow_rule(esw->fdb_table.offloads.fdb, spec, |
117 | MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, | 117 | MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, |
118 | 0, &dest); | 118 | 0, &dest); |
119 | if (IS_ERR(flow_rule)) | 119 | if (IS_ERR(flow_rule)) |
@@ -446,7 +446,7 @@ out: | |||
446 | 446 | ||
447 | static int esw_offloads_start(struct mlx5_eswitch *esw) | 447 | static int esw_offloads_start(struct mlx5_eswitch *esw) |
448 | { | 448 | { |
449 | int err, num_vfs = esw->dev->priv.sriov.num_vfs; | 449 | int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs; |
450 | 450 | ||
451 | if (esw->mode != SRIOV_LEGACY) { | 451 | if (esw->mode != SRIOV_LEGACY) { |
452 | esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n"); | 452 | esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n"); |
@@ -455,8 +455,12 @@ static int esw_offloads_start(struct mlx5_eswitch *esw) | |||
455 | 455 | ||
456 | mlx5_eswitch_disable_sriov(esw); | 456 | mlx5_eswitch_disable_sriov(esw); |
457 | err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS); | 457 | err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS); |
458 | if (err) | 458 | if (err) { |
459 | esw_warn(esw->dev, "Failed set eswitch to offloads, err %d\n", err); | 459 | esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err); |
460 | err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); | ||
461 | if (err1) | ||
462 | esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err); | ||
463 | } | ||
460 | return err; | 464 | return err; |
461 | } | 465 | } |
462 | 466 | ||
@@ -508,12 +512,16 @@ create_ft_err: | |||
508 | 512 | ||
509 | static int esw_offloads_stop(struct mlx5_eswitch *esw) | 513 | static int esw_offloads_stop(struct mlx5_eswitch *esw) |
510 | { | 514 | { |
511 | int err, num_vfs = esw->dev->priv.sriov.num_vfs; | 515 | int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs; |
512 | 516 | ||
513 | mlx5_eswitch_disable_sriov(esw); | 517 | mlx5_eswitch_disable_sriov(esw); |
514 | err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); | 518 | err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); |
515 | if (err) | 519 | if (err) { |
516 | esw_warn(esw->dev, "Failed set eswitch legacy mode. err %d\n", err); | 520 | esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err); |
521 | err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS); | ||
522 | if (err1) | ||
523 | esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err); | ||
524 | } | ||
517 | 525 | ||
518 | return err; | 526 | return err; |
519 | } | 527 | } |
@@ -535,7 +543,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports) | |||
535 | esw_destroy_offloads_fdb_table(esw); | 543 | esw_destroy_offloads_fdb_table(esw); |
536 | } | 544 | } |
537 | 545 | ||
538 | static int mlx5_esw_mode_from_devlink(u16 mode, u16 *mlx5_mode) | 546 | static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode) |
539 | { | 547 | { |
540 | switch (mode) { | 548 | switch (mode) { |
541 | case DEVLINK_ESWITCH_MODE_LEGACY: | 549 | case DEVLINK_ESWITCH_MODE_LEGACY: |
@@ -551,6 +559,22 @@ static int mlx5_esw_mode_from_devlink(u16 mode, u16 *mlx5_mode) | |||
551 | return 0; | 559 | return 0; |
552 | } | 560 | } |
553 | 561 | ||
562 | static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode) | ||
563 | { | ||
564 | switch (mlx5_mode) { | ||
565 | case SRIOV_LEGACY: | ||
566 | *mode = DEVLINK_ESWITCH_MODE_LEGACY; | ||
567 | break; | ||
568 | case SRIOV_OFFLOADS: | ||
569 | *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV; | ||
570 | break; | ||
571 | default: | ||
572 | return -EINVAL; | ||
573 | } | ||
574 | |||
575 | return 0; | ||
576 | } | ||
577 | |||
554 | int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) | 578 | int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) |
555 | { | 579 | { |
556 | struct mlx5_core_dev *dev; | 580 | struct mlx5_core_dev *dev; |
@@ -566,7 +590,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) | |||
566 | if (cur_mlx5_mode == SRIOV_NONE) | 590 | if (cur_mlx5_mode == SRIOV_NONE) |
567 | return -EOPNOTSUPP; | 591 | return -EOPNOTSUPP; |
568 | 592 | ||
569 | if (mlx5_esw_mode_from_devlink(mode, &mlx5_mode)) | 593 | if (esw_mode_from_devlink(mode, &mlx5_mode)) |
570 | return -EINVAL; | 594 | return -EINVAL; |
571 | 595 | ||
572 | if (cur_mlx5_mode == mlx5_mode) | 596 | if (cur_mlx5_mode == mlx5_mode) |
@@ -592,9 +616,7 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) | |||
592 | if (dev->priv.eswitch->mode == SRIOV_NONE) | 616 | if (dev->priv.eswitch->mode == SRIOV_NONE) |
593 | return -EOPNOTSUPP; | 617 | return -EOPNOTSUPP; |
594 | 618 | ||
595 | *mode = dev->priv.eswitch->mode; | 619 | return esw_mode_to_devlink(dev->priv.eswitch->mode, mode); |
596 | |||
597 | return 0; | ||
598 | } | 620 | } |
599 | 621 | ||
600 | void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw, | 622 | void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw, |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 9134010e2921..287ade151ec8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | |||
@@ -425,11 +425,11 @@ struct mlx5_cmd_fc_bulk * | |||
425 | mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num) | 425 | mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num) |
426 | { | 426 | { |
427 | struct mlx5_cmd_fc_bulk *b; | 427 | struct mlx5_cmd_fc_bulk *b; |
428 | int outlen = sizeof(*b) + | 428 | int outlen = |
429 | MLX5_ST_SZ_BYTES(query_flow_counter_out) + | 429 | MLX5_ST_SZ_BYTES(query_flow_counter_out) + |
430 | MLX5_ST_SZ_BYTES(traffic_counter) * num; | 430 | MLX5_ST_SZ_BYTES(traffic_counter) * num; |
431 | 431 | ||
432 | b = kzalloc(outlen, GFP_KERNEL); | 432 | b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL); |
433 | if (!b) | 433 | if (!b) |
434 | return NULL; | 434 | return NULL; |
435 | 435 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 75bb8c864557..3d6c1f65e586 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | |||
@@ -80,7 +80,7 @@ | |||
80 | LEFTOVERS_NUM_PRIOS) | 80 | LEFTOVERS_NUM_PRIOS) |
81 | 81 | ||
82 | #define ETHTOOL_PRIO_NUM_LEVELS 1 | 82 | #define ETHTOOL_PRIO_NUM_LEVELS 1 |
83 | #define ETHTOOL_NUM_PRIOS 10 | 83 | #define ETHTOOL_NUM_PRIOS 11 |
84 | #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS) | 84 | #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS) |
85 | /* Vlan, mac, ttc, aRFS */ | 85 | /* Vlan, mac, ttc, aRFS */ |
86 | #define KERNEL_NIC_PRIO_NUM_LEVELS 4 | 86 | #define KERNEL_NIC_PRIO_NUM_LEVELS 4 |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c index c2877e9de8a1..3a9195b4169d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | |||
@@ -126,12 +126,21 @@ static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev, | |||
126 | for (node = &first->node; node; node = rb_next(node)) { | 126 | for (node = &first->node; node; node = rb_next(node)) { |
127 | struct mlx5_fc *counter = rb_entry(node, struct mlx5_fc, node); | 127 | struct mlx5_fc *counter = rb_entry(node, struct mlx5_fc, node); |
128 | struct mlx5_fc_cache *c = &counter->cache; | 128 | struct mlx5_fc_cache *c = &counter->cache; |
129 | u64 packets; | ||
130 | u64 bytes; | ||
129 | 131 | ||
130 | if (counter->id > last_id) | 132 | if (counter->id > last_id) |
131 | break; | 133 | break; |
132 | 134 | ||
133 | mlx5_cmd_fc_bulk_get(dev, b, | 135 | mlx5_cmd_fc_bulk_get(dev, b, |
134 | counter->id, &c->packets, &c->bytes); | 136 | counter->id, &packets, &bytes); |
137 | |||
138 | if (c->packets == packets) | ||
139 | continue; | ||
140 | |||
141 | c->packets = packets; | ||
142 | c->bytes = bytes; | ||
143 | c->lastuse = jiffies; | ||
135 | } | 144 | } |
136 | 145 | ||
137 | out: | 146 | out: |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 4f491d43e77d..2385bae92672 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c | |||
@@ -1420,36 +1420,12 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev, | |||
1420 | dev_info(&pdev->dev, "%s was called\n", __func__); | 1420 | dev_info(&pdev->dev, "%s was called\n", __func__); |
1421 | mlx5_enter_error_state(dev); | 1421 | mlx5_enter_error_state(dev); |
1422 | mlx5_unload_one(dev, priv); | 1422 | mlx5_unload_one(dev, priv); |
1423 | pci_save_state(pdev); | ||
1423 | mlx5_pci_disable_device(dev); | 1424 | mlx5_pci_disable_device(dev); |
1424 | return state == pci_channel_io_perm_failure ? | 1425 | return state == pci_channel_io_perm_failure ? |
1425 | PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; | 1426 | PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; |
1426 | } | 1427 | } |
1427 | 1428 | ||
1428 | static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev) | ||
1429 | { | ||
1430 | struct mlx5_core_dev *dev = pci_get_drvdata(pdev); | ||
1431 | int err = 0; | ||
1432 | |||
1433 | dev_info(&pdev->dev, "%s was called\n", __func__); | ||
1434 | |||
1435 | err = mlx5_pci_enable_device(dev); | ||
1436 | if (err) { | ||
1437 | dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n" | ||
1438 | , __func__, err); | ||
1439 | return PCI_ERS_RESULT_DISCONNECT; | ||
1440 | } | ||
1441 | pci_set_master(pdev); | ||
1442 | pci_set_power_state(pdev, PCI_D0); | ||
1443 | pci_restore_state(pdev); | ||
1444 | |||
1445 | return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; | ||
1446 | } | ||
1447 | |||
1448 | void mlx5_disable_device(struct mlx5_core_dev *dev) | ||
1449 | { | ||
1450 | mlx5_pci_err_detected(dev->pdev, 0); | ||
1451 | } | ||
1452 | |||
1453 | /* wait for the device to show vital signs by waiting | 1429 | /* wait for the device to show vital signs by waiting |
1454 | * for the health counter to start counting. | 1430 | * for the health counter to start counting. |
1455 | */ | 1431 | */ |
@@ -1477,21 +1453,44 @@ static int wait_vital(struct pci_dev *pdev) | |||
1477 | return -ETIMEDOUT; | 1453 | return -ETIMEDOUT; |
1478 | } | 1454 | } |
1479 | 1455 | ||
1480 | static void mlx5_pci_resume(struct pci_dev *pdev) | 1456 | static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev) |
1481 | { | 1457 | { |
1482 | struct mlx5_core_dev *dev = pci_get_drvdata(pdev); | 1458 | struct mlx5_core_dev *dev = pci_get_drvdata(pdev); |
1483 | struct mlx5_priv *priv = &dev->priv; | ||
1484 | int err; | 1459 | int err; |
1485 | 1460 | ||
1486 | dev_info(&pdev->dev, "%s was called\n", __func__); | 1461 | dev_info(&pdev->dev, "%s was called\n", __func__); |
1487 | 1462 | ||
1488 | pci_save_state(pdev); | 1463 | err = mlx5_pci_enable_device(dev); |
1489 | err = wait_vital(pdev); | ||
1490 | if (err) { | 1464 | if (err) { |
1465 | dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n" | ||
1466 | , __func__, err); | ||
1467 | return PCI_ERS_RESULT_DISCONNECT; | ||
1468 | } | ||
1469 | |||
1470 | pci_set_master(pdev); | ||
1471 | pci_restore_state(pdev); | ||
1472 | |||
1473 | if (wait_vital(pdev)) { | ||
1491 | dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__); | 1474 | dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__); |
1492 | return; | 1475 | return PCI_ERS_RESULT_DISCONNECT; |
1493 | } | 1476 | } |
1494 | 1477 | ||
1478 | return PCI_ERS_RESULT_RECOVERED; | ||
1479 | } | ||
1480 | |||
1481 | void mlx5_disable_device(struct mlx5_core_dev *dev) | ||
1482 | { | ||
1483 | mlx5_pci_err_detected(dev->pdev, 0); | ||
1484 | } | ||
1485 | |||
1486 | static void mlx5_pci_resume(struct pci_dev *pdev) | ||
1487 | { | ||
1488 | struct mlx5_core_dev *dev = pci_get_drvdata(pdev); | ||
1489 | struct mlx5_priv *priv = &dev->priv; | ||
1490 | int err; | ||
1491 | |||
1492 | dev_info(&pdev->dev, "%s was called\n", __func__); | ||
1493 | |||
1495 | err = mlx5_load_one(dev, priv); | 1494 | err = mlx5_load_one(dev, priv); |
1496 | if (err) | 1495 | if (err) |
1497 | dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n" | 1496 | dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n" |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h index f33b997f2b61..af371a82c35b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/port.h +++ b/drivers/net/ethernet/mellanox/mlxsw/port.h | |||
@@ -56,6 +56,7 @@ | |||
56 | #define MLXSW_PORT_PHY_BITS_MASK (MLXSW_PORT_MAX_PHY_PORTS - 1) | 56 | #define MLXSW_PORT_PHY_BITS_MASK (MLXSW_PORT_MAX_PHY_PORTS - 1) |
57 | 57 | ||
58 | #define MLXSW_PORT_CPU_PORT 0x0 | 58 | #define MLXSW_PORT_CPU_PORT 0x0 |
59 | #define MLXSW_PORT_ROUTER_PORT (MLXSW_PORT_MAX_PHY_PORTS + 2) | ||
59 | 60 | ||
60 | #define MLXSW_PORT_DONT_CARE (MLXSW_PORT_MAX_PORTS) | 61 | #define MLXSW_PORT_DONT_CARE (MLXSW_PORT_MAX_PORTS) |
61 | 62 | ||
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 7ca9201f7dcb..1721098eef13 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h | |||
@@ -3383,6 +3383,15 @@ MLXSW_ITEM32(reg, ritr, ipv4_fe, 0x04, 29, 1); | |||
3383 | */ | 3383 | */ |
3384 | MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1); | 3384 | MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1); |
3385 | 3385 | ||
3386 | /* reg_ritr_lb_en | ||
3387 | * Loop-back filter enable for unicast packets. | ||
3388 | * If the flag is set then loop-back filter for unicast packets is | ||
3389 | * implemented on the RIF. Multicast packets are always subject to | ||
3390 | * loop-back filtering. | ||
3391 | * Access: RW | ||
3392 | */ | ||
3393 | MLXSW_ITEM32(reg, ritr, lb_en, 0x04, 24, 1); | ||
3394 | |||
3386 | /* reg_ritr_virtual_router | 3395 | /* reg_ritr_virtual_router |
3387 | * Virtual router ID associated with the router interface. | 3396 | * Virtual router ID associated with the router interface. |
3388 | * Access: RW | 3397 | * Access: RW |
@@ -3484,6 +3493,7 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable, | |||
3484 | mlxsw_reg_ritr_op_set(payload, op); | 3493 | mlxsw_reg_ritr_op_set(payload, op); |
3485 | mlxsw_reg_ritr_rif_set(payload, rif); | 3494 | mlxsw_reg_ritr_rif_set(payload, rif); |
3486 | mlxsw_reg_ritr_ipv4_fe_set(payload, 1); | 3495 | mlxsw_reg_ritr_ipv4_fe_set(payload, 1); |
3496 | mlxsw_reg_ritr_lb_en_set(payload, 1); | ||
3487 | mlxsw_reg_ritr_mtu_set(payload, mtu); | 3497 | mlxsw_reg_ritr_mtu_set(payload, mtu); |
3488 | mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac); | 3498 | mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac); |
3489 | } | 3499 | } |
@@ -4000,6 +4010,7 @@ static inline void mlxsw_reg_ralue_pack(char *payload, | |||
4000 | { | 4010 | { |
4001 | MLXSW_REG_ZERO(ralue, payload); | 4011 | MLXSW_REG_ZERO(ralue, payload); |
4002 | mlxsw_reg_ralue_protocol_set(payload, protocol); | 4012 | mlxsw_reg_ralue_protocol_set(payload, protocol); |
4013 | mlxsw_reg_ralue_op_set(payload, op); | ||
4003 | mlxsw_reg_ralue_virtual_router_set(payload, virtual_router); | 4014 | mlxsw_reg_ralue_virtual_router_set(payload, virtual_router); |
4004 | mlxsw_reg_ralue_prefix_len_set(payload, prefix_len); | 4015 | mlxsw_reg_ralue_prefix_len_set(payload, prefix_len); |
4005 | mlxsw_reg_ralue_entry_type_set(payload, | 4016 | mlxsw_reg_ralue_entry_type_set(payload, |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index c3e61500819d..d48873bcbddf 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c | |||
@@ -56,6 +56,7 @@ | |||
56 | #include <generated/utsrelease.h> | 56 | #include <generated/utsrelease.h> |
57 | #include <net/pkt_cls.h> | 57 | #include <net/pkt_cls.h> |
58 | #include <net/tc_act/tc_mirred.h> | 58 | #include <net/tc_act/tc_mirred.h> |
59 | #include <net/netevent.h> | ||
59 | 60 | ||
60 | #include "spectrum.h" | 61 | #include "spectrum.h" |
61 | #include "core.h" | 62 | #include "core.h" |
@@ -942,8 +943,8 @@ static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport) | |||
942 | kfree(mlxsw_sp_vport); | 943 | kfree(mlxsw_sp_vport); |
943 | } | 944 | } |
944 | 945 | ||
945 | int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, | 946 | static int mlxsw_sp_port_add_vid(struct net_device *dev, |
946 | u16 vid) | 947 | __be16 __always_unused proto, u16 vid) |
947 | { | 948 | { |
948 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); | 949 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); |
949 | struct mlxsw_sp_port *mlxsw_sp_vport; | 950 | struct mlxsw_sp_port *mlxsw_sp_vport; |
@@ -956,16 +957,12 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, | |||
956 | if (!vid) | 957 | if (!vid) |
957 | return 0; | 958 | return 0; |
958 | 959 | ||
959 | if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) { | 960 | if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) |
960 | netdev_warn(dev, "VID=%d already configured\n", vid); | ||
961 | return 0; | 961 | return 0; |
962 | } | ||
963 | 962 | ||
964 | mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid); | 963 | mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid); |
965 | if (!mlxsw_sp_vport) { | 964 | if (!mlxsw_sp_vport) |
966 | netdev_err(dev, "Failed to create vPort for VID=%d\n", vid); | ||
967 | return -ENOMEM; | 965 | return -ENOMEM; |
968 | } | ||
969 | 966 | ||
970 | /* When adding the first VLAN interface on a bridged port we need to | 967 | /* When adding the first VLAN interface on a bridged port we need to |
971 | * transition all the active 802.1Q bridge VLANs to use explicit | 968 | * transition all the active 802.1Q bridge VLANs to use explicit |
@@ -973,24 +970,17 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, | |||
973 | */ | 970 | */ |
974 | if (list_is_singular(&mlxsw_sp_port->vports_list)) { | 971 | if (list_is_singular(&mlxsw_sp_port->vports_list)) { |
975 | err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port); | 972 | err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port); |
976 | if (err) { | 973 | if (err) |
977 | netdev_err(dev, "Failed to set to Virtual mode\n"); | ||
978 | goto err_port_vp_mode_trans; | 974 | goto err_port_vp_mode_trans; |
979 | } | ||
980 | } | 975 | } |
981 | 976 | ||
982 | err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); | 977 | err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); |
983 | if (err) { | 978 | if (err) |
984 | netdev_err(dev, "Failed to disable learning for VID=%d\n", vid); | ||
985 | goto err_port_vid_learning_set; | 979 | goto err_port_vid_learning_set; |
986 | } | ||
987 | 980 | ||
988 | err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged); | 981 | err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged); |
989 | if (err) { | 982 | if (err) |
990 | netdev_err(dev, "Failed to set VLAN membership for VID=%d\n", | ||
991 | vid); | ||
992 | goto err_port_add_vid; | 983 | goto err_port_add_vid; |
993 | } | ||
994 | 984 | ||
995 | return 0; | 985 | return 0; |
996 | 986 | ||
@@ -1010,7 +1000,6 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev, | |||
1010 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); | 1000 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); |
1011 | struct mlxsw_sp_port *mlxsw_sp_vport; | 1001 | struct mlxsw_sp_port *mlxsw_sp_vport; |
1012 | struct mlxsw_sp_fid *f; | 1002 | struct mlxsw_sp_fid *f; |
1013 | int err; | ||
1014 | 1003 | ||
1015 | /* VLAN 0 is removed from HW filter when device goes down, but | 1004 | /* VLAN 0 is removed from HW filter when device goes down, but |
1016 | * it is reserved in our case, so simply return. | 1005 | * it is reserved in our case, so simply return. |
@@ -1019,23 +1008,12 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev, | |||
1019 | return 0; | 1008 | return 0; |
1020 | 1009 | ||
1021 | mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); | 1010 | mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); |
1022 | if (!mlxsw_sp_vport) { | 1011 | if (WARN_ON(!mlxsw_sp_vport)) |
1023 | netdev_warn(dev, "VID=%d does not exist\n", vid); | ||
1024 | return 0; | 1012 | return 0; |
1025 | } | ||
1026 | 1013 | ||
1027 | err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false); | 1014 | mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false); |
1028 | if (err) { | ||
1029 | netdev_err(dev, "Failed to set VLAN membership for VID=%d\n", | ||
1030 | vid); | ||
1031 | return err; | ||
1032 | } | ||
1033 | 1015 | ||
1034 | err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); | 1016 | mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); |
1035 | if (err) { | ||
1036 | netdev_err(dev, "Failed to enable learning for VID=%d\n", vid); | ||
1037 | return err; | ||
1038 | } | ||
1039 | 1017 | ||
1040 | /* Drop FID reference. If this was the last reference the | 1018 | /* Drop FID reference. If this was the last reference the |
1041 | * resources will be freed. | 1019 | * resources will be freed. |
@@ -1048,13 +1026,8 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev, | |||
1048 | * transition all active 802.1Q bridge VLANs to use VID to FID | 1026 | * transition all active 802.1Q bridge VLANs to use VID to FID |
1049 | * mappings and set port's mode to VLAN mode. | 1027 | * mappings and set port's mode to VLAN mode. |
1050 | */ | 1028 | */ |
1051 | if (list_is_singular(&mlxsw_sp_port->vports_list)) { | 1029 | if (list_is_singular(&mlxsw_sp_port->vports_list)) |
1052 | err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); | 1030 | mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); |
1053 | if (err) { | ||
1054 | netdev_err(dev, "Failed to set to VLAN mode\n"); | ||
1055 | return err; | ||
1056 | } | ||
1057 | } | ||
1058 | 1031 | ||
1059 | mlxsw_sp_port_vport_destroy(mlxsw_sp_vport); | 1032 | mlxsw_sp_port_vport_destroy(mlxsw_sp_vport); |
1060 | 1033 | ||
@@ -1149,6 +1122,7 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, | |||
1149 | bool ingress) | 1122 | bool ingress) |
1150 | { | 1123 | { |
1151 | const struct tc_action *a; | 1124 | const struct tc_action *a; |
1125 | LIST_HEAD(actions); | ||
1152 | int err; | 1126 | int err; |
1153 | 1127 | ||
1154 | if (!tc_single_action(cls->exts)) { | 1128 | if (!tc_single_action(cls->exts)) { |
@@ -1156,7 +1130,8 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, | |||
1156 | return -ENOTSUPP; | 1130 | return -ENOTSUPP; |
1157 | } | 1131 | } |
1158 | 1132 | ||
1159 | tc_for_each_action(a, cls->exts) { | 1133 | tcf_exts_to_list(cls->exts, &actions); |
1134 | list_for_each_entry(a, &actions, list) { | ||
1160 | if (!is_tcf_mirred_mirror(a) || protocol != htons(ETH_P_ALL)) | 1135 | if (!is_tcf_mirred_mirror(a) || protocol != htons(ETH_P_ALL)) |
1161 | return -ENOTSUPP; | 1136 | return -ENOTSUPP; |
1162 | 1137 | ||
@@ -2076,6 +2051,18 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) | |||
2076 | return 0; | 2051 | return 0; |
2077 | } | 2052 | } |
2078 | 2053 | ||
2054 | static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port) | ||
2055 | { | ||
2056 | mlxsw_sp_port->pvid = 1; | ||
2057 | |||
2058 | return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1); | ||
2059 | } | ||
2060 | |||
2061 | static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port) | ||
2062 | { | ||
2063 | return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1); | ||
2064 | } | ||
2065 | |||
2079 | static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, | 2066 | static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, |
2080 | bool split, u8 module, u8 width, u8 lane) | 2067 | bool split, u8 module, u8 width, u8 lane) |
2081 | { | 2068 | { |
@@ -2119,6 +2106,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, | |||
2119 | dev->netdev_ops = &mlxsw_sp_port_netdev_ops; | 2106 | dev->netdev_ops = &mlxsw_sp_port_netdev_ops; |
2120 | dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops; | 2107 | dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops; |
2121 | 2108 | ||
2109 | err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0); | ||
2110 | if (err) { | ||
2111 | dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", | ||
2112 | mlxsw_sp_port->local_port); | ||
2113 | goto err_port_swid_set; | ||
2114 | } | ||
2115 | |||
2122 | err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port); | 2116 | err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port); |
2123 | if (err) { | 2117 | if (err) { |
2124 | dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n", | 2118 | dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n", |
@@ -2144,13 +2138,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, | |||
2144 | goto err_port_system_port_mapping_set; | 2138 | goto err_port_system_port_mapping_set; |
2145 | } | 2139 | } |
2146 | 2140 | ||
2147 | err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0); | ||
2148 | if (err) { | ||
2149 | dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", | ||
2150 | mlxsw_sp_port->local_port); | ||
2151 | goto err_port_swid_set; | ||
2152 | } | ||
2153 | |||
2154 | err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width); | 2141 | err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width); |
2155 | if (err) { | 2142 | if (err) { |
2156 | dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n", | 2143 | dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n", |
@@ -2191,7 +2178,15 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, | |||
2191 | goto err_port_dcb_init; | 2178 | goto err_port_dcb_init; |
2192 | } | 2179 | } |
2193 | 2180 | ||
2181 | err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port); | ||
2182 | if (err) { | ||
2183 | dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n", | ||
2184 | mlxsw_sp_port->local_port); | ||
2185 | goto err_port_pvid_vport_create; | ||
2186 | } | ||
2187 | |||
2194 | mlxsw_sp_port_switchdev_init(mlxsw_sp_port); | 2188 | mlxsw_sp_port_switchdev_init(mlxsw_sp_port); |
2189 | mlxsw_sp->ports[local_port] = mlxsw_sp_port; | ||
2195 | err = register_netdev(dev); | 2190 | err = register_netdev(dev); |
2196 | if (err) { | 2191 | if (err) { |
2197 | dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", | 2192 | dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", |
@@ -2208,27 +2203,26 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, | |||
2208 | goto err_core_port_init; | 2203 | goto err_core_port_init; |
2209 | } | 2204 | } |
2210 | 2205 | ||
2211 | err = mlxsw_sp_port_vlan_init(mlxsw_sp_port); | ||
2212 | if (err) | ||
2213 | goto err_port_vlan_init; | ||
2214 | |||
2215 | mlxsw_sp->ports[local_port] = mlxsw_sp_port; | ||
2216 | return 0; | 2206 | return 0; |
2217 | 2207 | ||
2218 | err_port_vlan_init: | ||
2219 | mlxsw_core_port_fini(&mlxsw_sp_port->core_port); | ||
2220 | err_core_port_init: | 2208 | err_core_port_init: |
2221 | unregister_netdev(dev); | 2209 | unregister_netdev(dev); |
2222 | err_register_netdev: | 2210 | err_register_netdev: |
2211 | mlxsw_sp->ports[local_port] = NULL; | ||
2212 | mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); | ||
2213 | mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port); | ||
2214 | err_port_pvid_vport_create: | ||
2215 | mlxsw_sp_port_dcb_fini(mlxsw_sp_port); | ||
2223 | err_port_dcb_init: | 2216 | err_port_dcb_init: |
2224 | err_port_ets_init: | 2217 | err_port_ets_init: |
2225 | err_port_buffers_init: | 2218 | err_port_buffers_init: |
2226 | err_port_admin_status_set: | 2219 | err_port_admin_status_set: |
2227 | err_port_mtu_set: | 2220 | err_port_mtu_set: |
2228 | err_port_speed_by_width_set: | 2221 | err_port_speed_by_width_set: |
2229 | err_port_swid_set: | ||
2230 | err_port_system_port_mapping_set: | 2222 | err_port_system_port_mapping_set: |
2231 | err_dev_addr_init: | 2223 | err_dev_addr_init: |
2224 | mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); | ||
2225 | err_port_swid_set: | ||
2232 | free_percpu(mlxsw_sp_port->pcpu_stats); | 2226 | free_percpu(mlxsw_sp_port->pcpu_stats); |
2233 | err_alloc_stats: | 2227 | err_alloc_stats: |
2234 | kfree(mlxsw_sp_port->untagged_vlans); | 2228 | kfree(mlxsw_sp_port->untagged_vlans); |
@@ -2245,12 +2239,12 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) | |||
2245 | 2239 | ||
2246 | if (!mlxsw_sp_port) | 2240 | if (!mlxsw_sp_port) |
2247 | return; | 2241 | return; |
2248 | mlxsw_sp->ports[local_port] = NULL; | ||
2249 | mlxsw_core_port_fini(&mlxsw_sp_port->core_port); | 2242 | mlxsw_core_port_fini(&mlxsw_sp_port->core_port); |
2250 | unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ | 2243 | unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ |
2251 | mlxsw_sp_port_dcb_fini(mlxsw_sp_port); | 2244 | mlxsw_sp->ports[local_port] = NULL; |
2252 | mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1); | ||
2253 | mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); | 2245 | mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); |
2246 | mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port); | ||
2247 | mlxsw_sp_port_dcb_fini(mlxsw_sp_port); | ||
2254 | mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); | 2248 | mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); |
2255 | mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port); | 2249 | mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port); |
2256 | free_percpu(mlxsw_sp_port->pcpu_stats); | 2250 | free_percpu(mlxsw_sp_port->pcpu_stats); |
@@ -2662,6 +2656,26 @@ static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = { | |||
2662 | { | 2656 | { |
2663 | .func = mlxsw_sp_rx_listener_func, | 2657 | .func = mlxsw_sp_rx_listener_func, |
2664 | .local_port = MLXSW_PORT_DONT_CARE, | 2658 | .local_port = MLXSW_PORT_DONT_CARE, |
2659 | .trap_id = MLXSW_TRAP_ID_MTUERROR, | ||
2660 | }, | ||
2661 | { | ||
2662 | .func = mlxsw_sp_rx_listener_func, | ||
2663 | .local_port = MLXSW_PORT_DONT_CARE, | ||
2664 | .trap_id = MLXSW_TRAP_ID_TTLERROR, | ||
2665 | }, | ||
2666 | { | ||
2667 | .func = mlxsw_sp_rx_listener_func, | ||
2668 | .local_port = MLXSW_PORT_DONT_CARE, | ||
2669 | .trap_id = MLXSW_TRAP_ID_LBERROR, | ||
2670 | }, | ||
2671 | { | ||
2672 | .func = mlxsw_sp_rx_listener_func, | ||
2673 | .local_port = MLXSW_PORT_DONT_CARE, | ||
2674 | .trap_id = MLXSW_TRAP_ID_OSPF, | ||
2675 | }, | ||
2676 | { | ||
2677 | .func = mlxsw_sp_rx_listener_func, | ||
2678 | .local_port = MLXSW_PORT_DONT_CARE, | ||
2665 | .trap_id = MLXSW_TRAP_ID_IP2ME, | 2679 | .trap_id = MLXSW_TRAP_ID_IP2ME, |
2666 | }, | 2680 | }, |
2667 | { | 2681 | { |
@@ -3311,6 +3325,39 @@ static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp, | |||
3311 | return mlxsw_sp_fid_find(mlxsw_sp, fid); | 3325 | return mlxsw_sp_fid_find(mlxsw_sp, fid); |
3312 | } | 3326 | } |
3313 | 3327 | ||
3328 | static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid) | ||
3329 | { | ||
3330 | return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID : | ||
3331 | MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST; | ||
3332 | } | ||
3333 | |||
3334 | static u16 mlxsw_sp_flood_table_index_get(u16 fid) | ||
3335 | { | ||
3336 | return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid; | ||
3337 | } | ||
3338 | |||
3339 | static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid, | ||
3340 | bool set) | ||
3341 | { | ||
3342 | enum mlxsw_flood_table_type table_type; | ||
3343 | char *sftr_pl; | ||
3344 | u16 index; | ||
3345 | int err; | ||
3346 | |||
3347 | sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); | ||
3348 | if (!sftr_pl) | ||
3349 | return -ENOMEM; | ||
3350 | |||
3351 | table_type = mlxsw_sp_flood_table_type_get(fid); | ||
3352 | index = mlxsw_sp_flood_table_index_get(fid); | ||
3353 | mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, index, table_type, | ||
3354 | 1, MLXSW_PORT_ROUTER_PORT, set); | ||
3355 | err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); | ||
3356 | |||
3357 | kfree(sftr_pl); | ||
3358 | return err; | ||
3359 | } | ||
3360 | |||
3314 | static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid) | 3361 | static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid) |
3315 | { | 3362 | { |
3316 | if (mlxsw_sp_fid_is_vfid(fid)) | 3363 | if (mlxsw_sp_fid_is_vfid(fid)) |
@@ -3347,10 +3394,14 @@ static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp, | |||
3347 | if (rif == MLXSW_SP_RIF_MAX) | 3394 | if (rif == MLXSW_SP_RIF_MAX) |
3348 | return -ERANGE; | 3395 | return -ERANGE; |
3349 | 3396 | ||
3350 | err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true); | 3397 | err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true); |
3351 | if (err) | 3398 | if (err) |
3352 | return err; | 3399 | return err; |
3353 | 3400 | ||
3401 | err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true); | ||
3402 | if (err) | ||
3403 | goto err_rif_bridge_op; | ||
3404 | |||
3354 | err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true); | 3405 | err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true); |
3355 | if (err) | 3406 | if (err) |
3356 | goto err_rif_fdb_op; | 3407 | goto err_rif_fdb_op; |
@@ -3372,6 +3423,8 @@ err_rif_alloc: | |||
3372 | mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false); | 3423 | mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false); |
3373 | err_rif_fdb_op: | 3424 | err_rif_fdb_op: |
3374 | mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false); | 3425 | mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false); |
3426 | err_rif_bridge_op: | ||
3427 | mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false); | ||
3375 | return err; | 3428 | return err; |
3376 | } | 3429 | } |
3377 | 3430 | ||
@@ -3391,6 +3444,8 @@ void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp, | |||
3391 | 3444 | ||
3392 | mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false); | 3445 | mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false); |
3393 | 3446 | ||
3447 | mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false); | ||
3448 | |||
3394 | netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif); | 3449 | netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif); |
3395 | } | 3450 | } |
3396 | 3451 | ||
@@ -4487,18 +4542,26 @@ static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = { | |||
4487 | .priority = 10, /* Must be called before FIB notifier block */ | 4542 | .priority = 10, /* Must be called before FIB notifier block */ |
4488 | }; | 4543 | }; |
4489 | 4544 | ||
4545 | static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = { | ||
4546 | .notifier_call = mlxsw_sp_router_netevent_event, | ||
4547 | }; | ||
4548 | |||
4490 | static int __init mlxsw_sp_module_init(void) | 4549 | static int __init mlxsw_sp_module_init(void) |
4491 | { | 4550 | { |
4492 | int err; | 4551 | int err; |
4493 | 4552 | ||
4494 | register_netdevice_notifier(&mlxsw_sp_netdevice_nb); | 4553 | register_netdevice_notifier(&mlxsw_sp_netdevice_nb); |
4495 | register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); | 4554 | register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); |
4555 | register_netevent_notifier(&mlxsw_sp_router_netevent_nb); | ||
4556 | |||
4496 | err = mlxsw_core_driver_register(&mlxsw_sp_driver); | 4557 | err = mlxsw_core_driver_register(&mlxsw_sp_driver); |
4497 | if (err) | 4558 | if (err) |
4498 | goto err_core_driver_register; | 4559 | goto err_core_driver_register; |
4499 | return 0; | 4560 | return 0; |
4500 | 4561 | ||
4501 | err_core_driver_register: | 4562 | err_core_driver_register: |
4563 | unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb); | ||
4564 | unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); | ||
4502 | unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb); | 4565 | unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb); |
4503 | return err; | 4566 | return err; |
4504 | } | 4567 | } |
@@ -4506,6 +4569,7 @@ err_core_driver_register: | |||
4506 | static void __exit mlxsw_sp_module_exit(void) | 4569 | static void __exit mlxsw_sp_module_exit(void) |
4507 | { | 4570 | { |
4508 | mlxsw_core_driver_unregister(&mlxsw_sp_driver); | 4571 | mlxsw_core_driver_unregister(&mlxsw_sp_driver); |
4572 | unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb); | ||
4509 | unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); | 4573 | unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); |
4510 | unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb); | 4574 | unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb); |
4511 | } | 4575 | } |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index f69aa37d1521..ac48abebe904 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h | |||
@@ -536,8 +536,6 @@ int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port, | |||
536 | u16 vid); | 536 | u16 vid); |
537 | int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, | 537 | int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, |
538 | u16 vid_end, bool is_member, bool untagged); | 538 | u16 vid_end, bool is_member, bool untagged); |
539 | int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, | ||
540 | u16 vid); | ||
541 | int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid, | 539 | int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid, |
542 | bool set); | 540 | bool set); |
543 | void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port); | 541 | void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port); |
@@ -589,6 +587,8 @@ int mlxsw_sp_router_neigh_construct(struct net_device *dev, | |||
589 | struct neighbour *n); | 587 | struct neighbour *n); |
590 | void mlxsw_sp_router_neigh_destroy(struct net_device *dev, | 588 | void mlxsw_sp_router_neigh_destroy(struct net_device *dev, |
591 | struct neighbour *n); | 589 | struct neighbour *n); |
590 | int mlxsw_sp_router_netevent_event(struct notifier_block *unused, | ||
591 | unsigned long event, void *ptr); | ||
592 | 592 | ||
593 | int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count); | 593 | int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count); |
594 | void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index); | 594 | void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index); |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 074cdda7b6f3..953b214f38d0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | |||
@@ -330,7 +330,7 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = { | |||
330 | MLXSW_SP_CPU_PORT_SB_CM, | 330 | MLXSW_SP_CPU_PORT_SB_CM, |
331 | MLXSW_SP_CPU_PORT_SB_CM, | 331 | MLXSW_SP_CPU_PORT_SB_CM, |
332 | MLXSW_SP_CPU_PORT_SB_CM, | 332 | MLXSW_SP_CPU_PORT_SB_CM, |
333 | MLXSW_SP_CPU_PORT_SB_CM, | 333 | MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 0, 0), |
334 | MLXSW_SP_CPU_PORT_SB_CM, | 334 | MLXSW_SP_CPU_PORT_SB_CM, |
335 | MLXSW_SP_CPU_PORT_SB_CM, | 335 | MLXSW_SP_CPU_PORT_SB_CM, |
336 | MLXSW_SP_CPU_PORT_SB_CM, | 336 | MLXSW_SP_CPU_PORT_SB_CM, |
@@ -717,22 +717,18 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port, | |||
717 | u8 local_port = mlxsw_sp_port->local_port; | 717 | u8 local_port = mlxsw_sp_port->local_port; |
718 | u8 pg_buff = tc_index; | 718 | u8 pg_buff = tc_index; |
719 | enum mlxsw_reg_sbxx_dir dir = pool_type; | 719 | enum mlxsw_reg_sbxx_dir dir = pool_type; |
720 | u8 pool = pool_index; | 720 | u8 pool = pool_get(pool_index); |
721 | u32 max_buff; | 721 | u32 max_buff; |
722 | int err; | 722 | int err; |
723 | 723 | ||
724 | if (dir != dir_get(pool_index)) | ||
725 | return -EINVAL; | ||
726 | |||
724 | err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir, | 727 | err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir, |
725 | threshold, &max_buff); | 728 | threshold, &max_buff); |
726 | if (err) | 729 | if (err) |
727 | return err; | 730 | return err; |
728 | 731 | ||
729 | if (pool_type == DEVLINK_SB_POOL_TYPE_EGRESS) { | ||
730 | if (pool < MLXSW_SP_SB_POOL_COUNT) | ||
731 | return -EINVAL; | ||
732 | pool -= MLXSW_SP_SB_POOL_COUNT; | ||
733 | } else if (pool >= MLXSW_SP_SB_POOL_COUNT) { | ||
734 | return -EINVAL; | ||
735 | } | ||
736 | return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir, | 732 | return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir, |
737 | 0, max_buff, pool); | 733 | 0, max_buff, pool); |
738 | } | 734 | } |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c index 01cfb7512827..b6ed7f7c531e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c | |||
@@ -341,6 +341,8 @@ static int mlxsw_sp_port_pfc_set(struct mlxsw_sp_port *mlxsw_sp_port, | |||
341 | char pfcc_pl[MLXSW_REG_PFCC_LEN]; | 341 | char pfcc_pl[MLXSW_REG_PFCC_LEN]; |
342 | 342 | ||
343 | mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port); | 343 | mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port); |
344 | mlxsw_reg_pfcc_pprx_set(pfcc_pl, mlxsw_sp_port->link.rx_pause); | ||
345 | mlxsw_reg_pfcc_pptx_set(pfcc_pl, mlxsw_sp_port->link.tx_pause); | ||
344 | mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc->pfc_en); | 346 | mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc->pfc_en); |
345 | 347 | ||
346 | return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc), | 348 | return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc), |
@@ -351,17 +353,17 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev, | |||
351 | struct ieee_pfc *pfc) | 353 | struct ieee_pfc *pfc) |
352 | { | 354 | { |
353 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); | 355 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); |
356 | bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); | ||
354 | int err; | 357 | int err; |
355 | 358 | ||
356 | if ((mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) && | 359 | if (pause_en && pfc->pfc_en) { |
357 | pfc->pfc_en) { | ||
358 | netdev_err(dev, "PAUSE frames already enabled on port\n"); | 360 | netdev_err(dev, "PAUSE frames already enabled on port\n"); |
359 | return -EINVAL; | 361 | return -EINVAL; |
360 | } | 362 | } |
361 | 363 | ||
362 | err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, | 364 | err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, |
363 | mlxsw_sp_port->dcb.ets->prio_tc, | 365 | mlxsw_sp_port->dcb.ets->prio_tc, |
364 | false, pfc); | 366 | pause_en, pfc); |
365 | if (err) { | 367 | if (err) { |
366 | netdev_err(dev, "Failed to configure port's headroom for PFC\n"); | 368 | netdev_err(dev, "Failed to configure port's headroom for PFC\n"); |
367 | return err; | 369 | return err; |
@@ -380,7 +382,7 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev, | |||
380 | 382 | ||
381 | err_port_pfc_set: | 383 | err_port_pfc_set: |
382 | __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, | 384 | __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, |
383 | mlxsw_sp_port->dcb.ets->prio_tc, false, | 385 | mlxsw_sp_port->dcb.ets->prio_tc, pause_en, |
384 | mlxsw_sp_port->dcb.pfc); | 386 | mlxsw_sp_port->dcb.pfc); |
385 | return err; | 387 | return err; |
386 | } | 388 | } |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 81418d629231..3f5c51da6d3e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | |||
@@ -107,6 +107,7 @@ mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage, | |||
107 | } | 107 | } |
108 | 108 | ||
109 | struct mlxsw_sp_fib_key { | 109 | struct mlxsw_sp_fib_key { |
110 | struct net_device *dev; | ||
110 | unsigned char addr[sizeof(struct in6_addr)]; | 111 | unsigned char addr[sizeof(struct in6_addr)]; |
111 | unsigned char prefix_len; | 112 | unsigned char prefix_len; |
112 | }; | 113 | }; |
@@ -123,7 +124,7 @@ struct mlxsw_sp_fib_entry { | |||
123 | struct rhash_head ht_node; | 124 | struct rhash_head ht_node; |
124 | struct mlxsw_sp_fib_key key; | 125 | struct mlxsw_sp_fib_key key; |
125 | enum mlxsw_sp_fib_entry_type type; | 126 | enum mlxsw_sp_fib_entry_type type; |
126 | u8 added:1; | 127 | unsigned int ref_count; |
127 | u16 rif; /* used for action local */ | 128 | u16 rif; /* used for action local */ |
128 | struct mlxsw_sp_vr *vr; | 129 | struct mlxsw_sp_vr *vr; |
129 | struct list_head nexthop_group_node; | 130 | struct list_head nexthop_group_node; |
@@ -171,13 +172,15 @@ static void mlxsw_sp_fib_entry_remove(struct mlxsw_sp_fib *fib, | |||
171 | 172 | ||
172 | static struct mlxsw_sp_fib_entry * | 173 | static struct mlxsw_sp_fib_entry * |
173 | mlxsw_sp_fib_entry_create(struct mlxsw_sp_fib *fib, const void *addr, | 174 | mlxsw_sp_fib_entry_create(struct mlxsw_sp_fib *fib, const void *addr, |
174 | size_t addr_len, unsigned char prefix_len) | 175 | size_t addr_len, unsigned char prefix_len, |
176 | struct net_device *dev) | ||
175 | { | 177 | { |
176 | struct mlxsw_sp_fib_entry *fib_entry; | 178 | struct mlxsw_sp_fib_entry *fib_entry; |
177 | 179 | ||
178 | fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL); | 180 | fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL); |
179 | if (!fib_entry) | 181 | if (!fib_entry) |
180 | return NULL; | 182 | return NULL; |
183 | fib_entry->key.dev = dev; | ||
181 | memcpy(fib_entry->key.addr, addr, addr_len); | 184 | memcpy(fib_entry->key.addr, addr, addr_len); |
182 | fib_entry->key.prefix_len = prefix_len; | 185 | fib_entry->key.prefix_len = prefix_len; |
183 | return fib_entry; | 186 | return fib_entry; |
@@ -190,10 +193,13 @@ static void mlxsw_sp_fib_entry_destroy(struct mlxsw_sp_fib_entry *fib_entry) | |||
190 | 193 | ||
191 | static struct mlxsw_sp_fib_entry * | 194 | static struct mlxsw_sp_fib_entry * |
192 | mlxsw_sp_fib_entry_lookup(struct mlxsw_sp_fib *fib, const void *addr, | 195 | mlxsw_sp_fib_entry_lookup(struct mlxsw_sp_fib *fib, const void *addr, |
193 | size_t addr_len, unsigned char prefix_len) | 196 | size_t addr_len, unsigned char prefix_len, |
197 | struct net_device *dev) | ||
194 | { | 198 | { |
195 | struct mlxsw_sp_fib_key key = {{ 0 } }; | 199 | struct mlxsw_sp_fib_key key; |
196 | 200 | ||
201 | memset(&key, 0, sizeof(key)); | ||
202 | key.dev = dev; | ||
197 | memcpy(key.addr, addr, addr_len); | 203 | memcpy(key.addr, addr, addr_len); |
198 | key.prefix_len = prefix_len; | 204 | key.prefix_len = prefix_len; |
199 | return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params); | 205 | return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params); |
@@ -657,7 +663,7 @@ int mlxsw_sp_router_neigh_construct(struct net_device *dev, | |||
657 | return 0; | 663 | return 0; |
658 | } | 664 | } |
659 | 665 | ||
660 | r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); | 666 | r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev); |
661 | if (WARN_ON(!r)) | 667 | if (WARN_ON(!r)) |
662 | return -EINVAL; | 668 | return -EINVAL; |
663 | 669 | ||
@@ -938,8 +944,8 @@ static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work) | |||
938 | mlxsw_sp_port_dev_put(mlxsw_sp_port); | 944 | mlxsw_sp_port_dev_put(mlxsw_sp_port); |
939 | } | 945 | } |
940 | 946 | ||
941 | static int mlxsw_sp_router_netevent_event(struct notifier_block *unused, | 947 | int mlxsw_sp_router_netevent_event(struct notifier_block *unused, |
942 | unsigned long event, void *ptr) | 948 | unsigned long event, void *ptr) |
943 | { | 949 | { |
944 | struct mlxsw_sp_neigh_entry *neigh_entry; | 950 | struct mlxsw_sp_neigh_entry *neigh_entry; |
945 | struct mlxsw_sp_port *mlxsw_sp_port; | 951 | struct mlxsw_sp_port *mlxsw_sp_port; |
@@ -1009,10 +1015,6 @@ static int mlxsw_sp_router_netevent_event(struct notifier_block *unused, | |||
1009 | return NOTIFY_DONE; | 1015 | return NOTIFY_DONE; |
1010 | } | 1016 | } |
1011 | 1017 | ||
1012 | static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = { | ||
1013 | .notifier_call = mlxsw_sp_router_netevent_event, | ||
1014 | }; | ||
1015 | |||
1016 | static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp) | 1018 | static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp) |
1017 | { | 1019 | { |
1018 | int err; | 1020 | int err; |
@@ -1027,10 +1029,6 @@ static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp) | |||
1027 | */ | 1029 | */ |
1028 | mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp); | 1030 | mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp); |
1029 | 1031 | ||
1030 | err = register_netevent_notifier(&mlxsw_sp_router_netevent_nb); | ||
1031 | if (err) | ||
1032 | goto err_register_netevent_notifier; | ||
1033 | |||
1034 | /* Create the delayed works for the activity_update */ | 1032 | /* Create the delayed works for the activity_update */ |
1035 | INIT_DELAYED_WORK(&mlxsw_sp->router.neighs_update.dw, | 1033 | INIT_DELAYED_WORK(&mlxsw_sp->router.neighs_update.dw, |
1036 | mlxsw_sp_router_neighs_update_work); | 1034 | mlxsw_sp_router_neighs_update_work); |
@@ -1039,17 +1037,12 @@ static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp) | |||
1039 | mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw, 0); | 1037 | mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw, 0); |
1040 | mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw, 0); | 1038 | mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw, 0); |
1041 | return 0; | 1039 | return 0; |
1042 | |||
1043 | err_register_netevent_notifier: | ||
1044 | rhashtable_destroy(&mlxsw_sp->router.neigh_ht); | ||
1045 | return err; | ||
1046 | } | 1040 | } |
1047 | 1041 | ||
1048 | static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp) | 1042 | static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp) |
1049 | { | 1043 | { |
1050 | cancel_delayed_work_sync(&mlxsw_sp->router.neighs_update.dw); | 1044 | cancel_delayed_work_sync(&mlxsw_sp->router.neighs_update.dw); |
1051 | cancel_delayed_work_sync(&mlxsw_sp->router.nexthop_probe_dw); | 1045 | cancel_delayed_work_sync(&mlxsw_sp->router.nexthop_probe_dw); |
1052 | unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb); | ||
1053 | rhashtable_destroy(&mlxsw_sp->router.neigh_ht); | 1046 | rhashtable_destroy(&mlxsw_sp->router.neigh_ht); |
1054 | } | 1047 | } |
1055 | 1048 | ||
@@ -1524,7 +1517,14 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) | |||
1524 | return err; | 1517 | return err; |
1525 | mlxsw_sp_lpm_init(mlxsw_sp); | 1518 | mlxsw_sp_lpm_init(mlxsw_sp); |
1526 | mlxsw_sp_vrs_init(mlxsw_sp); | 1519 | mlxsw_sp_vrs_init(mlxsw_sp); |
1527 | return mlxsw_sp_neigh_init(mlxsw_sp); | 1520 | err = mlxsw_sp_neigh_init(mlxsw_sp); |
1521 | if (err) | ||
1522 | goto err_neigh_init; | ||
1523 | return 0; | ||
1524 | |||
1525 | err_neigh_init: | ||
1526 | __mlxsw_sp_router_fini(mlxsw_sp); | ||
1527 | return err; | ||
1528 | } | 1528 | } |
1529 | 1529 | ||
1530 | void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) | 1530 | void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) |
@@ -1626,11 +1626,8 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp, | |||
1626 | static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp, | 1626 | static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp, |
1627 | struct mlxsw_sp_fib_entry *fib_entry) | 1627 | struct mlxsw_sp_fib_entry *fib_entry) |
1628 | { | 1628 | { |
1629 | enum mlxsw_reg_ralue_op op; | 1629 | return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, |
1630 | 1630 | MLXSW_REG_RALUE_OP_WRITE_WRITE); | |
1631 | op = !fib_entry->added ? MLXSW_REG_RALUE_OP_WRITE_WRITE : | ||
1632 | MLXSW_REG_RALUE_OP_WRITE_UPDATE; | ||
1633 | return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op); | ||
1634 | } | 1631 | } |
1635 | 1632 | ||
1636 | static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp, | 1633 | static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp, |
@@ -1651,9 +1648,10 @@ static void mlxsw_sp_router_fib4_add_info_destroy(void const *data) | |||
1651 | const struct mlxsw_sp_router_fib4_add_info *info = data; | 1648 | const struct mlxsw_sp_router_fib4_add_info *info = data; |
1652 | struct mlxsw_sp_fib_entry *fib_entry = info->fib_entry; | 1649 | struct mlxsw_sp_fib_entry *fib_entry = info->fib_entry; |
1653 | struct mlxsw_sp *mlxsw_sp = info->mlxsw_sp; | 1650 | struct mlxsw_sp *mlxsw_sp = info->mlxsw_sp; |
1651 | struct mlxsw_sp_vr *vr = fib_entry->vr; | ||
1654 | 1652 | ||
1655 | mlxsw_sp_fib_entry_destroy(fib_entry); | 1653 | mlxsw_sp_fib_entry_destroy(fib_entry); |
1656 | mlxsw_sp_vr_put(mlxsw_sp, fib_entry->vr); | 1654 | mlxsw_sp_vr_put(mlxsw_sp, vr); |
1657 | kfree(info); | 1655 | kfree(info); |
1658 | } | 1656 | } |
1659 | 1657 | ||
@@ -1694,34 +1692,93 @@ mlxsw_sp_router_fib4_entry_fini(struct mlxsw_sp *mlxsw_sp, | |||
1694 | mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry); | 1692 | mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry); |
1695 | } | 1693 | } |
1696 | 1694 | ||
1697 | static int | 1695 | static struct mlxsw_sp_fib_entry * |
1698 | mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port, | 1696 | mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp, |
1699 | const struct switchdev_obj_ipv4_fib *fib4, | 1697 | const struct switchdev_obj_ipv4_fib *fib4) |
1700 | struct switchdev_trans *trans) | ||
1701 | { | 1698 | { |
1702 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; | ||
1703 | struct mlxsw_sp_router_fib4_add_info *info; | ||
1704 | struct mlxsw_sp_fib_entry *fib_entry; | 1699 | struct mlxsw_sp_fib_entry *fib_entry; |
1700 | struct fib_info *fi = fib4->fi; | ||
1705 | struct mlxsw_sp_vr *vr; | 1701 | struct mlxsw_sp_vr *vr; |
1706 | int err; | 1702 | int err; |
1707 | 1703 | ||
1708 | vr = mlxsw_sp_vr_get(mlxsw_sp, fib4->dst_len, fib4->tb_id, | 1704 | vr = mlxsw_sp_vr_get(mlxsw_sp, fib4->dst_len, fib4->tb_id, |
1709 | MLXSW_SP_L3_PROTO_IPV4); | 1705 | MLXSW_SP_L3_PROTO_IPV4); |
1710 | if (IS_ERR(vr)) | 1706 | if (IS_ERR(vr)) |
1711 | return PTR_ERR(vr); | 1707 | return ERR_CAST(vr); |
1712 | 1708 | ||
1709 | fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst, | ||
1710 | sizeof(fib4->dst), | ||
1711 | fib4->dst_len, fi->fib_dev); | ||
1712 | if (fib_entry) { | ||
1713 | /* Already exists, just take a reference */ | ||
1714 | fib_entry->ref_count++; | ||
1715 | return fib_entry; | ||
1716 | } | ||
1713 | fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fib4->dst, | 1717 | fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fib4->dst, |
1714 | sizeof(fib4->dst), fib4->dst_len); | 1718 | sizeof(fib4->dst), |
1719 | fib4->dst_len, fi->fib_dev); | ||
1715 | if (!fib_entry) { | 1720 | if (!fib_entry) { |
1716 | err = -ENOMEM; | 1721 | err = -ENOMEM; |
1717 | goto err_fib_entry_create; | 1722 | goto err_fib_entry_create; |
1718 | } | 1723 | } |
1719 | fib_entry->vr = vr; | 1724 | fib_entry->vr = vr; |
1725 | fib_entry->ref_count = 1; | ||
1720 | 1726 | ||
1721 | err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fib4, fib_entry); | 1727 | err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fib4, fib_entry); |
1722 | if (err) | 1728 | if (err) |
1723 | goto err_fib4_entry_init; | 1729 | goto err_fib4_entry_init; |
1724 | 1730 | ||
1731 | return fib_entry; | ||
1732 | |||
1733 | err_fib4_entry_init: | ||
1734 | mlxsw_sp_fib_entry_destroy(fib_entry); | ||
1735 | err_fib_entry_create: | ||
1736 | mlxsw_sp_vr_put(mlxsw_sp, vr); | ||
1737 | |||
1738 | return ERR_PTR(err); | ||
1739 | } | ||
1740 | |||
1741 | static struct mlxsw_sp_fib_entry * | ||
1742 | mlxsw_sp_fib_entry_find(struct mlxsw_sp *mlxsw_sp, | ||
1743 | const struct switchdev_obj_ipv4_fib *fib4) | ||
1744 | { | ||
1745 | struct mlxsw_sp_vr *vr; | ||
1746 | |||
1747 | vr = mlxsw_sp_vr_find(mlxsw_sp, fib4->tb_id, MLXSW_SP_L3_PROTO_IPV4); | ||
1748 | if (!vr) | ||
1749 | return NULL; | ||
1750 | |||
1751 | return mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst, | ||
1752 | sizeof(fib4->dst), fib4->dst_len, | ||
1753 | fib4->fi->fib_dev); | ||
1754 | } | ||
1755 | |||
1756 | void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp, | ||
1757 | struct mlxsw_sp_fib_entry *fib_entry) | ||
1758 | { | ||
1759 | struct mlxsw_sp_vr *vr = fib_entry->vr; | ||
1760 | |||
1761 | if (--fib_entry->ref_count == 0) { | ||
1762 | mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry); | ||
1763 | mlxsw_sp_fib_entry_destroy(fib_entry); | ||
1764 | } | ||
1765 | mlxsw_sp_vr_put(mlxsw_sp, vr); | ||
1766 | } | ||
1767 | |||
1768 | static int | ||
1769 | mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port, | ||
1770 | const struct switchdev_obj_ipv4_fib *fib4, | ||
1771 | struct switchdev_trans *trans) | ||
1772 | { | ||
1773 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; | ||
1774 | struct mlxsw_sp_router_fib4_add_info *info; | ||
1775 | struct mlxsw_sp_fib_entry *fib_entry; | ||
1776 | int err; | ||
1777 | |||
1778 | fib_entry = mlxsw_sp_fib_entry_get(mlxsw_sp, fib4); | ||
1779 | if (IS_ERR(fib_entry)) | ||
1780 | return PTR_ERR(fib_entry); | ||
1781 | |||
1725 | info = kmalloc(sizeof(*info), GFP_KERNEL); | 1782 | info = kmalloc(sizeof(*info), GFP_KERNEL); |
1726 | if (!info) { | 1783 | if (!info) { |
1727 | err = -ENOMEM; | 1784 | err = -ENOMEM; |
@@ -1735,11 +1792,7 @@ mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port, | |||
1735 | return 0; | 1792 | return 0; |
1736 | 1793 | ||
1737 | err_alloc_info: | 1794 | err_alloc_info: |
1738 | mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry); | 1795 | mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry); |
1739 | err_fib4_entry_init: | ||
1740 | mlxsw_sp_fib_entry_destroy(fib_entry); | ||
1741 | err_fib_entry_create: | ||
1742 | mlxsw_sp_vr_put(mlxsw_sp, vr); | ||
1743 | return err; | 1796 | return err; |
1744 | } | 1797 | } |
1745 | 1798 | ||
@@ -1758,11 +1811,14 @@ mlxsw_sp_router_fib4_add_commit(struct mlxsw_sp_port *mlxsw_sp_port, | |||
1758 | fib_entry = info->fib_entry; | 1811 | fib_entry = info->fib_entry; |
1759 | kfree(info); | 1812 | kfree(info); |
1760 | 1813 | ||
1814 | if (fib_entry->ref_count != 1) | ||
1815 | return 0; | ||
1816 | |||
1761 | vr = fib_entry->vr; | 1817 | vr = fib_entry->vr; |
1762 | err = mlxsw_sp_fib_entry_insert(fib_entry->vr->fib, fib_entry); | 1818 | err = mlxsw_sp_fib_entry_insert(vr->fib, fib_entry); |
1763 | if (err) | 1819 | if (err) |
1764 | goto err_fib_entry_insert; | 1820 | goto err_fib_entry_insert; |
1765 | err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry); | 1821 | err = mlxsw_sp_fib_entry_update(mlxsw_sp_port->mlxsw_sp, fib_entry); |
1766 | if (err) | 1822 | if (err) |
1767 | goto err_fib_entry_add; | 1823 | goto err_fib_entry_add; |
1768 | return 0; | 1824 | return 0; |
@@ -1770,9 +1826,7 @@ mlxsw_sp_router_fib4_add_commit(struct mlxsw_sp_port *mlxsw_sp_port, | |||
1770 | err_fib_entry_add: | 1826 | err_fib_entry_add: |
1771 | mlxsw_sp_fib_entry_remove(vr->fib, fib_entry); | 1827 | mlxsw_sp_fib_entry_remove(vr->fib, fib_entry); |
1772 | err_fib_entry_insert: | 1828 | err_fib_entry_insert: |
1773 | mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry); | 1829 | mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry); |
1774 | mlxsw_sp_fib_entry_destroy(fib_entry); | ||
1775 | mlxsw_sp_vr_put(mlxsw_sp, vr); | ||
1776 | return err; | 1830 | return err; |
1777 | } | 1831 | } |
1778 | 1832 | ||
@@ -1792,23 +1846,18 @@ int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port, | |||
1792 | { | 1846 | { |
1793 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; | 1847 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; |
1794 | struct mlxsw_sp_fib_entry *fib_entry; | 1848 | struct mlxsw_sp_fib_entry *fib_entry; |
1795 | struct mlxsw_sp_vr *vr; | ||
1796 | 1849 | ||
1797 | vr = mlxsw_sp_vr_find(mlxsw_sp, fib4->tb_id, MLXSW_SP_L3_PROTO_IPV4); | 1850 | fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fib4); |
1798 | if (!vr) { | ||
1799 | dev_warn(mlxsw_sp->bus_info->dev, "Failed to find virtual router for FIB4 entry being removed.\n"); | ||
1800 | return -ENOENT; | ||
1801 | } | ||
1802 | fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst, | ||
1803 | sizeof(fib4->dst), fib4->dst_len); | ||
1804 | if (!fib_entry) { | 1851 | if (!fib_entry) { |
1805 | dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n"); | 1852 | dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n"); |
1806 | return -ENOENT; | 1853 | return -ENOENT; |
1807 | } | 1854 | } |
1808 | mlxsw_sp_fib_entry_del(mlxsw_sp_port->mlxsw_sp, fib_entry); | 1855 | |
1809 | mlxsw_sp_fib_entry_remove(vr->fib, fib_entry); | 1856 | if (fib_entry->ref_count == 1) { |
1810 | mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry); | 1857 | mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry); |
1811 | mlxsw_sp_fib_entry_destroy(fib_entry); | 1858 | mlxsw_sp_fib_entry_remove(fib_entry->vr->fib, fib_entry); |
1812 | mlxsw_sp_vr_put(mlxsw_sp, vr); | 1859 | } |
1860 | |||
1861 | mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry); | ||
1813 | return 0; | 1862 | return 0; |
1814 | } | 1863 | } |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index a1ad5e6bdfa8..7b654c517b91 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | |||
@@ -167,8 +167,8 @@ static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port, | |||
167 | } | 167 | } |
168 | 168 | ||
169 | static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, | 169 | static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, |
170 | u16 idx_begin, u16 idx_end, bool set, | 170 | u16 idx_begin, u16 idx_end, bool uc_set, |
171 | bool only_uc) | 171 | bool bm_set) |
172 | { | 172 | { |
173 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; | 173 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; |
174 | u16 local_port = mlxsw_sp_port->local_port; | 174 | u16 local_port = mlxsw_sp_port->local_port; |
@@ -187,28 +187,22 @@ static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, | |||
187 | return -ENOMEM; | 187 | return -ENOMEM; |
188 | 188 | ||
189 | mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin, | 189 | mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin, |
190 | table_type, range, local_port, set); | 190 | table_type, range, local_port, uc_set); |
191 | err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); | 191 | err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); |
192 | if (err) | 192 | if (err) |
193 | goto buffer_out; | 193 | goto buffer_out; |
194 | 194 | ||
195 | /* Flooding control allows one to decide whether a given port will | ||
196 | * flood unicast traffic for which there is no FDB entry. | ||
197 | */ | ||
198 | if (only_uc) | ||
199 | goto buffer_out; | ||
200 | |||
201 | mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin, | 195 | mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin, |
202 | table_type, range, local_port, set); | 196 | table_type, range, local_port, bm_set); |
203 | err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); | 197 | err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); |
204 | if (err) | 198 | if (err) |
205 | goto err_flood_bm_set; | 199 | goto err_flood_bm_set; |
206 | else | 200 | |
207 | goto buffer_out; | 201 | goto buffer_out; |
208 | 202 | ||
209 | err_flood_bm_set: | 203 | err_flood_bm_set: |
210 | mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin, | 204 | mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin, |
211 | table_type, range, local_port, !set); | 205 | table_type, range, local_port, !uc_set); |
212 | mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); | 206 | mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); |
213 | buffer_out: | 207 | buffer_out: |
214 | kfree(sftr_pl); | 208 | kfree(sftr_pl); |
@@ -257,8 +251,7 @@ int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid, | |||
257 | * the start of the vFIDs range. | 251 | * the start of the vFIDs range. |
258 | */ | 252 | */ |
259 | vfid = mlxsw_sp_fid_to_vfid(fid); | 253 | vfid = mlxsw_sp_fid_to_vfid(fid); |
260 | return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, | 254 | return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set); |
261 | false); | ||
262 | } | 255 | } |
263 | 256 | ||
264 | static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port, | 257 | static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port, |
@@ -450,6 +443,8 @@ void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f) | |||
450 | 443 | ||
451 | kfree(f); | 444 | kfree(f); |
452 | 445 | ||
446 | mlxsw_sp_fid_map(mlxsw_sp, fid, false); | ||
447 | |||
453 | mlxsw_sp_fid_op(mlxsw_sp, fid, false); | 448 | mlxsw_sp_fid_op(mlxsw_sp, fid, false); |
454 | } | 449 | } |
455 | 450 | ||
@@ -458,6 +453,9 @@ static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port, | |||
458 | { | 453 | { |
459 | struct mlxsw_sp_fid *f; | 454 | struct mlxsw_sp_fid *f; |
460 | 455 | ||
456 | if (test_bit(fid, mlxsw_sp_port->active_vlans)) | ||
457 | return 0; | ||
458 | |||
461 | f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid); | 459 | f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid); |
462 | if (!f) { | 460 | if (!f) { |
463 | f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid); | 461 | f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid); |
@@ -515,7 +513,7 @@ static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port, | |||
515 | } | 513 | } |
516 | 514 | ||
517 | err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, | 515 | err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, |
518 | true, false); | 516 | mlxsw_sp_port->uc_flood, true); |
519 | if (err) | 517 | if (err) |
520 | goto err_port_flood_set; | 518 | goto err_port_flood_set; |
521 | 519 | ||
@@ -997,13 +995,13 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev, | |||
997 | } | 995 | } |
998 | 996 | ||
999 | static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, | 997 | static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, |
1000 | u16 vid_begin, u16 vid_end, bool init) | 998 | u16 vid_begin, u16 vid_end) |
1001 | { | 999 | { |
1002 | struct net_device *dev = mlxsw_sp_port->dev; | 1000 | struct net_device *dev = mlxsw_sp_port->dev; |
1003 | u16 vid, pvid; | 1001 | u16 vid, pvid; |
1004 | int err; | 1002 | int err; |
1005 | 1003 | ||
1006 | if (!init && !mlxsw_sp_port->bridged) | 1004 | if (!mlxsw_sp_port->bridged) |
1007 | return -EINVAL; | 1005 | return -EINVAL; |
1008 | 1006 | ||
1009 | err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, | 1007 | err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, |
@@ -1014,9 +1012,6 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, | |||
1014 | return err; | 1012 | return err; |
1015 | } | 1013 | } |
1016 | 1014 | ||
1017 | if (init) | ||
1018 | goto out; | ||
1019 | |||
1020 | pvid = mlxsw_sp_port->pvid; | 1015 | pvid = mlxsw_sp_port->pvid; |
1021 | if (pvid >= vid_begin && pvid <= vid_end) { | 1016 | if (pvid >= vid_begin && pvid <= vid_end) { |
1022 | err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0); | 1017 | err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0); |
@@ -1028,7 +1023,6 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, | |||
1028 | 1023 | ||
1029 | mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end); | 1024 | mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end); |
1030 | 1025 | ||
1031 | out: | ||
1032 | /* Changing activity bits only if HW operation succeded */ | 1026 | /* Changing activity bits only if HW operation succeded */ |
1033 | for (vid = vid_begin; vid <= vid_end; vid++) | 1027 | for (vid = vid_begin; vid <= vid_end; vid++) |
1034 | clear_bit(vid, mlxsw_sp_port->active_vlans); | 1028 | clear_bit(vid, mlxsw_sp_port->active_vlans); |
@@ -1039,8 +1033,8 @@ out: | |||
1039 | static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, | 1033 | static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, |
1040 | const struct switchdev_obj_port_vlan *vlan) | 1034 | const struct switchdev_obj_port_vlan *vlan) |
1041 | { | 1035 | { |
1042 | return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, | 1036 | return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vlan->vid_begin, |
1043 | vlan->vid_begin, vlan->vid_end, false); | 1037 | vlan->vid_end); |
1044 | } | 1038 | } |
1045 | 1039 | ||
1046 | void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port) | 1040 | void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port) |
@@ -1048,7 +1042,7 @@ void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port) | |||
1048 | u16 vid; | 1042 | u16 vid; |
1049 | 1043 | ||
1050 | for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) | 1044 | for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) |
1051 | __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid, false); | 1045 | __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid); |
1052 | } | 1046 | } |
1053 | 1047 | ||
1054 | static int | 1048 | static int |
@@ -1546,32 +1540,6 @@ void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp) | |||
1546 | mlxsw_sp_fdb_fini(mlxsw_sp); | 1540 | mlxsw_sp_fdb_fini(mlxsw_sp); |
1547 | } | 1541 | } |
1548 | 1542 | ||
1549 | int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port) | ||
1550 | { | ||
1551 | struct net_device *dev = mlxsw_sp_port->dev; | ||
1552 | int err; | ||
1553 | |||
1554 | /* Allow only untagged packets to ingress and tag them internally | ||
1555 | * with VID 1. | ||
1556 | */ | ||
1557 | mlxsw_sp_port->pvid = 1; | ||
1558 | err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID - 1, | ||
1559 | true); | ||
1560 | if (err) { | ||
1561 | netdev_err(dev, "Unable to init VLANs\n"); | ||
1562 | return err; | ||
1563 | } | ||
1564 | |||
1565 | /* Add implicit VLAN interface in the device, so that untagged | ||
1566 | * packets will be classified to the default vFID. | ||
1567 | */ | ||
1568 | err = mlxsw_sp_port_add_vid(dev, 0, 1); | ||
1569 | if (err) | ||
1570 | netdev_err(dev, "Failed to configure default vFID\n"); | ||
1571 | |||
1572 | return err; | ||
1573 | } | ||
1574 | |||
1575 | void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port) | 1543 | void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port) |
1576 | { | 1544 | { |
1577 | mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops; | 1545 | mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops; |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 470d7696e9fe..ed8e30186400 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h | |||
@@ -56,6 +56,10 @@ enum { | |||
56 | MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34, | 56 | MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34, |
57 | MLXSW_TRAP_ID_ARPBC = 0x50, | 57 | MLXSW_TRAP_ID_ARPBC = 0x50, |
58 | MLXSW_TRAP_ID_ARPUC = 0x51, | 58 | MLXSW_TRAP_ID_ARPUC = 0x51, |
59 | MLXSW_TRAP_ID_MTUERROR = 0x52, | ||
60 | MLXSW_TRAP_ID_TTLERROR = 0x53, | ||
61 | MLXSW_TRAP_ID_LBERROR = 0x54, | ||
62 | MLXSW_TRAP_ID_OSPF = 0x55, | ||
59 | MLXSW_TRAP_ID_IP2ME = 0x5F, | 63 | MLXSW_TRAP_ID_IP2ME = 0x5F, |
60 | MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70, | 64 | MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70, |
61 | MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90, | 65 | MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90, |
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 88678c172b19..39dadfca84ef 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c | |||
@@ -41,7 +41,6 @@ | |||
41 | * Chris Telfer <chris.telfer@netronome.com> | 41 | * Chris Telfer <chris.telfer@netronome.com> |
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <linux/version.h> | ||
45 | #include <linux/module.h> | 44 | #include <linux/module.h> |
46 | #include <linux/kernel.h> | 45 | #include <linux/kernel.h> |
47 | #include <linux/init.h> | 46 | #include <linux/init.h> |
@@ -1441,10 +1440,6 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) | |||
1441 | 1440 | ||
1442 | nfp_net_set_hash(nn->netdev, skb, rxd); | 1441 | nfp_net_set_hash(nn->netdev, skb, rxd); |
1443 | 1442 | ||
1444 | /* Pad small frames to minimum */ | ||
1445 | if (skb_put_padto(skb, 60)) | ||
1446 | break; | ||
1447 | |||
1448 | /* Stats update */ | 1443 | /* Stats update */ |
1449 | u64_stats_update_begin(&r_vec->rx_sync); | 1444 | u64_stats_update_begin(&r_vec->rx_sync); |
1450 | r_vec->rx_pkts++; | 1445 | r_vec->rx_pkts++; |
@@ -2049,12 +2044,16 @@ static int nfp_net_netdev_open(struct net_device *netdev) | |||
2049 | 2044 | ||
2050 | nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings), | 2045 | nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings), |
2051 | GFP_KERNEL); | 2046 | GFP_KERNEL); |
2052 | if (!nn->rx_rings) | 2047 | if (!nn->rx_rings) { |
2048 | err = -ENOMEM; | ||
2053 | goto err_free_lsc; | 2049 | goto err_free_lsc; |
2050 | } | ||
2054 | nn->tx_rings = kcalloc(nn->num_tx_rings, sizeof(*nn->tx_rings), | 2051 | nn->tx_rings = kcalloc(nn->num_tx_rings, sizeof(*nn->tx_rings), |
2055 | GFP_KERNEL); | 2052 | GFP_KERNEL); |
2056 | if (!nn->tx_rings) | 2053 | if (!nn->tx_rings) { |
2054 | err = -ENOMEM; | ||
2057 | goto err_free_rx_rings; | 2055 | goto err_free_rx_rings; |
2056 | } | ||
2058 | 2057 | ||
2059 | for (r = 0; r < nn->num_r_vecs; r++) { | 2058 | for (r = 0; r < nn->num_r_vecs; r++) { |
2060 | err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r); | 2059 | err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r); |
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 7d7933d00b8f..4c9897220969 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | |||
@@ -40,7 +40,6 @@ | |||
40 | * Brad Petrus <brad.petrus@netronome.com> | 40 | * Brad Petrus <brad.petrus@netronome.com> |
41 | */ | 41 | */ |
42 | 42 | ||
43 | #include <linux/version.h> | ||
44 | #include <linux/kernel.h> | 43 | #include <linux/kernel.h> |
45 | #include <linux/netdevice.h> | 44 | #include <linux/netdevice.h> |
46 | #include <linux/etherdevice.h> | 45 | #include <linux/etherdevice.h> |
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c index 37abef016a0a..f7062cb648e1 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c | |||
@@ -38,7 +38,6 @@ | |||
38 | * Rolf Neugebauer <rolf.neugebauer@netronome.com> | 38 | * Rolf Neugebauer <rolf.neugebauer@netronome.com> |
39 | */ | 39 | */ |
40 | 40 | ||
41 | #include <linux/version.h> | ||
42 | #include <linux/module.h> | 41 | #include <linux/module.h> |
43 | #include <linux/kernel.h> | 42 | #include <linux/kernel.h> |
44 | #include <linux/init.h> | 43 | #include <linux/init.h> |
@@ -134,7 +133,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, | |||
134 | } | 133 | } |
135 | 134 | ||
136 | nfp_net_get_fw_version(&fw_ver, ctrl_bar); | 135 | nfp_net_get_fw_version(&fw_ver, ctrl_bar); |
137 | if (fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) { | 136 | if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) { |
138 | dev_err(&pdev->dev, "Unknown Firmware ABI %d.%d.%d.%d\n", | 137 | dev_err(&pdev->dev, "Unknown Firmware ABI %d.%d.%d.%d\n", |
139 | fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor); | 138 | fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor); |
140 | err = -EINVAL; | 139 | err = -EINVAL; |
@@ -142,9 +141,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, | |||
142 | } | 141 | } |
143 | 142 | ||
144 | /* Determine stride */ | 143 | /* Determine stride */ |
145 | if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 0) || | 144 | if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) { |
146 | nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1) || | ||
147 | nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0x12, 0x48)) { | ||
148 | stride = 2; | 145 | stride = 2; |
149 | tx_bar_no = NFP_NET_Q0_BAR; | 146 | tx_bar_no = NFP_NET_Q0_BAR; |
150 | rx_bar_no = NFP_NET_Q1_BAR; | 147 | rx_bar_no = NFP_NET_Q1_BAR; |
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 4d4ecba0aad9..8e13ec84c538 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c | |||
@@ -475,14 +475,6 @@ static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac) | |||
475 | mac[5] = tmp >> 8; | 475 | mac[5] = tmp >> 8; |
476 | } | 476 | } |
477 | 477 | ||
478 | static void __lpc_eth_clock_enable(struct netdata_local *pldat, bool enable) | ||
479 | { | ||
480 | if (enable) | ||
481 | clk_prepare_enable(pldat->clk); | ||
482 | else | ||
483 | clk_disable_unprepare(pldat->clk); | ||
484 | } | ||
485 | |||
486 | static void __lpc_params_setup(struct netdata_local *pldat) | 478 | static void __lpc_params_setup(struct netdata_local *pldat) |
487 | { | 479 | { |
488 | u32 tmp; | 480 | u32 tmp; |
@@ -1056,7 +1048,7 @@ static int lpc_eth_close(struct net_device *ndev) | |||
1056 | writel(0, LPC_ENET_MAC2(pldat->net_base)); | 1048 | writel(0, LPC_ENET_MAC2(pldat->net_base)); |
1057 | spin_unlock_irqrestore(&pldat->lock, flags); | 1049 | spin_unlock_irqrestore(&pldat->lock, flags); |
1058 | 1050 | ||
1059 | __lpc_eth_clock_enable(pldat, false); | 1051 | clk_disable_unprepare(pldat->clk); |
1060 | 1052 | ||
1061 | return 0; | 1053 | return 0; |
1062 | } | 1054 | } |
@@ -1197,11 +1189,14 @@ static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd) | |||
1197 | static int lpc_eth_open(struct net_device *ndev) | 1189 | static int lpc_eth_open(struct net_device *ndev) |
1198 | { | 1190 | { |
1199 | struct netdata_local *pldat = netdev_priv(ndev); | 1191 | struct netdata_local *pldat = netdev_priv(ndev); |
1192 | int ret; | ||
1200 | 1193 | ||
1201 | if (netif_msg_ifup(pldat)) | 1194 | if (netif_msg_ifup(pldat)) |
1202 | dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name); | 1195 | dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name); |
1203 | 1196 | ||
1204 | __lpc_eth_clock_enable(pldat, true); | 1197 | ret = clk_prepare_enable(pldat->clk); |
1198 | if (ret) | ||
1199 | return ret; | ||
1205 | 1200 | ||
1206 | /* Suspended PHY makes LPC ethernet core block, so resume now */ | 1201 | /* Suspended PHY makes LPC ethernet core block, so resume now */ |
1207 | phy_resume(ndev->phydev); | 1202 | phy_resume(ndev->phydev); |
@@ -1320,7 +1315,9 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) | |||
1320 | } | 1315 | } |
1321 | 1316 | ||
1322 | /* Enable network clock */ | 1317 | /* Enable network clock */ |
1323 | __lpc_eth_clock_enable(pldat, true); | 1318 | ret = clk_prepare_enable(pldat->clk); |
1319 | if (ret) | ||
1320 | goto err_out_clk_put; | ||
1324 | 1321 | ||
1325 | /* Map IO space */ | 1322 | /* Map IO space */ |
1326 | pldat->net_base = ioremap(res->start, resource_size(res)); | 1323 | pldat->net_base = ioremap(res->start, resource_size(res)); |
@@ -1454,6 +1451,7 @@ err_out_iounmap: | |||
1454 | iounmap(pldat->net_base); | 1451 | iounmap(pldat->net_base); |
1455 | err_out_disable_clocks: | 1452 | err_out_disable_clocks: |
1456 | clk_disable_unprepare(pldat->clk); | 1453 | clk_disable_unprepare(pldat->clk); |
1454 | err_out_clk_put: | ||
1457 | clk_put(pldat->clk); | 1455 | clk_put(pldat->clk); |
1458 | err_out_free_dev: | 1456 | err_out_free_dev: |
1459 | free_netdev(ndev); | 1457 | free_netdev(ndev); |
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 35e53771533f..45ab74676573 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h | |||
@@ -561,9 +561,18 @@ struct qed_dev { | |||
561 | static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev, | 561 | static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev, |
562 | u32 concrete_fid) | 562 | u32 concrete_fid) |
563 | { | 563 | { |
564 | u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID); | ||
564 | u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID); | 565 | u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID); |
566 | u8 vf_valid = GET_FIELD(concrete_fid, | ||
567 | PXP_CONCRETE_FID_VFVALID); | ||
568 | u8 sw_fid; | ||
565 | 569 | ||
566 | return pfid; | 570 | if (vf_valid) |
571 | sw_fid = vfid + MAX_NUM_PFS; | ||
572 | else | ||
573 | sw_fid = pfid; | ||
574 | |||
575 | return sw_fid; | ||
567 | } | 576 | } |
568 | 577 | ||
569 | #define PURE_LB_TC 8 | 578 | #define PURE_LB_TC 8 |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index d0dc28f93c0e..3656d2fd673d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include "qed_dcbx.h" | 19 | #include "qed_dcbx.h" |
20 | #include "qed_hsi.h" | 20 | #include "qed_hsi.h" |
21 | #include "qed_sp.h" | 21 | #include "qed_sp.h" |
22 | #include "qed_sriov.h" | ||
22 | #ifdef CONFIG_DCB | 23 | #ifdef CONFIG_DCB |
23 | #include <linux/qed/qed_eth_if.h> | 24 | #include <linux/qed/qed_eth_if.h> |
24 | #endif | 25 | #endif |
@@ -52,40 +53,94 @@ static bool qed_dcbx_app_ethtype(u32 app_info_bitmap) | |||
52 | DCBX_APP_SF_ETHTYPE); | 53 | DCBX_APP_SF_ETHTYPE); |
53 | } | 54 | } |
54 | 55 | ||
56 | static bool qed_dcbx_ieee_app_ethtype(u32 app_info_bitmap) | ||
57 | { | ||
58 | u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE); | ||
59 | |||
60 | /* Old MFW */ | ||
61 | if (mfw_val == DCBX_APP_SF_IEEE_RESERVED) | ||
62 | return qed_dcbx_app_ethtype(app_info_bitmap); | ||
63 | |||
64 | return !!(mfw_val == DCBX_APP_SF_IEEE_ETHTYPE); | ||
65 | } | ||
66 | |||
55 | static bool qed_dcbx_app_port(u32 app_info_bitmap) | 67 | static bool qed_dcbx_app_port(u32 app_info_bitmap) |
56 | { | 68 | { |
57 | return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) == | 69 | return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) == |
58 | DCBX_APP_SF_PORT); | 70 | DCBX_APP_SF_PORT); |
59 | } | 71 | } |
60 | 72 | ||
61 | static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id) | 73 | static bool qed_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type) |
62 | { | 74 | { |
63 | return !!(qed_dcbx_app_ethtype(app_info_bitmap) && | 75 | u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE); |
64 | proto_id == QED_ETH_TYPE_DEFAULT); | 76 | |
77 | /* Old MFW */ | ||
78 | if (mfw_val == DCBX_APP_SF_IEEE_RESERVED) | ||
79 | return qed_dcbx_app_port(app_info_bitmap); | ||
80 | |||
81 | return !!(mfw_val == type || mfw_val == DCBX_APP_SF_IEEE_TCP_UDP_PORT); | ||
65 | } | 82 | } |
66 | 83 | ||
67 | static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id) | 84 | static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) |
68 | { | 85 | { |
69 | return !!(qed_dcbx_app_port(app_info_bitmap) && | 86 | bool ethtype; |
70 | proto_id == QED_TCP_PORT_ISCSI); | 87 | |
88 | if (ieee) | ||
89 | ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap); | ||
90 | else | ||
91 | ethtype = qed_dcbx_app_ethtype(app_info_bitmap); | ||
92 | |||
93 | return !!(ethtype && (proto_id == QED_ETH_TYPE_DEFAULT)); | ||
71 | } | 94 | } |
72 | 95 | ||
73 | static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id) | 96 | static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) |
74 | { | 97 | { |
75 | return !!(qed_dcbx_app_ethtype(app_info_bitmap) && | 98 | bool port; |
76 | proto_id == QED_ETH_TYPE_FCOE); | 99 | |
100 | if (ieee) | ||
101 | port = qed_dcbx_ieee_app_port(app_info_bitmap, | ||
102 | DCBX_APP_SF_IEEE_TCP_PORT); | ||
103 | else | ||
104 | port = qed_dcbx_app_port(app_info_bitmap); | ||
105 | |||
106 | return !!(port && (proto_id == QED_TCP_PORT_ISCSI)); | ||
77 | } | 107 | } |
78 | 108 | ||
79 | static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id) | 109 | static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) |
80 | { | 110 | { |
81 | return !!(qed_dcbx_app_ethtype(app_info_bitmap) && | 111 | bool ethtype; |
82 | proto_id == QED_ETH_TYPE_ROCE); | 112 | |
113 | if (ieee) | ||
114 | ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap); | ||
115 | else | ||
116 | ethtype = qed_dcbx_app_ethtype(app_info_bitmap); | ||
117 | |||
118 | return !!(ethtype && (proto_id == QED_ETH_TYPE_FCOE)); | ||
83 | } | 119 | } |
84 | 120 | ||
85 | static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id) | 121 | static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) |
86 | { | 122 | { |
87 | return !!(qed_dcbx_app_port(app_info_bitmap) && | 123 | bool ethtype; |
88 | proto_id == QED_UDP_PORT_TYPE_ROCE_V2); | 124 | |
125 | if (ieee) | ||
126 | ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap); | ||
127 | else | ||
128 | ethtype = qed_dcbx_app_ethtype(app_info_bitmap); | ||
129 | |||
130 | return !!(ethtype && (proto_id == QED_ETH_TYPE_ROCE)); | ||
131 | } | ||
132 | |||
133 | static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) | ||
134 | { | ||
135 | bool port; | ||
136 | |||
137 | if (ieee) | ||
138 | port = qed_dcbx_ieee_app_port(app_info_bitmap, | ||
139 | DCBX_APP_SF_IEEE_UDP_PORT); | ||
140 | else | ||
141 | port = qed_dcbx_app_port(app_info_bitmap); | ||
142 | |||
143 | return !!(port && (proto_id == QED_UDP_PORT_TYPE_ROCE_V2)); | ||
89 | } | 144 | } |
90 | 145 | ||
91 | static void | 146 | static void |
@@ -164,17 +219,17 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, | |||
164 | static bool | 219 | static bool |
165 | qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn, | 220 | qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn, |
166 | u32 app_prio_bitmap, | 221 | u32 app_prio_bitmap, |
167 | u16 id, enum dcbx_protocol_type *type) | 222 | u16 id, enum dcbx_protocol_type *type, bool ieee) |
168 | { | 223 | { |
169 | if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id)) { | 224 | if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id, ieee)) { |
170 | *type = DCBX_PROTOCOL_FCOE; | 225 | *type = DCBX_PROTOCOL_FCOE; |
171 | } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id)) { | 226 | } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id, ieee)) { |
172 | *type = DCBX_PROTOCOL_ROCE; | 227 | *type = DCBX_PROTOCOL_ROCE; |
173 | } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id)) { | 228 | } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id, ieee)) { |
174 | *type = DCBX_PROTOCOL_ISCSI; | 229 | *type = DCBX_PROTOCOL_ISCSI; |
175 | } else if (qed_dcbx_default_tlv(app_prio_bitmap, id)) { | 230 | } else if (qed_dcbx_default_tlv(app_prio_bitmap, id, ieee)) { |
176 | *type = DCBX_PROTOCOL_ETH; | 231 | *type = DCBX_PROTOCOL_ETH; |
177 | } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id)) { | 232 | } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id, ieee)) { |
178 | *type = DCBX_PROTOCOL_ROCE_V2; | 233 | *type = DCBX_PROTOCOL_ROCE_V2; |
179 | } else { | 234 | } else { |
180 | *type = DCBX_MAX_PROTOCOL_TYPE; | 235 | *type = DCBX_MAX_PROTOCOL_TYPE; |
@@ -194,17 +249,18 @@ static int | |||
194 | qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, | 249 | qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, |
195 | struct qed_dcbx_results *p_data, | 250 | struct qed_dcbx_results *p_data, |
196 | struct dcbx_app_priority_entry *p_tbl, | 251 | struct dcbx_app_priority_entry *p_tbl, |
197 | u32 pri_tc_tbl, int count, bool dcbx_enabled) | 252 | u32 pri_tc_tbl, int count, u8 dcbx_version) |
198 | { | 253 | { |
199 | u8 tc, priority_map; | 254 | u8 tc, priority_map; |
200 | enum dcbx_protocol_type type; | 255 | enum dcbx_protocol_type type; |
256 | bool enable, ieee; | ||
201 | u16 protocol_id; | 257 | u16 protocol_id; |
202 | int priority; | 258 | int priority; |
203 | bool enable; | ||
204 | int i; | 259 | int i; |
205 | 260 | ||
206 | DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count); | 261 | DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count); |
207 | 262 | ||
263 | ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE); | ||
208 | /* Parse APP TLV */ | 264 | /* Parse APP TLV */ |
209 | for (i = 0; i < count; i++) { | 265 | for (i = 0; i < count; i++) { |
210 | protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry, | 266 | protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry, |
@@ -219,7 +275,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, | |||
219 | 275 | ||
220 | tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority); | 276 | tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority); |
221 | if (qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, | 277 | if (qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, |
222 | protocol_id, &type)) { | 278 | protocol_id, &type, ieee)) { |
223 | /* ETH always have the enable bit reset, as it gets | 279 | /* ETH always have the enable bit reset, as it gets |
224 | * vlan information per packet. For other protocols, | 280 | * vlan information per packet. For other protocols, |
225 | * should be set according to the dcbx_enabled | 281 | * should be set according to the dcbx_enabled |
@@ -275,15 +331,12 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn) | |||
275 | struct dcbx_ets_feature *p_ets; | 331 | struct dcbx_ets_feature *p_ets; |
276 | struct qed_hw_info *p_info; | 332 | struct qed_hw_info *p_info; |
277 | u32 pri_tc_tbl, flags; | 333 | u32 pri_tc_tbl, flags; |
278 | bool dcbx_enabled; | 334 | u8 dcbx_version; |
279 | int num_entries; | 335 | int num_entries; |
280 | int rc = 0; | 336 | int rc = 0; |
281 | 337 | ||
282 | /* If DCBx version is non zero, then negotiation was | ||
283 | * successfuly performed | ||
284 | */ | ||
285 | flags = p_hwfn->p_dcbx_info->operational.flags; | 338 | flags = p_hwfn->p_dcbx_info->operational.flags; |
286 | dcbx_enabled = !!QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION); | 339 | dcbx_version = QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION); |
287 | 340 | ||
288 | p_app = &p_hwfn->p_dcbx_info->operational.features.app; | 341 | p_app = &p_hwfn->p_dcbx_info->operational.features.app; |
289 | p_tbl = p_app->app_pri_tbl; | 342 | p_tbl = p_app->app_pri_tbl; |
@@ -295,13 +348,13 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn) | |||
295 | num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES); | 348 | num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES); |
296 | 349 | ||
297 | rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl, | 350 | rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl, |
298 | num_entries, dcbx_enabled); | 351 | num_entries, dcbx_version); |
299 | if (rc) | 352 | if (rc) |
300 | return rc; | 353 | return rc; |
301 | 354 | ||
302 | p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS); | 355 | p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS); |
303 | data.pf_id = p_hwfn->rel_pf_id; | 356 | data.pf_id = p_hwfn->rel_pf_id; |
304 | data.dcbx_enabled = dcbx_enabled; | 357 | data.dcbx_enabled = !!dcbx_version; |
305 | 358 | ||
306 | qed_dcbx_dp_protocol(p_hwfn, &data); | 359 | qed_dcbx_dp_protocol(p_hwfn, &data); |
307 | 360 | ||
@@ -400,7 +453,7 @@ static void | |||
400 | qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn, | 453 | qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn, |
401 | struct dcbx_app_priority_feature *p_app, | 454 | struct dcbx_app_priority_feature *p_app, |
402 | struct dcbx_app_priority_entry *p_tbl, | 455 | struct dcbx_app_priority_entry *p_tbl, |
403 | struct qed_dcbx_params *p_params) | 456 | struct qed_dcbx_params *p_params, bool ieee) |
404 | { | 457 | { |
405 | struct qed_app_entry *entry; | 458 | struct qed_app_entry *entry; |
406 | u8 pri_map; | 459 | u8 pri_map; |
@@ -414,15 +467,46 @@ qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn, | |||
414 | DCBX_APP_NUM_ENTRIES); | 467 | DCBX_APP_NUM_ENTRIES); |
415 | for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { | 468 | for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { |
416 | entry = &p_params->app_entry[i]; | 469 | entry = &p_params->app_entry[i]; |
417 | entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry, | 470 | if (ieee) { |
418 | DCBX_APP_SF)); | 471 | u8 sf_ieee; |
472 | u32 val; | ||
473 | |||
474 | sf_ieee = QED_MFW_GET_FIELD(p_tbl[i].entry, | ||
475 | DCBX_APP_SF_IEEE); | ||
476 | switch (sf_ieee) { | ||
477 | case DCBX_APP_SF_IEEE_RESERVED: | ||
478 | /* Old MFW */ | ||
479 | val = QED_MFW_GET_FIELD(p_tbl[i].entry, | ||
480 | DCBX_APP_SF); | ||
481 | entry->sf_ieee = val ? | ||
482 | QED_DCBX_SF_IEEE_TCP_UDP_PORT : | ||
483 | QED_DCBX_SF_IEEE_ETHTYPE; | ||
484 | break; | ||
485 | case DCBX_APP_SF_IEEE_ETHTYPE: | ||
486 | entry->sf_ieee = QED_DCBX_SF_IEEE_ETHTYPE; | ||
487 | break; | ||
488 | case DCBX_APP_SF_IEEE_TCP_PORT: | ||
489 | entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_PORT; | ||
490 | break; | ||
491 | case DCBX_APP_SF_IEEE_UDP_PORT: | ||
492 | entry->sf_ieee = QED_DCBX_SF_IEEE_UDP_PORT; | ||
493 | break; | ||
494 | case DCBX_APP_SF_IEEE_TCP_UDP_PORT: | ||
495 | entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_UDP_PORT; | ||
496 | break; | ||
497 | } | ||
498 | } else { | ||
499 | entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry, | ||
500 | DCBX_APP_SF)); | ||
501 | } | ||
502 | |||
419 | pri_map = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP); | 503 | pri_map = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP); |
420 | entry->prio = ffs(pri_map) - 1; | 504 | entry->prio = ffs(pri_map) - 1; |
421 | entry->proto_id = QED_MFW_GET_FIELD(p_tbl[i].entry, | 505 | entry->proto_id = QED_MFW_GET_FIELD(p_tbl[i].entry, |
422 | DCBX_APP_PROTOCOL_ID); | 506 | DCBX_APP_PROTOCOL_ID); |
423 | qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, | 507 | qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, |
424 | entry->proto_id, | 508 | entry->proto_id, |
425 | &entry->proto_type); | 509 | &entry->proto_type, ieee); |
426 | } | 510 | } |
427 | 511 | ||
428 | DP_VERBOSE(p_hwfn, QED_MSG_DCB, | 512 | DP_VERBOSE(p_hwfn, QED_MSG_DCB, |
@@ -483,7 +567,7 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn, | |||
483 | bw_map[1] = be32_to_cpu(p_ets->tc_bw_tbl[1]); | 567 | bw_map[1] = be32_to_cpu(p_ets->tc_bw_tbl[1]); |
484 | tsa_map[0] = be32_to_cpu(p_ets->tc_tsa_tbl[0]); | 568 | tsa_map[0] = be32_to_cpu(p_ets->tc_tsa_tbl[0]); |
485 | tsa_map[1] = be32_to_cpu(p_ets->tc_tsa_tbl[1]); | 569 | tsa_map[1] = be32_to_cpu(p_ets->tc_tsa_tbl[1]); |
486 | pri_map = be32_to_cpu(p_ets->pri_tc_tbl[0]); | 570 | pri_map = p_ets->pri_tc_tbl[0]; |
487 | for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) { | 571 | for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) { |
488 | p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i]; | 572 | p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i]; |
489 | p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i]; | 573 | p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i]; |
@@ -500,9 +584,9 @@ qed_dcbx_get_common_params(struct qed_hwfn *p_hwfn, | |||
500 | struct dcbx_app_priority_feature *p_app, | 584 | struct dcbx_app_priority_feature *p_app, |
501 | struct dcbx_app_priority_entry *p_tbl, | 585 | struct dcbx_app_priority_entry *p_tbl, |
502 | struct dcbx_ets_feature *p_ets, | 586 | struct dcbx_ets_feature *p_ets, |
503 | u32 pfc, struct qed_dcbx_params *p_params) | 587 | u32 pfc, struct qed_dcbx_params *p_params, bool ieee) |
504 | { | 588 | { |
505 | qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params); | 589 | qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params, ieee); |
506 | qed_dcbx_get_ets_data(p_hwfn, p_ets, p_params); | 590 | qed_dcbx_get_ets_data(p_hwfn, p_ets, p_params); |
507 | qed_dcbx_get_pfc_data(p_hwfn, pfc, p_params); | 591 | qed_dcbx_get_pfc_data(p_hwfn, pfc, p_params); |
508 | } | 592 | } |
@@ -516,7 +600,7 @@ qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn, | |||
516 | p_feat = &p_hwfn->p_dcbx_info->local_admin.features; | 600 | p_feat = &p_hwfn->p_dcbx_info->local_admin.features; |
517 | qed_dcbx_get_common_params(p_hwfn, &p_feat->app, | 601 | qed_dcbx_get_common_params(p_hwfn, &p_feat->app, |
518 | p_feat->app.app_pri_tbl, &p_feat->ets, | 602 | p_feat->app.app_pri_tbl, &p_feat->ets, |
519 | p_feat->pfc, ¶ms->local.params); | 603 | p_feat->pfc, ¶ms->local.params, false); |
520 | params->local.valid = true; | 604 | params->local.valid = true; |
521 | } | 605 | } |
522 | 606 | ||
@@ -529,7 +613,7 @@ qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn, | |||
529 | p_feat = &p_hwfn->p_dcbx_info->remote.features; | 613 | p_feat = &p_hwfn->p_dcbx_info->remote.features; |
530 | qed_dcbx_get_common_params(p_hwfn, &p_feat->app, | 614 | qed_dcbx_get_common_params(p_hwfn, &p_feat->app, |
531 | p_feat->app.app_pri_tbl, &p_feat->ets, | 615 | p_feat->app.app_pri_tbl, &p_feat->ets, |
532 | p_feat->pfc, ¶ms->remote.params); | 616 | p_feat->pfc, ¶ms->remote.params, false); |
533 | params->remote.valid = true; | 617 | params->remote.valid = true; |
534 | } | 618 | } |
535 | 619 | ||
@@ -574,7 +658,8 @@ qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn, | |||
574 | 658 | ||
575 | qed_dcbx_get_common_params(p_hwfn, &p_feat->app, | 659 | qed_dcbx_get_common_params(p_hwfn, &p_feat->app, |
576 | p_feat->app.app_pri_tbl, &p_feat->ets, | 660 | p_feat->app.app_pri_tbl, &p_feat->ets, |
577 | p_feat->pfc, ¶ms->operational.params); | 661 | p_feat->pfc, ¶ms->operational.params, |
662 | p_operational->ieee); | ||
578 | qed_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio, p_results); | 663 | qed_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio, p_results); |
579 | err = QED_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR); | 664 | err = QED_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR); |
580 | p_operational->err = err; | 665 | p_operational->err = err; |
@@ -861,6 +946,9 @@ static int qed_dcbx_query_params(struct qed_hwfn *p_hwfn, | |||
861 | struct qed_ptt *p_ptt; | 946 | struct qed_ptt *p_ptt; |
862 | int rc; | 947 | int rc; |
863 | 948 | ||
949 | if (IS_VF(p_hwfn->cdev)) | ||
950 | return -EINVAL; | ||
951 | |||
864 | p_ptt = qed_ptt_acquire(p_hwfn); | 952 | p_ptt = qed_ptt_acquire(p_hwfn); |
865 | if (!p_ptt) | 953 | if (!p_ptt) |
866 | return -EBUSY; | 954 | return -EBUSY; |
@@ -900,6 +988,7 @@ qed_dcbx_set_pfc_data(struct qed_hwfn *p_hwfn, | |||
900 | if (p_params->pfc.prio[i]) | 988 | if (p_params->pfc.prio[i]) |
901 | pfc_map |= BIT(i); | 989 | pfc_map |= BIT(i); |
902 | 990 | ||
991 | *pfc &= ~DCBX_PFC_PRI_EN_BITMAP_MASK; | ||
903 | *pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_SHIFT); | 992 | *pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_SHIFT); |
904 | 993 | ||
905 | DP_VERBOSE(p_hwfn, QED_MSG_DCB, "pfc = 0x%x\n", *pfc); | 994 | DP_VERBOSE(p_hwfn, QED_MSG_DCB, "pfc = 0x%x\n", *pfc); |
@@ -944,7 +1033,6 @@ qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn, | |||
944 | val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4)); | 1033 | val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4)); |
945 | p_ets->pri_tc_tbl[0] |= val; | 1034 | p_ets->pri_tc_tbl[0] |= val; |
946 | } | 1035 | } |
947 | p_ets->pri_tc_tbl[0] = cpu_to_be32(p_ets->pri_tc_tbl[0]); | ||
948 | for (i = 0; i < 2; i++) { | 1036 | for (i = 0; i < 2; i++) { |
949 | p_ets->tc_bw_tbl[i] = cpu_to_be32(p_ets->tc_bw_tbl[i]); | 1037 | p_ets->tc_bw_tbl[i] = cpu_to_be32(p_ets->tc_bw_tbl[i]); |
950 | p_ets->tc_tsa_tbl[i] = cpu_to_be32(p_ets->tc_tsa_tbl[i]); | 1038 | p_ets->tc_tsa_tbl[i] = cpu_to_be32(p_ets->tc_tsa_tbl[i]); |
@@ -954,7 +1042,7 @@ qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn, | |||
954 | static void | 1042 | static void |
955 | qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn, | 1043 | qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn, |
956 | struct dcbx_app_priority_feature *p_app, | 1044 | struct dcbx_app_priority_feature *p_app, |
957 | struct qed_dcbx_params *p_params) | 1045 | struct qed_dcbx_params *p_params, bool ieee) |
958 | { | 1046 | { |
959 | u32 *entry; | 1047 | u32 *entry; |
960 | int i; | 1048 | int i; |
@@ -975,12 +1063,45 @@ qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn, | |||
975 | 1063 | ||
976 | for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { | 1064 | for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { |
977 | entry = &p_app->app_pri_tbl[i].entry; | 1065 | entry = &p_app->app_pri_tbl[i].entry; |
978 | *entry &= ~DCBX_APP_SF_MASK; | 1066 | *entry = 0; |
979 | if (p_params->app_entry[i].ethtype) | 1067 | if (ieee) { |
980 | *entry |= ((u32)DCBX_APP_SF_ETHTYPE << | 1068 | *entry &= ~(DCBX_APP_SF_IEEE_MASK | DCBX_APP_SF_MASK); |
981 | DCBX_APP_SF_SHIFT); | 1069 | switch (p_params->app_entry[i].sf_ieee) { |
982 | else | 1070 | case QED_DCBX_SF_IEEE_ETHTYPE: |
983 | *entry |= ((u32)DCBX_APP_SF_PORT << DCBX_APP_SF_SHIFT); | 1071 | *entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE << |
1072 | DCBX_APP_SF_IEEE_SHIFT); | ||
1073 | *entry |= ((u32)DCBX_APP_SF_ETHTYPE << | ||
1074 | DCBX_APP_SF_SHIFT); | ||
1075 | break; | ||
1076 | case QED_DCBX_SF_IEEE_TCP_PORT: | ||
1077 | *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT << | ||
1078 | DCBX_APP_SF_IEEE_SHIFT); | ||
1079 | *entry |= ((u32)DCBX_APP_SF_PORT << | ||
1080 | DCBX_APP_SF_SHIFT); | ||
1081 | break; | ||
1082 | case QED_DCBX_SF_IEEE_UDP_PORT: | ||
1083 | *entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT << | ||
1084 | DCBX_APP_SF_IEEE_SHIFT); | ||
1085 | *entry |= ((u32)DCBX_APP_SF_PORT << | ||
1086 | DCBX_APP_SF_SHIFT); | ||
1087 | break; | ||
1088 | case QED_DCBX_SF_IEEE_TCP_UDP_PORT: | ||
1089 | *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT << | ||
1090 | DCBX_APP_SF_IEEE_SHIFT); | ||
1091 | *entry |= ((u32)DCBX_APP_SF_PORT << | ||
1092 | DCBX_APP_SF_SHIFT); | ||
1093 | break; | ||
1094 | } | ||
1095 | } else { | ||
1096 | *entry &= ~DCBX_APP_SF_MASK; | ||
1097 | if (p_params->app_entry[i].ethtype) | ||
1098 | *entry |= ((u32)DCBX_APP_SF_ETHTYPE << | ||
1099 | DCBX_APP_SF_SHIFT); | ||
1100 | else | ||
1101 | *entry |= ((u32)DCBX_APP_SF_PORT << | ||
1102 | DCBX_APP_SF_SHIFT); | ||
1103 | } | ||
1104 | |||
984 | *entry &= ~DCBX_APP_PROTOCOL_ID_MASK; | 1105 | *entry &= ~DCBX_APP_PROTOCOL_ID_MASK; |
985 | *entry |= ((u32)p_params->app_entry[i].proto_id << | 1106 | *entry |= ((u32)p_params->app_entry[i].proto_id << |
986 | DCBX_APP_PROTOCOL_ID_SHIFT); | 1107 | DCBX_APP_PROTOCOL_ID_SHIFT); |
@@ -995,15 +1116,19 @@ qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn, | |||
995 | struct dcbx_local_params *local_admin, | 1116 | struct dcbx_local_params *local_admin, |
996 | struct qed_dcbx_set *params) | 1117 | struct qed_dcbx_set *params) |
997 | { | 1118 | { |
1119 | bool ieee = false; | ||
1120 | |||
998 | local_admin->flags = 0; | 1121 | local_admin->flags = 0; |
999 | memcpy(&local_admin->features, | 1122 | memcpy(&local_admin->features, |
1000 | &p_hwfn->p_dcbx_info->operational.features, | 1123 | &p_hwfn->p_dcbx_info->operational.features, |
1001 | sizeof(local_admin->features)); | 1124 | sizeof(local_admin->features)); |
1002 | 1125 | ||
1003 | if (params->enabled) | 1126 | if (params->enabled) { |
1004 | local_admin->config = params->ver_num; | 1127 | local_admin->config = params->ver_num; |
1005 | else | 1128 | ieee = !!(params->ver_num & DCBX_CONFIG_VERSION_IEEE); |
1129 | } else { | ||
1006 | local_admin->config = DCBX_CONFIG_VERSION_DISABLED; | 1130 | local_admin->config = DCBX_CONFIG_VERSION_DISABLED; |
1131 | } | ||
1007 | 1132 | ||
1008 | if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG) | 1133 | if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG) |
1009 | qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc, | 1134 | qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc, |
@@ -1015,7 +1140,7 @@ qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn, | |||
1015 | 1140 | ||
1016 | if (params->override_flags & QED_DCBX_OVERRIDE_APP_CFG) | 1141 | if (params->override_flags & QED_DCBX_OVERRIDE_APP_CFG) |
1017 | qed_dcbx_set_app_data(p_hwfn, &local_admin->features.app, | 1142 | qed_dcbx_set_app_data(p_hwfn, &local_admin->features.app, |
1018 | ¶ms->config.params); | 1143 | ¶ms->config.params, ieee); |
1019 | } | 1144 | } |
1020 | 1145 | ||
1021 | int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, | 1146 | int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, |
@@ -1064,7 +1189,7 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn, | |||
1064 | return 0; | 1189 | return 0; |
1065 | } | 1190 | } |
1066 | 1191 | ||
1067 | dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_KERNEL); | 1192 | dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL); |
1068 | if (!dcbx_info) { | 1193 | if (!dcbx_info) { |
1069 | DP_ERR(p_hwfn, "Failed to allocate struct qed_dcbx_info\n"); | 1194 | DP_ERR(p_hwfn, "Failed to allocate struct qed_dcbx_info\n"); |
1070 | return -ENOMEM; | 1195 | return -ENOMEM; |
@@ -1101,7 +1226,7 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn, | |||
1101 | { | 1226 | { |
1102 | struct qed_dcbx_get *dcbx_info; | 1227 | struct qed_dcbx_get *dcbx_info; |
1103 | 1228 | ||
1104 | dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_KERNEL); | 1229 | dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL); |
1105 | if (!dcbx_info) { | 1230 | if (!dcbx_info) { |
1106 | DP_ERR(hwfn->cdev, "Failed to allocate memory for dcbx_info\n"); | 1231 | DP_ERR(hwfn->cdev, "Failed to allocate memory for dcbx_info\n"); |
1107 | return NULL; | 1232 | return NULL; |
@@ -1596,8 +1721,10 @@ static int qed_dcbnl_setapp(struct qed_dev *cdev, | |||
1596 | if ((entry->ethtype == ethtype) && (entry->proto_id == idval)) | 1721 | if ((entry->ethtype == ethtype) && (entry->proto_id == idval)) |
1597 | break; | 1722 | break; |
1598 | /* First empty slot */ | 1723 | /* First empty slot */ |
1599 | if (!entry->proto_id) | 1724 | if (!entry->proto_id) { |
1725 | dcbx_set.config.params.num_app_entries++; | ||
1600 | break; | 1726 | break; |
1727 | } | ||
1601 | } | 1728 | } |
1602 | 1729 | ||
1603 | if (i == QED_DCBX_MAX_APP_PROTOCOL) { | 1730 | if (i == QED_DCBX_MAX_APP_PROTOCOL) { |
@@ -2117,8 +2244,10 @@ int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app) | |||
2117 | (entry->proto_id == app->protocol)) | 2244 | (entry->proto_id == app->protocol)) |
2118 | break; | 2245 | break; |
2119 | /* First empty slot */ | 2246 | /* First empty slot */ |
2120 | if (!entry->proto_id) | 2247 | if (!entry->proto_id) { |
2248 | dcbx_set.config.params.num_app_entries++; | ||
2121 | break; | 2249 | break; |
2250 | } | ||
2122 | } | 2251 | } |
2123 | 2252 | ||
2124 | if (i == QED_DCBX_MAX_APP_PROTOCOL) { | 2253 | if (i == QED_DCBX_MAX_APP_PROTOCOL) { |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 592784019994..6f9d3b831a2a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h | |||
@@ -6850,6 +6850,14 @@ struct dcbx_app_priority_entry { | |||
6850 | #define DCBX_APP_SF_SHIFT 8 | 6850 | #define DCBX_APP_SF_SHIFT 8 |
6851 | #define DCBX_APP_SF_ETHTYPE 0 | 6851 | #define DCBX_APP_SF_ETHTYPE 0 |
6852 | #define DCBX_APP_SF_PORT 1 | 6852 | #define DCBX_APP_SF_PORT 1 |
6853 | #define DCBX_APP_SF_IEEE_MASK 0x0000f000 | ||
6854 | #define DCBX_APP_SF_IEEE_SHIFT 12 | ||
6855 | #define DCBX_APP_SF_IEEE_RESERVED 0 | ||
6856 | #define DCBX_APP_SF_IEEE_ETHTYPE 1 | ||
6857 | #define DCBX_APP_SF_IEEE_TCP_PORT 2 | ||
6858 | #define DCBX_APP_SF_IEEE_UDP_PORT 3 | ||
6859 | #define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4 | ||
6860 | |||
6853 | #define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000 | 6861 | #define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000 |
6854 | #define DCBX_APP_PROTOCOL_ID_SHIFT 16 | 6862 | #define DCBX_APP_PROTOCOL_ID_SHIFT 16 |
6855 | }; | 6863 | }; |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index a240f26344a4..f776a77794c5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c | |||
@@ -1153,8 +1153,8 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, | |||
1153 | p_drv_version = &union_data.drv_version; | 1153 | p_drv_version = &union_data.drv_version; |
1154 | p_drv_version->version = p_ver->version; | 1154 | p_drv_version->version = p_ver->version; |
1155 | 1155 | ||
1156 | for (i = 0; i < MCP_DRV_VER_STR_SIZE - 1; i += 4) { | 1156 | for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) { |
1157 | val = cpu_to_be32(p_ver->name[i]); | 1157 | val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)])); |
1158 | *(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val; | 1158 | *(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val; |
1159 | } | 1159 | } |
1160 | 1160 | ||
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index e4bd02e46e57..9544e4c41359 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c | |||
@@ -722,11 +722,14 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, | |||
722 | txq->tx_db.data.bd_prod = | 722 | txq->tx_db.data.bd_prod = |
723 | cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl)); | 723 | cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl)); |
724 | 724 | ||
725 | if (!skb->xmit_more || netif_tx_queue_stopped(netdev_txq)) | 725 | if (!skb->xmit_more || netif_xmit_stopped(netdev_txq)) |
726 | qede_update_tx_producer(txq); | 726 | qede_update_tx_producer(txq); |
727 | 727 | ||
728 | if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl) | 728 | if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl) |
729 | < (MAX_SKB_FRAGS + 1))) { | 729 | < (MAX_SKB_FRAGS + 1))) { |
730 | if (skb->xmit_more) | ||
731 | qede_update_tx_producer(txq); | ||
732 | |||
730 | netif_tx_stop_queue(netdev_txq); | 733 | netif_tx_stop_queue(netdev_txq); |
731 | DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, | 734 | DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, |
732 | "Stop queue was called\n"); | 735 | "Stop queue was called\n"); |
@@ -2517,7 +2520,8 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, | |||
2517 | edev->ops->register_ops(cdev, &qede_ll_ops, edev); | 2520 | edev->ops->register_ops(cdev, &qede_ll_ops, edev); |
2518 | 2521 | ||
2519 | #ifdef CONFIG_DCB | 2522 | #ifdef CONFIG_DCB |
2520 | qede_set_dcbnl_ops(edev->ndev); | 2523 | if (!IS_VF(edev)) |
2524 | qede_set_dcbnl_ops(edev->ndev); | ||
2521 | #endif | 2525 | #endif |
2522 | 2526 | ||
2523 | INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task); | 2527 | INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task); |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index fd973f4f16c7..49bad00a0f8f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | |||
@@ -37,8 +37,8 @@ | |||
37 | 37 | ||
38 | #define _QLCNIC_LINUX_MAJOR 5 | 38 | #define _QLCNIC_LINUX_MAJOR 5 |
39 | #define _QLCNIC_LINUX_MINOR 3 | 39 | #define _QLCNIC_LINUX_MINOR 3 |
40 | #define _QLCNIC_LINUX_SUBVERSION 64 | 40 | #define _QLCNIC_LINUX_SUBVERSION 65 |
41 | #define QLCNIC_LINUX_VERSIONID "5.3.64" | 41 | #define QLCNIC_LINUX_VERSIONID "5.3.65" |
42 | #define QLCNIC_DRV_IDC_VER 0x01 | 42 | #define QLCNIC_DRV_IDC_VER 0x01 |
43 | #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ | 43 | #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ |
44 | (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) | 44 | (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 87c642d3b075..fedd7366713c 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | |||
@@ -102,7 +102,6 @@ | |||
102 | #define QLCNIC_RESPONSE_DESC 0x05 | 102 | #define QLCNIC_RESPONSE_DESC 0x05 |
103 | #define QLCNIC_LRO_DESC 0x12 | 103 | #define QLCNIC_LRO_DESC 0x12 |
104 | 104 | ||
105 | #define QLCNIC_TX_POLL_BUDGET 128 | ||
106 | #define QLCNIC_TCP_HDR_SIZE 20 | 105 | #define QLCNIC_TCP_HDR_SIZE 20 |
107 | #define QLCNIC_TCP_TS_OPTION_SIZE 12 | 106 | #define QLCNIC_TCP_TS_OPTION_SIZE 12 |
108 | #define QLCNIC_FETCH_RING_ID(handle) ((handle) >> 63) | 107 | #define QLCNIC_FETCH_RING_ID(handle) ((handle) >> 63) |
@@ -2008,7 +2007,6 @@ static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget) | |||
2008 | struct qlcnic_host_tx_ring *tx_ring; | 2007 | struct qlcnic_host_tx_ring *tx_ring; |
2009 | struct qlcnic_adapter *adapter; | 2008 | struct qlcnic_adapter *adapter; |
2010 | 2009 | ||
2011 | budget = QLCNIC_TX_POLL_BUDGET; | ||
2012 | tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi); | 2010 | tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi); |
2013 | adapter = tx_ring->adapter; | 2011 | adapter = tx_ring->adapter; |
2014 | work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget); | 2012 | work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget); |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h index 017d8c2c8285..24061b9b92e8 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h | |||
@@ -156,10 +156,8 @@ struct qlcnic_vf_info { | |||
156 | spinlock_t vlan_list_lock; /* Lock for VLAN list */ | 156 | spinlock_t vlan_list_lock; /* Lock for VLAN list */ |
157 | }; | 157 | }; |
158 | 158 | ||
159 | struct qlcnic_async_work_list { | 159 | struct qlcnic_async_cmd { |
160 | struct list_head list; | 160 | struct list_head list; |
161 | struct work_struct work; | ||
162 | void *ptr; | ||
163 | struct qlcnic_cmd_args *cmd; | 161 | struct qlcnic_cmd_args *cmd; |
164 | }; | 162 | }; |
165 | 163 | ||
@@ -168,7 +166,10 @@ struct qlcnic_back_channel { | |||
168 | struct workqueue_struct *bc_trans_wq; | 166 | struct workqueue_struct *bc_trans_wq; |
169 | struct workqueue_struct *bc_async_wq; | 167 | struct workqueue_struct *bc_async_wq; |
170 | struct workqueue_struct *bc_flr_wq; | 168 | struct workqueue_struct *bc_flr_wq; |
171 | struct list_head async_list; | 169 | struct qlcnic_adapter *adapter; |
170 | struct list_head async_cmd_list; | ||
171 | struct work_struct vf_async_work; | ||
172 | spinlock_t queue_lock; /* async_cmd_list queue lock */ | ||
172 | }; | 173 | }; |
173 | 174 | ||
174 | struct qlcnic_sriov { | 175 | struct qlcnic_sriov { |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 7327b729ba2e..d7107055ec60 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #define QLC_83XX_VF_RESET_FAIL_THRESH 8 | 29 | #define QLC_83XX_VF_RESET_FAIL_THRESH 8 |
30 | #define QLC_BC_CMD_MAX_RETRY_CNT 5 | 30 | #define QLC_BC_CMD_MAX_RETRY_CNT 5 |
31 | 31 | ||
32 | static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work); | ||
32 | static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *); | 33 | static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *); |
33 | static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32); | 34 | static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32); |
34 | static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *); | 35 | static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *); |
@@ -177,7 +178,10 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs) | |||
177 | } | 178 | } |
178 | 179 | ||
179 | bc->bc_async_wq = wq; | 180 | bc->bc_async_wq = wq; |
180 | INIT_LIST_HEAD(&bc->async_list); | 181 | INIT_LIST_HEAD(&bc->async_cmd_list); |
182 | INIT_WORK(&bc->vf_async_work, qlcnic_sriov_handle_async_issue_cmd); | ||
183 | spin_lock_init(&bc->queue_lock); | ||
184 | bc->adapter = adapter; | ||
181 | 185 | ||
182 | for (i = 0; i < num_vfs; i++) { | 186 | for (i = 0; i < num_vfs; i++) { |
183 | vf = &sriov->vf_info[i]; | 187 | vf = &sriov->vf_info[i]; |
@@ -1517,17 +1521,21 @@ static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac, | |||
1517 | 1521 | ||
1518 | void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc) | 1522 | void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc) |
1519 | { | 1523 | { |
1520 | struct list_head *head = &bc->async_list; | 1524 | struct list_head *head = &bc->async_cmd_list; |
1521 | struct qlcnic_async_work_list *entry; | 1525 | struct qlcnic_async_cmd *entry; |
1522 | 1526 | ||
1523 | flush_workqueue(bc->bc_async_wq); | 1527 | flush_workqueue(bc->bc_async_wq); |
1528 | cancel_work_sync(&bc->vf_async_work); | ||
1529 | |||
1530 | spin_lock(&bc->queue_lock); | ||
1524 | while (!list_empty(head)) { | 1531 | while (!list_empty(head)) { |
1525 | entry = list_entry(head->next, struct qlcnic_async_work_list, | 1532 | entry = list_entry(head->next, struct qlcnic_async_cmd, |
1526 | list); | 1533 | list); |
1527 | cancel_work_sync(&entry->work); | ||
1528 | list_del(&entry->list); | 1534 | list_del(&entry->list); |
1535 | kfree(entry->cmd); | ||
1529 | kfree(entry); | 1536 | kfree(entry); |
1530 | } | 1537 | } |
1538 | spin_unlock(&bc->queue_lock); | ||
1531 | } | 1539 | } |
1532 | 1540 | ||
1533 | void qlcnic_sriov_vf_set_multi(struct net_device *netdev) | 1541 | void qlcnic_sriov_vf_set_multi(struct net_device *netdev) |
@@ -1587,57 +1595,64 @@ void qlcnic_sriov_vf_set_multi(struct net_device *netdev) | |||
1587 | 1595 | ||
1588 | static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work) | 1596 | static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work) |
1589 | { | 1597 | { |
1590 | struct qlcnic_async_work_list *entry; | 1598 | struct qlcnic_async_cmd *entry, *tmp; |
1591 | struct qlcnic_adapter *adapter; | 1599 | struct qlcnic_back_channel *bc; |
1592 | struct qlcnic_cmd_args *cmd; | 1600 | struct qlcnic_cmd_args *cmd; |
1601 | struct list_head *head; | ||
1602 | LIST_HEAD(del_list); | ||
1603 | |||
1604 | bc = container_of(work, struct qlcnic_back_channel, vf_async_work); | ||
1605 | head = &bc->async_cmd_list; | ||
1606 | |||
1607 | spin_lock(&bc->queue_lock); | ||
1608 | list_splice_init(head, &del_list); | ||
1609 | spin_unlock(&bc->queue_lock); | ||
1610 | |||
1611 | list_for_each_entry_safe(entry, tmp, &del_list, list) { | ||
1612 | list_del(&entry->list); | ||
1613 | cmd = entry->cmd; | ||
1614 | __qlcnic_sriov_issue_cmd(bc->adapter, cmd); | ||
1615 | kfree(entry); | ||
1616 | } | ||
1617 | |||
1618 | if (!list_empty(head)) | ||
1619 | queue_work(bc->bc_async_wq, &bc->vf_async_work); | ||
1593 | 1620 | ||
1594 | entry = container_of(work, struct qlcnic_async_work_list, work); | ||
1595 | adapter = entry->ptr; | ||
1596 | cmd = entry->cmd; | ||
1597 | __qlcnic_sriov_issue_cmd(adapter, cmd); | ||
1598 | return; | 1621 | return; |
1599 | } | 1622 | } |
1600 | 1623 | ||
1601 | static struct qlcnic_async_work_list * | 1624 | static struct qlcnic_async_cmd * |
1602 | qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc) | 1625 | qlcnic_sriov_alloc_async_cmd(struct qlcnic_back_channel *bc, |
1626 | struct qlcnic_cmd_args *cmd) | ||
1603 | { | 1627 | { |
1604 | struct list_head *node; | 1628 | struct qlcnic_async_cmd *entry = NULL; |
1605 | struct qlcnic_async_work_list *entry = NULL; | ||
1606 | u8 empty = 0; | ||
1607 | 1629 | ||
1608 | list_for_each(node, &bc->async_list) { | 1630 | entry = kzalloc(sizeof(*entry), GFP_ATOMIC); |
1609 | entry = list_entry(node, struct qlcnic_async_work_list, list); | 1631 | if (!entry) |
1610 | if (!work_pending(&entry->work)) { | 1632 | return NULL; |
1611 | empty = 1; | ||
1612 | break; | ||
1613 | } | ||
1614 | } | ||
1615 | 1633 | ||
1616 | if (!empty) { | 1634 | entry->cmd = cmd; |
1617 | entry = kzalloc(sizeof(struct qlcnic_async_work_list), | 1635 | |
1618 | GFP_ATOMIC); | 1636 | spin_lock(&bc->queue_lock); |
1619 | if (entry == NULL) | 1637 | list_add_tail(&entry->list, &bc->async_cmd_list); |
1620 | return NULL; | 1638 | spin_unlock(&bc->queue_lock); |
1621 | list_add_tail(&entry->list, &bc->async_list); | ||
1622 | } | ||
1623 | 1639 | ||
1624 | return entry; | 1640 | return entry; |
1625 | } | 1641 | } |
1626 | 1642 | ||
1627 | static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc, | 1643 | static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc, |
1628 | work_func_t func, void *data, | ||
1629 | struct qlcnic_cmd_args *cmd) | 1644 | struct qlcnic_cmd_args *cmd) |
1630 | { | 1645 | { |
1631 | struct qlcnic_async_work_list *entry = NULL; | 1646 | struct qlcnic_async_cmd *entry = NULL; |
1632 | 1647 | ||
1633 | entry = qlcnic_sriov_get_free_node_async_work(bc); | 1648 | entry = qlcnic_sriov_alloc_async_cmd(bc, cmd); |
1634 | if (!entry) | 1649 | if (!entry) { |
1650 | qlcnic_free_mbx_args(cmd); | ||
1651 | kfree(cmd); | ||
1635 | return; | 1652 | return; |
1653 | } | ||
1636 | 1654 | ||
1637 | entry->ptr = data; | 1655 | queue_work(bc->bc_async_wq, &bc->vf_async_work); |
1638 | entry->cmd = cmd; | ||
1639 | INIT_WORK(&entry->work, func); | ||
1640 | queue_work(bc->bc_async_wq, &entry->work); | ||
1641 | } | 1656 | } |
1642 | 1657 | ||
1643 | static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter, | 1658 | static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter, |
@@ -1649,8 +1664,8 @@ static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter, | |||
1649 | if (adapter->need_fw_reset) | 1664 | if (adapter->need_fw_reset) |
1650 | return -EIO; | 1665 | return -EIO; |
1651 | 1666 | ||
1652 | qlcnic_sriov_schedule_async_cmd(bc, qlcnic_sriov_handle_async_issue_cmd, | 1667 | qlcnic_sriov_schedule_async_cmd(bc, cmd); |
1653 | adapter, cmd); | 1668 | |
1654 | return 0; | 1669 | return 0; |
1655 | } | 1670 | } |
1656 | 1671 | ||
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index deae10d7426d..5297bf77211c 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c | |||
@@ -467,8 +467,8 @@ static int cp_rx_poll(struct napi_struct *napi, int budget) | |||
467 | unsigned int rx_tail = cp->rx_tail; | 467 | unsigned int rx_tail = cp->rx_tail; |
468 | int rx; | 468 | int rx; |
469 | 469 | ||
470 | rx_status_loop: | ||
471 | rx = 0; | 470 | rx = 0; |
471 | rx_status_loop: | ||
472 | cpw16(IntrStatus, cp_rx_intr_mask); | 472 | cpw16(IntrStatus, cp_rx_intr_mask); |
473 | 473 | ||
474 | while (rx < budget) { | 474 | while (rx < budget) { |
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 799d58d86e6d..054e795df90f 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c | |||
@@ -201,9 +201,14 @@ static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = { | |||
201 | 201 | ||
202 | [ARSTR] = 0x0000, | 202 | [ARSTR] = 0x0000, |
203 | [TSU_CTRST] = 0x0004, | 203 | [TSU_CTRST] = 0x0004, |
204 | [TSU_FWSLC] = 0x0038, | ||
204 | [TSU_VTAG0] = 0x0058, | 205 | [TSU_VTAG0] = 0x0058, |
205 | [TSU_ADSBSY] = 0x0060, | 206 | [TSU_ADSBSY] = 0x0060, |
206 | [TSU_TEN] = 0x0064, | 207 | [TSU_TEN] = 0x0064, |
208 | [TSU_POST1] = 0x0070, | ||
209 | [TSU_POST2] = 0x0074, | ||
210 | [TSU_POST3] = 0x0078, | ||
211 | [TSU_POST4] = 0x007c, | ||
207 | [TSU_ADRH0] = 0x0100, | 212 | [TSU_ADRH0] = 0x0100, |
208 | 213 | ||
209 | [TXNLCR0] = 0x0080, | 214 | [TXNLCR0] = 0x0080, |
@@ -2786,6 +2791,8 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp) | |||
2786 | { | 2791 | { |
2787 | if (sh_eth_is_rz_fast_ether(mdp)) { | 2792 | if (sh_eth_is_rz_fast_ether(mdp)) { |
2788 | sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ | 2793 | sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ |
2794 | sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, | ||
2795 | TSU_FWSLC); /* Enable POST registers */ | ||
2789 | return; | 2796 | return; |
2790 | } | 2797 | } |
2791 | 2798 | ||
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index f658fee74f18..e00a669e9e09 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c | |||
@@ -1517,13 +1517,14 @@ static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask) | |||
1517 | } | 1517 | } |
1518 | 1518 | ||
1519 | #if BITS_PER_LONG == 64 | 1519 | #if BITS_PER_LONG == 64 |
1520 | BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2); | ||
1520 | mask[0] = raw_mask[0]; | 1521 | mask[0] = raw_mask[0]; |
1521 | mask[1] = raw_mask[1]; | 1522 | mask[1] = raw_mask[1]; |
1522 | #else | 1523 | #else |
1524 | BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 3); | ||
1523 | mask[0] = raw_mask[0] & 0xffffffff; | 1525 | mask[0] = raw_mask[0] & 0xffffffff; |
1524 | mask[1] = raw_mask[0] >> 32; | 1526 | mask[1] = raw_mask[0] >> 32; |
1525 | mask[2] = raw_mask[1] & 0xffffffff; | 1527 | mask[2] = raw_mask[1] & 0xffffffff; |
1526 | mask[3] = raw_mask[1] >> 32; | ||
1527 | #endif | 1528 | #endif |
1528 | } | 1529 | } |
1529 | 1530 | ||
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 726b80f45906..503a3b6dce91 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c | |||
@@ -2275,6 +2275,13 @@ static int smc_drv_probe(struct platform_device *pdev) | |||
2275 | if (pd) { | 2275 | if (pd) { |
2276 | memcpy(&lp->cfg, pd, sizeof(lp->cfg)); | 2276 | memcpy(&lp->cfg, pd, sizeof(lp->cfg)); |
2277 | lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags); | 2277 | lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags); |
2278 | |||
2279 | if (!SMC_8BIT(lp) && !SMC_16BIT(lp)) { | ||
2280 | dev_err(&pdev->dev, | ||
2281 | "at least one of 8-bit or 16-bit access support is required.\n"); | ||
2282 | ret = -ENXIO; | ||
2283 | goto out_free_netdev; | ||
2284 | } | ||
2278 | } | 2285 | } |
2279 | 2286 | ||
2280 | #if IS_BUILTIN(CONFIG_OF) | 2287 | #if IS_BUILTIN(CONFIG_OF) |
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h index 1a55c7976df0..ea8465467469 100644 --- a/drivers/net/ethernet/smsc/smc91x.h +++ b/drivers/net/ethernet/smsc/smc91x.h | |||
@@ -37,6 +37,27 @@ | |||
37 | #include <linux/smc91x.h> | 37 | #include <linux/smc91x.h> |
38 | 38 | ||
39 | /* | 39 | /* |
40 | * Any 16-bit access is performed with two 8-bit accesses if the hardware | ||
41 | * can't do it directly. Most registers are 16-bit so those are mandatory. | ||
42 | */ | ||
43 | #define SMC_outw_b(x, a, r) \ | ||
44 | do { \ | ||
45 | unsigned int __val16 = (x); \ | ||
46 | unsigned int __reg = (r); \ | ||
47 | SMC_outb(__val16, a, __reg); \ | ||
48 | SMC_outb(__val16 >> 8, a, __reg + (1 << SMC_IO_SHIFT)); \ | ||
49 | } while (0) | ||
50 | |||
51 | #define SMC_inw_b(a, r) \ | ||
52 | ({ \ | ||
53 | unsigned int __val16; \ | ||
54 | unsigned int __reg = r; \ | ||
55 | __val16 = SMC_inb(a, __reg); \ | ||
56 | __val16 |= SMC_inb(a, __reg + (1 << SMC_IO_SHIFT)) << 8; \ | ||
57 | __val16; \ | ||
58 | }) | ||
59 | |||
60 | /* | ||
40 | * Define your architecture specific bus configuration parameters here. | 61 | * Define your architecture specific bus configuration parameters here. |
41 | */ | 62 | */ |
42 | 63 | ||
@@ -55,10 +76,30 @@ | |||
55 | #define SMC_IO_SHIFT (lp->io_shift) | 76 | #define SMC_IO_SHIFT (lp->io_shift) |
56 | 77 | ||
57 | #define SMC_inb(a, r) readb((a) + (r)) | 78 | #define SMC_inb(a, r) readb((a) + (r)) |
58 | #define SMC_inw(a, r) readw((a) + (r)) | 79 | #define SMC_inw(a, r) \ |
80 | ({ \ | ||
81 | unsigned int __smc_r = r; \ | ||
82 | SMC_16BIT(lp) ? readw((a) + __smc_r) : \ | ||
83 | SMC_8BIT(lp) ? SMC_inw_b(a, __smc_r) : \ | ||
84 | ({ BUG(); 0; }); \ | ||
85 | }) | ||
86 | |||
59 | #define SMC_inl(a, r) readl((a) + (r)) | 87 | #define SMC_inl(a, r) readl((a) + (r)) |
60 | #define SMC_outb(v, a, r) writeb(v, (a) + (r)) | 88 | #define SMC_outb(v, a, r) writeb(v, (a) + (r)) |
89 | #define SMC_outw(v, a, r) \ | ||
90 | do { \ | ||
91 | unsigned int __v = v, __smc_r = r; \ | ||
92 | if (SMC_16BIT(lp)) \ | ||
93 | __SMC_outw(__v, a, __smc_r); \ | ||
94 | else if (SMC_8BIT(lp)) \ | ||
95 | SMC_outw_b(__v, a, __smc_r); \ | ||
96 | else \ | ||
97 | BUG(); \ | ||
98 | } while (0) | ||
99 | |||
61 | #define SMC_outl(v, a, r) writel(v, (a) + (r)) | 100 | #define SMC_outl(v, a, r) writel(v, (a) + (r)) |
101 | #define SMC_insb(a, r, p, l) readsb((a) + (r), p, l) | ||
102 | #define SMC_outsb(a, r, p, l) writesb((a) + (r), p, l) | ||
62 | #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) | 103 | #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) |
63 | #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) | 104 | #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) |
64 | #define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) | 105 | #define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) |
@@ -66,7 +107,7 @@ | |||
66 | #define SMC_IRQ_FLAGS (-1) /* from resource */ | 107 | #define SMC_IRQ_FLAGS (-1) /* from resource */ |
67 | 108 | ||
68 | /* We actually can't write halfwords properly if not word aligned */ | 109 | /* We actually can't write halfwords properly if not word aligned */ |
69 | static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg) | 110 | static inline void __SMC_outw(u16 val, void __iomem *ioaddr, int reg) |
70 | { | 111 | { |
71 | if ((machine_is_mainstone() || machine_is_stargate2() || | 112 | if ((machine_is_mainstone() || machine_is_stargate2() || |
72 | machine_is_pxa_idp()) && reg & 2) { | 113 | machine_is_pxa_idp()) && reg & 2) { |
@@ -416,24 +457,8 @@ smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma, | |||
416 | 457 | ||
417 | #if ! SMC_CAN_USE_16BIT | 458 | #if ! SMC_CAN_USE_16BIT |
418 | 459 | ||
419 | /* | 460 | #define SMC_outw(x, ioaddr, reg) SMC_outw_b(x, ioaddr, reg) |
420 | * Any 16-bit access is performed with two 8-bit accesses if the hardware | 461 | #define SMC_inw(ioaddr, reg) SMC_inw_b(ioaddr, reg) |
421 | * can't do it directly. Most registers are 16-bit so those are mandatory. | ||
422 | */ | ||
423 | #define SMC_outw(x, ioaddr, reg) \ | ||
424 | do { \ | ||
425 | unsigned int __val16 = (x); \ | ||
426 | SMC_outb( __val16, ioaddr, reg ); \ | ||
427 | SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\ | ||
428 | } while (0) | ||
429 | #define SMC_inw(ioaddr, reg) \ | ||
430 | ({ \ | ||
431 | unsigned int __val16; \ | ||
432 | __val16 = SMC_inb( ioaddr, reg ); \ | ||
433 | __val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \ | ||
434 | __val16; \ | ||
435 | }) | ||
436 | |||
437 | #define SMC_insw(a, r, p, l) BUG() | 462 | #define SMC_insw(a, r, p, l) BUG() |
438 | #define SMC_outsw(a, r, p, l) BUG() | 463 | #define SMC_outsw(a, r, p, l) BUG() |
439 | 464 | ||
@@ -445,7 +470,9 @@ smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma, | |||
445 | #endif | 470 | #endif |
446 | 471 | ||
447 | #if ! SMC_CAN_USE_8BIT | 472 | #if ! SMC_CAN_USE_8BIT |
473 | #undef SMC_inb | ||
448 | #define SMC_inb(ioaddr, reg) ({ BUG(); 0; }) | 474 | #define SMC_inb(ioaddr, reg) ({ BUG(); 0; }) |
475 | #undef SMC_outb | ||
449 | #define SMC_outb(x, ioaddr, reg) BUG() | 476 | #define SMC_outb(x, ioaddr, reg) BUG() |
450 | #define SMC_insb(a, r, p, l) BUG() | 477 | #define SMC_insb(a, r, p, l) BUG() |
451 | #define SMC_outsb(a, r, p, l) BUG() | 478 | #define SMC_outsb(a, r, p, l) BUG() |
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index ca3134540d2d..4f8910b7db2e 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c | |||
@@ -1099,15 +1099,8 @@ static int smsc911x_mii_init(struct platform_device *pdev, | |||
1099 | goto err_out_free_bus_2; | 1099 | goto err_out_free_bus_2; |
1100 | } | 1100 | } |
1101 | 1101 | ||
1102 | if (smsc911x_mii_probe(dev) < 0) { | ||
1103 | SMSC_WARN(pdata, probe, "Error registering mii bus"); | ||
1104 | goto err_out_unregister_bus_3; | ||
1105 | } | ||
1106 | |||
1107 | return 0; | 1102 | return 0; |
1108 | 1103 | ||
1109 | err_out_unregister_bus_3: | ||
1110 | mdiobus_unregister(pdata->mii_bus); | ||
1111 | err_out_free_bus_2: | 1104 | err_out_free_bus_2: |
1112 | mdiobus_free(pdata->mii_bus); | 1105 | mdiobus_free(pdata->mii_bus); |
1113 | err_out_1: | 1106 | err_out_1: |
@@ -1514,23 +1507,90 @@ static void smsc911x_disable_irq_chip(struct net_device *dev) | |||
1514 | smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF); | 1507 | smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF); |
1515 | } | 1508 | } |
1516 | 1509 | ||
1510 | static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id) | ||
1511 | { | ||
1512 | struct net_device *dev = dev_id; | ||
1513 | struct smsc911x_data *pdata = netdev_priv(dev); | ||
1514 | u32 intsts = smsc911x_reg_read(pdata, INT_STS); | ||
1515 | u32 inten = smsc911x_reg_read(pdata, INT_EN); | ||
1516 | int serviced = IRQ_NONE; | ||
1517 | u32 temp; | ||
1518 | |||
1519 | if (unlikely(intsts & inten & INT_STS_SW_INT_)) { | ||
1520 | temp = smsc911x_reg_read(pdata, INT_EN); | ||
1521 | temp &= (~INT_EN_SW_INT_EN_); | ||
1522 | smsc911x_reg_write(pdata, INT_EN, temp); | ||
1523 | smsc911x_reg_write(pdata, INT_STS, INT_STS_SW_INT_); | ||
1524 | pdata->software_irq_signal = 1; | ||
1525 | smp_wmb(); | ||
1526 | serviced = IRQ_HANDLED; | ||
1527 | } | ||
1528 | |||
1529 | if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) { | ||
1530 | /* Called when there is a multicast update scheduled and | ||
1531 | * it is now safe to complete the update */ | ||
1532 | SMSC_TRACE(pdata, intr, "RX Stop interrupt"); | ||
1533 | smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_); | ||
1534 | if (pdata->multicast_update_pending) | ||
1535 | smsc911x_rx_multicast_update_workaround(pdata); | ||
1536 | serviced = IRQ_HANDLED; | ||
1537 | } | ||
1538 | |||
1539 | if (intsts & inten & INT_STS_TDFA_) { | ||
1540 | temp = smsc911x_reg_read(pdata, FIFO_INT); | ||
1541 | temp |= FIFO_INT_TX_AVAIL_LEVEL_; | ||
1542 | smsc911x_reg_write(pdata, FIFO_INT, temp); | ||
1543 | smsc911x_reg_write(pdata, INT_STS, INT_STS_TDFA_); | ||
1544 | netif_wake_queue(dev); | ||
1545 | serviced = IRQ_HANDLED; | ||
1546 | } | ||
1547 | |||
1548 | if (unlikely(intsts & inten & INT_STS_RXE_)) { | ||
1549 | SMSC_TRACE(pdata, intr, "RX Error interrupt"); | ||
1550 | smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_); | ||
1551 | serviced = IRQ_HANDLED; | ||
1552 | } | ||
1553 | |||
1554 | if (likely(intsts & inten & INT_STS_RSFL_)) { | ||
1555 | if (likely(napi_schedule_prep(&pdata->napi))) { | ||
1556 | /* Disable Rx interrupts */ | ||
1557 | temp = smsc911x_reg_read(pdata, INT_EN); | ||
1558 | temp &= (~INT_EN_RSFL_EN_); | ||
1559 | smsc911x_reg_write(pdata, INT_EN, temp); | ||
1560 | /* Schedule a NAPI poll */ | ||
1561 | __napi_schedule(&pdata->napi); | ||
1562 | } else { | ||
1563 | SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed"); | ||
1564 | } | ||
1565 | serviced = IRQ_HANDLED; | ||
1566 | } | ||
1567 | |||
1568 | return serviced; | ||
1569 | } | ||
1570 | |||
1517 | static int smsc911x_open(struct net_device *dev) | 1571 | static int smsc911x_open(struct net_device *dev) |
1518 | { | 1572 | { |
1519 | struct smsc911x_data *pdata = netdev_priv(dev); | 1573 | struct smsc911x_data *pdata = netdev_priv(dev); |
1520 | unsigned int timeout; | 1574 | unsigned int timeout; |
1521 | unsigned int temp; | 1575 | unsigned int temp; |
1522 | unsigned int intcfg; | 1576 | unsigned int intcfg; |
1577 | int retval; | ||
1578 | int irq_flags; | ||
1523 | 1579 | ||
1524 | /* if the phy is not yet registered, retry later*/ | 1580 | /* find and start the given phy */ |
1525 | if (!dev->phydev) { | 1581 | if (!dev->phydev) { |
1526 | SMSC_WARN(pdata, hw, "phy_dev is NULL"); | 1582 | retval = smsc911x_mii_probe(dev); |
1527 | return -EAGAIN; | 1583 | if (retval < 0) { |
1584 | SMSC_WARN(pdata, probe, "Error starting phy"); | ||
1585 | goto out; | ||
1586 | } | ||
1528 | } | 1587 | } |
1529 | 1588 | ||
1530 | /* Reset the LAN911x */ | 1589 | /* Reset the LAN911x */ |
1531 | if (smsc911x_soft_reset(pdata)) { | 1590 | retval = smsc911x_soft_reset(pdata); |
1591 | if (retval) { | ||
1532 | SMSC_WARN(pdata, hw, "soft reset failed"); | 1592 | SMSC_WARN(pdata, hw, "soft reset failed"); |
1533 | return -EIO; | 1593 | goto mii_free_out; |
1534 | } | 1594 | } |
1535 | 1595 | ||
1536 | smsc911x_reg_write(pdata, HW_CFG, 0x00050000); | 1596 | smsc911x_reg_write(pdata, HW_CFG, 0x00050000); |
@@ -1586,6 +1646,15 @@ static int smsc911x_open(struct net_device *dev) | |||
1586 | pdata->software_irq_signal = 0; | 1646 | pdata->software_irq_signal = 0; |
1587 | smp_wmb(); | 1647 | smp_wmb(); |
1588 | 1648 | ||
1649 | irq_flags = irq_get_trigger_type(dev->irq); | ||
1650 | retval = request_irq(dev->irq, smsc911x_irqhandler, | ||
1651 | irq_flags | IRQF_SHARED, dev->name, dev); | ||
1652 | if (retval) { | ||
1653 | SMSC_WARN(pdata, probe, | ||
1654 | "Unable to claim requested irq: %d", dev->irq); | ||
1655 | goto mii_free_out; | ||
1656 | } | ||
1657 | |||
1589 | temp = smsc911x_reg_read(pdata, INT_EN); | 1658 | temp = smsc911x_reg_read(pdata, INT_EN); |
1590 | temp |= INT_EN_SW_INT_EN_; | 1659 | temp |= INT_EN_SW_INT_EN_; |
1591 | smsc911x_reg_write(pdata, INT_EN, temp); | 1660 | smsc911x_reg_write(pdata, INT_EN, temp); |
@@ -1600,7 +1669,8 @@ static int smsc911x_open(struct net_device *dev) | |||
1600 | if (!pdata->software_irq_signal) { | 1669 | if (!pdata->software_irq_signal) { |
1601 | netdev_warn(dev, "ISR failed signaling test (IRQ %d)\n", | 1670 | netdev_warn(dev, "ISR failed signaling test (IRQ %d)\n", |
1602 | dev->irq); | 1671 | dev->irq); |
1603 | return -ENODEV; | 1672 | retval = -ENODEV; |
1673 | goto irq_stop_out; | ||
1604 | } | 1674 | } |
1605 | SMSC_TRACE(pdata, ifup, "IRQ handler passed test using IRQ %d", | 1675 | SMSC_TRACE(pdata, ifup, "IRQ handler passed test using IRQ %d", |
1606 | dev->irq); | 1676 | dev->irq); |
@@ -1646,6 +1716,14 @@ static int smsc911x_open(struct net_device *dev) | |||
1646 | 1716 | ||
1647 | netif_start_queue(dev); | 1717 | netif_start_queue(dev); |
1648 | return 0; | 1718 | return 0; |
1719 | |||
1720 | irq_stop_out: | ||
1721 | free_irq(dev->irq, dev); | ||
1722 | mii_free_out: | ||
1723 | phy_disconnect(dev->phydev); | ||
1724 | dev->phydev = NULL; | ||
1725 | out: | ||
1726 | return retval; | ||
1649 | } | 1727 | } |
1650 | 1728 | ||
1651 | /* Entry point for stopping the interface */ | 1729 | /* Entry point for stopping the interface */ |
@@ -1667,9 +1745,15 @@ static int smsc911x_stop(struct net_device *dev) | |||
1667 | dev->stats.rx_dropped += smsc911x_reg_read(pdata, RX_DROP); | 1745 | dev->stats.rx_dropped += smsc911x_reg_read(pdata, RX_DROP); |
1668 | smsc911x_tx_update_txcounters(dev); | 1746 | smsc911x_tx_update_txcounters(dev); |
1669 | 1747 | ||
1748 | free_irq(dev->irq, dev); | ||
1749 | |||
1670 | /* Bring the PHY down */ | 1750 | /* Bring the PHY down */ |
1671 | if (dev->phydev) | 1751 | if (dev->phydev) { |
1672 | phy_stop(dev->phydev); | 1752 | phy_stop(dev->phydev); |
1753 | phy_disconnect(dev->phydev); | ||
1754 | dev->phydev = NULL; | ||
1755 | } | ||
1756 | netif_carrier_off(dev); | ||
1673 | 1757 | ||
1674 | SMSC_TRACE(pdata, ifdown, "Interface stopped"); | 1758 | SMSC_TRACE(pdata, ifdown, "Interface stopped"); |
1675 | return 0; | 1759 | return 0; |
@@ -1811,67 +1895,6 @@ static void smsc911x_set_multicast_list(struct net_device *dev) | |||
1811 | spin_unlock_irqrestore(&pdata->mac_lock, flags); | 1895 | spin_unlock_irqrestore(&pdata->mac_lock, flags); |
1812 | } | 1896 | } |
1813 | 1897 | ||
1814 | static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id) | ||
1815 | { | ||
1816 | struct net_device *dev = dev_id; | ||
1817 | struct smsc911x_data *pdata = netdev_priv(dev); | ||
1818 | u32 intsts = smsc911x_reg_read(pdata, INT_STS); | ||
1819 | u32 inten = smsc911x_reg_read(pdata, INT_EN); | ||
1820 | int serviced = IRQ_NONE; | ||
1821 | u32 temp; | ||
1822 | |||
1823 | if (unlikely(intsts & inten & INT_STS_SW_INT_)) { | ||
1824 | temp = smsc911x_reg_read(pdata, INT_EN); | ||
1825 | temp &= (~INT_EN_SW_INT_EN_); | ||
1826 | smsc911x_reg_write(pdata, INT_EN, temp); | ||
1827 | smsc911x_reg_write(pdata, INT_STS, INT_STS_SW_INT_); | ||
1828 | pdata->software_irq_signal = 1; | ||
1829 | smp_wmb(); | ||
1830 | serviced = IRQ_HANDLED; | ||
1831 | } | ||
1832 | |||
1833 | if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) { | ||
1834 | /* Called when there is a multicast update scheduled and | ||
1835 | * it is now safe to complete the update */ | ||
1836 | SMSC_TRACE(pdata, intr, "RX Stop interrupt"); | ||
1837 | smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_); | ||
1838 | if (pdata->multicast_update_pending) | ||
1839 | smsc911x_rx_multicast_update_workaround(pdata); | ||
1840 | serviced = IRQ_HANDLED; | ||
1841 | } | ||
1842 | |||
1843 | if (intsts & inten & INT_STS_TDFA_) { | ||
1844 | temp = smsc911x_reg_read(pdata, FIFO_INT); | ||
1845 | temp |= FIFO_INT_TX_AVAIL_LEVEL_; | ||
1846 | smsc911x_reg_write(pdata, FIFO_INT, temp); | ||
1847 | smsc911x_reg_write(pdata, INT_STS, INT_STS_TDFA_); | ||
1848 | netif_wake_queue(dev); | ||
1849 | serviced = IRQ_HANDLED; | ||
1850 | } | ||
1851 | |||
1852 | if (unlikely(intsts & inten & INT_STS_RXE_)) { | ||
1853 | SMSC_TRACE(pdata, intr, "RX Error interrupt"); | ||
1854 | smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_); | ||
1855 | serviced = IRQ_HANDLED; | ||
1856 | } | ||
1857 | |||
1858 | if (likely(intsts & inten & INT_STS_RSFL_)) { | ||
1859 | if (likely(napi_schedule_prep(&pdata->napi))) { | ||
1860 | /* Disable Rx interrupts */ | ||
1861 | temp = smsc911x_reg_read(pdata, INT_EN); | ||
1862 | temp &= (~INT_EN_RSFL_EN_); | ||
1863 | smsc911x_reg_write(pdata, INT_EN, temp); | ||
1864 | /* Schedule a NAPI poll */ | ||
1865 | __napi_schedule(&pdata->napi); | ||
1866 | } else { | ||
1867 | SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed"); | ||
1868 | } | ||
1869 | serviced = IRQ_HANDLED; | ||
1870 | } | ||
1871 | |||
1872 | return serviced; | ||
1873 | } | ||
1874 | |||
1875 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1898 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1876 | static void smsc911x_poll_controller(struct net_device *dev) | 1899 | static void smsc911x_poll_controller(struct net_device *dev) |
1877 | { | 1900 | { |
@@ -2291,16 +2314,14 @@ static int smsc911x_drv_remove(struct platform_device *pdev) | |||
2291 | pdata = netdev_priv(dev); | 2314 | pdata = netdev_priv(dev); |
2292 | BUG_ON(!pdata); | 2315 | BUG_ON(!pdata); |
2293 | BUG_ON(!pdata->ioaddr); | 2316 | BUG_ON(!pdata->ioaddr); |
2294 | BUG_ON(!dev->phydev); | 2317 | WARN_ON(dev->phydev); |
2295 | 2318 | ||
2296 | SMSC_TRACE(pdata, ifdown, "Stopping driver"); | 2319 | SMSC_TRACE(pdata, ifdown, "Stopping driver"); |
2297 | 2320 | ||
2298 | phy_disconnect(dev->phydev); | ||
2299 | mdiobus_unregister(pdata->mii_bus); | 2321 | mdiobus_unregister(pdata->mii_bus); |
2300 | mdiobus_free(pdata->mii_bus); | 2322 | mdiobus_free(pdata->mii_bus); |
2301 | 2323 | ||
2302 | unregister_netdev(dev); | 2324 | unregister_netdev(dev); |
2303 | free_irq(dev->irq, dev); | ||
2304 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, | 2325 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, |
2305 | "smsc911x-memory"); | 2326 | "smsc911x-memory"); |
2306 | if (!res) | 2327 | if (!res) |
@@ -2385,8 +2406,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev) | |||
2385 | struct smsc911x_data *pdata; | 2406 | struct smsc911x_data *pdata; |
2386 | struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev); | 2407 | struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev); |
2387 | struct resource *res; | 2408 | struct resource *res; |
2388 | unsigned int intcfg = 0; | 2409 | int res_size, irq; |
2389 | int res_size, irq, irq_flags; | ||
2390 | int retval; | 2410 | int retval; |
2391 | 2411 | ||
2392 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, | 2412 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, |
@@ -2425,7 +2445,6 @@ static int smsc911x_drv_probe(struct platform_device *pdev) | |||
2425 | 2445 | ||
2426 | pdata = netdev_priv(dev); | 2446 | pdata = netdev_priv(dev); |
2427 | dev->irq = irq; | 2447 | dev->irq = irq; |
2428 | irq_flags = irq_get_trigger_type(irq); | ||
2429 | pdata->ioaddr = ioremap_nocache(res->start, res_size); | 2448 | pdata->ioaddr = ioremap_nocache(res->start, res_size); |
2430 | 2449 | ||
2431 | pdata->dev = dev; | 2450 | pdata->dev = dev; |
@@ -2472,43 +2491,23 @@ static int smsc911x_drv_probe(struct platform_device *pdev) | |||
2472 | if (retval < 0) | 2491 | if (retval < 0) |
2473 | goto out_disable_resources; | 2492 | goto out_disable_resources; |
2474 | 2493 | ||
2475 | /* configure irq polarity and type before connecting isr */ | 2494 | netif_carrier_off(dev); |
2476 | if (pdata->config.irq_polarity == SMSC911X_IRQ_POLARITY_ACTIVE_HIGH) | ||
2477 | intcfg |= INT_CFG_IRQ_POL_; | ||
2478 | |||
2479 | if (pdata->config.irq_type == SMSC911X_IRQ_TYPE_PUSH_PULL) | ||
2480 | intcfg |= INT_CFG_IRQ_TYPE_; | ||
2481 | |||
2482 | smsc911x_reg_write(pdata, INT_CFG, intcfg); | ||
2483 | |||
2484 | /* Ensure interrupts are globally disabled before connecting ISR */ | ||
2485 | smsc911x_disable_irq_chip(dev); | ||
2486 | 2495 | ||
2487 | retval = request_irq(dev->irq, smsc911x_irqhandler, | 2496 | retval = smsc911x_mii_init(pdev, dev); |
2488 | irq_flags | IRQF_SHARED, dev->name, dev); | ||
2489 | if (retval) { | 2497 | if (retval) { |
2490 | SMSC_WARN(pdata, probe, | 2498 | SMSC_WARN(pdata, probe, "Error %i initialising mii", retval); |
2491 | "Unable to claim requested irq: %d", dev->irq); | ||
2492 | goto out_disable_resources; | 2499 | goto out_disable_resources; |
2493 | } | 2500 | } |
2494 | 2501 | ||
2495 | netif_carrier_off(dev); | ||
2496 | |||
2497 | retval = register_netdev(dev); | 2502 | retval = register_netdev(dev); |
2498 | if (retval) { | 2503 | if (retval) { |
2499 | SMSC_WARN(pdata, probe, "Error %i registering device", retval); | 2504 | SMSC_WARN(pdata, probe, "Error %i registering device", retval); |
2500 | goto out_free_irq; | 2505 | goto out_disable_resources; |
2501 | } else { | 2506 | } else { |
2502 | SMSC_TRACE(pdata, probe, | 2507 | SMSC_TRACE(pdata, probe, |
2503 | "Network interface: \"%s\"", dev->name); | 2508 | "Network interface: \"%s\"", dev->name); |
2504 | } | 2509 | } |
2505 | 2510 | ||
2506 | retval = smsc911x_mii_init(pdev, dev); | ||
2507 | if (retval) { | ||
2508 | SMSC_WARN(pdata, probe, "Error %i initialising mii", retval); | ||
2509 | goto out_unregister_netdev_5; | ||
2510 | } | ||
2511 | |||
2512 | spin_lock_irq(&pdata->mac_lock); | 2511 | spin_lock_irq(&pdata->mac_lock); |
2513 | 2512 | ||
2514 | /* Check if mac address has been specified when bringing interface up */ | 2513 | /* Check if mac address has been specified when bringing interface up */ |
@@ -2544,10 +2543,6 @@ static int smsc911x_drv_probe(struct platform_device *pdev) | |||
2544 | 2543 | ||
2545 | return 0; | 2544 | return 0; |
2546 | 2545 | ||
2547 | out_unregister_netdev_5: | ||
2548 | unregister_netdev(dev); | ||
2549 | out_free_irq: | ||
2550 | free_irq(dev->irq, dev); | ||
2551 | out_disable_resources: | 2546 | out_disable_resources: |
2552 | pm_runtime_put(&pdev->dev); | 2547 | pm_runtime_put(&pdev->dev); |
2553 | pm_runtime_disable(&pdev->dev); | 2548 | pm_runtime_disable(&pdev->dev); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index cbefe9e2207c..885a5e64519d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | |||
@@ -261,7 +261,7 @@ static void dwmac1000_pmt(struct mac_device_info *hw, unsigned long mode) | |||
261 | } | 261 | } |
262 | if (mode & WAKE_UCAST) { | 262 | if (mode & WAKE_UCAST) { |
263 | pr_debug("GMAC: WOL on global unicast\n"); | 263 | pr_debug("GMAC: WOL on global unicast\n"); |
264 | pmt |= global_unicast; | 264 | pmt |= power_down | global_unicast | wake_up_frame_en; |
265 | } | 265 | } |
266 | 266 | ||
267 | writel(pmt, ioaddr + GMAC_PMT); | 267 | writel(pmt, ioaddr + GMAC_PMT); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index df5580dcdfed..51019b794be5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | |||
@@ -102,7 +102,7 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode) | |||
102 | } | 102 | } |
103 | if (mode & WAKE_UCAST) { | 103 | if (mode & WAKE_UCAST) { |
104 | pr_debug("GMAC: WOL on global unicast\n"); | 104 | pr_debug("GMAC: WOL on global unicast\n"); |
105 | pmt |= global_unicast; | 105 | pmt |= power_down | global_unicast | wake_up_frame_en; |
106 | } | 106 | } |
107 | 107 | ||
108 | writel(pmt, ioaddr + GMAC_PMT); | 108 | writel(pmt, ioaddr + GMAC_PMT); |
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c index 9f159a775af3..4490ebaed127 100644 --- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c +++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c | |||
@@ -1246,7 +1246,7 @@ static int dwceqos_mii_init(struct net_local *lp) | |||
1246 | lp->mii_bus->read = &dwceqos_mdio_read; | 1246 | lp->mii_bus->read = &dwceqos_mdio_read; |
1247 | lp->mii_bus->write = &dwceqos_mdio_write; | 1247 | lp->mii_bus->write = &dwceqos_mdio_write; |
1248 | lp->mii_bus->priv = lp; | 1248 | lp->mii_bus->priv = lp; |
1249 | lp->mii_bus->parent = &lp->ndev->dev; | 1249 | lp->mii_bus->parent = &lp->pdev->dev; |
1250 | 1250 | ||
1251 | of_address_to_resource(lp->pdev->dev.of_node, 0, &res); | 1251 | of_address_to_resource(lp->pdev->dev.of_node, 0, &res); |
1252 | snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx", | 1252 | snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx", |
@@ -1622,13 +1622,7 @@ static void dwceqos_init_hw(struct net_local *lp) | |||
1622 | DWCEQOS_MMC_CTRL_RSTONRD); | 1622 | DWCEQOS_MMC_CTRL_RSTONRD); |
1623 | dwceqos_enable_mmc_interrupt(lp); | 1623 | dwceqos_enable_mmc_interrupt(lp); |
1624 | 1624 | ||
1625 | /* Enable Interrupts */ | 1625 | dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, 0); |
1626 | dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, | ||
1627 | DWCEQOS_DMA_CH0_IE_NIE | | ||
1628 | DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE | | ||
1629 | DWCEQOS_DMA_CH0_IE_AIE | | ||
1630 | DWCEQOS_DMA_CH0_IE_FBEE); | ||
1631 | |||
1632 | dwceqos_write(lp, REG_DWCEQOS_MAC_IE, 0); | 1626 | dwceqos_write(lp, REG_DWCEQOS_MAC_IE, 0); |
1633 | 1627 | ||
1634 | dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, DWCEQOS_MAC_CFG_IPC | | 1628 | dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, DWCEQOS_MAC_CFG_IPC | |
@@ -1905,6 +1899,15 @@ static int dwceqos_open(struct net_device *ndev) | |||
1905 | netif_start_queue(ndev); | 1899 | netif_start_queue(ndev); |
1906 | tasklet_enable(&lp->tx_bdreclaim_tasklet); | 1900 | tasklet_enable(&lp->tx_bdreclaim_tasklet); |
1907 | 1901 | ||
1902 | /* Enable Interrupts -- do this only after we enable NAPI and the | ||
1903 | * tasklet. | ||
1904 | */ | ||
1905 | dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, | ||
1906 | DWCEQOS_DMA_CH0_IE_NIE | | ||
1907 | DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE | | ||
1908 | DWCEQOS_DMA_CH0_IE_AIE | | ||
1909 | DWCEQOS_DMA_CH0_IE_FBEE); | ||
1910 | |||
1908 | return 0; | 1911 | return 0; |
1909 | } | 1912 | } |
1910 | 1913 | ||
@@ -2850,25 +2853,17 @@ static int dwceqos_probe(struct platform_device *pdev) | |||
2850 | 2853 | ||
2851 | ndev->features = ndev->hw_features; | 2854 | ndev->features = ndev->hw_features; |
2852 | 2855 | ||
2853 | netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT); | ||
2854 | |||
2855 | ret = register_netdev(ndev); | ||
2856 | if (ret) { | ||
2857 | dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); | ||
2858 | goto err_out_clk_dis_aper; | ||
2859 | } | ||
2860 | |||
2861 | lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk"); | 2856 | lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk"); |
2862 | if (IS_ERR(lp->phy_ref_clk)) { | 2857 | if (IS_ERR(lp->phy_ref_clk)) { |
2863 | dev_err(&pdev->dev, "phy_ref_clk clock not found.\n"); | 2858 | dev_err(&pdev->dev, "phy_ref_clk clock not found.\n"); |
2864 | ret = PTR_ERR(lp->phy_ref_clk); | 2859 | ret = PTR_ERR(lp->phy_ref_clk); |
2865 | goto err_out_unregister_netdev; | 2860 | goto err_out_clk_dis_aper; |
2866 | } | 2861 | } |
2867 | 2862 | ||
2868 | ret = clk_prepare_enable(lp->phy_ref_clk); | 2863 | ret = clk_prepare_enable(lp->phy_ref_clk); |
2869 | if (ret) { | 2864 | if (ret) { |
2870 | dev_err(&pdev->dev, "Unable to enable device clock.\n"); | 2865 | dev_err(&pdev->dev, "Unable to enable device clock.\n"); |
2871 | goto err_out_unregister_netdev; | 2866 | goto err_out_clk_dis_aper; |
2872 | } | 2867 | } |
2873 | 2868 | ||
2874 | lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node, | 2869 | lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node, |
@@ -2877,7 +2872,7 @@ static int dwceqos_probe(struct platform_device *pdev) | |||
2877 | ret = of_phy_register_fixed_link(lp->pdev->dev.of_node); | 2872 | ret = of_phy_register_fixed_link(lp->pdev->dev.of_node); |
2878 | if (ret < 0) { | 2873 | if (ret < 0) { |
2879 | dev_err(&pdev->dev, "invalid fixed-link"); | 2874 | dev_err(&pdev->dev, "invalid fixed-link"); |
2880 | goto err_out_unregister_clk_notifier; | 2875 | goto err_out_clk_dis_phy; |
2881 | } | 2876 | } |
2882 | 2877 | ||
2883 | lp->phy_node = of_node_get(lp->pdev->dev.of_node); | 2878 | lp->phy_node = of_node_get(lp->pdev->dev.of_node); |
@@ -2886,7 +2881,7 @@ static int dwceqos_probe(struct platform_device *pdev) | |||
2886 | ret = of_get_phy_mode(lp->pdev->dev.of_node); | 2881 | ret = of_get_phy_mode(lp->pdev->dev.of_node); |
2887 | if (ret < 0) { | 2882 | if (ret < 0) { |
2888 | dev_err(&lp->pdev->dev, "error in getting phy i/f\n"); | 2883 | dev_err(&lp->pdev->dev, "error in getting phy i/f\n"); |
2889 | goto err_out_unregister_clk_notifier; | 2884 | goto err_out_clk_dis_phy; |
2890 | } | 2885 | } |
2891 | 2886 | ||
2892 | lp->phy_interface = ret; | 2887 | lp->phy_interface = ret; |
@@ -2894,14 +2889,14 @@ static int dwceqos_probe(struct platform_device *pdev) | |||
2894 | ret = dwceqos_mii_init(lp); | 2889 | ret = dwceqos_mii_init(lp); |
2895 | if (ret) { | 2890 | if (ret) { |
2896 | dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n"); | 2891 | dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n"); |
2897 | goto err_out_unregister_clk_notifier; | 2892 | goto err_out_clk_dis_phy; |
2898 | } | 2893 | } |
2899 | 2894 | ||
2900 | ret = dwceqos_mii_probe(ndev); | 2895 | ret = dwceqos_mii_probe(ndev); |
2901 | if (ret != 0) { | 2896 | if (ret != 0) { |
2902 | netdev_err(ndev, "mii_probe fail.\n"); | 2897 | netdev_err(ndev, "mii_probe fail.\n"); |
2903 | ret = -ENXIO; | 2898 | ret = -ENXIO; |
2904 | goto err_out_unregister_clk_notifier; | 2899 | goto err_out_clk_dis_phy; |
2905 | } | 2900 | } |
2906 | 2901 | ||
2907 | dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0); | 2902 | dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0); |
@@ -2919,7 +2914,7 @@ static int dwceqos_probe(struct platform_device *pdev) | |||
2919 | if (ret) { | 2914 | if (ret) { |
2920 | dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n", | 2915 | dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n", |
2921 | ret); | 2916 | ret); |
2922 | goto err_out_unregister_clk_notifier; | 2917 | goto err_out_clk_dis_phy; |
2923 | } | 2918 | } |
2924 | dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n", | 2919 | dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n", |
2925 | pdev->id, ndev->base_addr, ndev->irq); | 2920 | pdev->id, ndev->base_addr, ndev->irq); |
@@ -2929,18 +2924,24 @@ static int dwceqos_probe(struct platform_device *pdev) | |||
2929 | if (ret) { | 2924 | if (ret) { |
2930 | dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n", | 2925 | dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n", |
2931 | ndev->irq, ret); | 2926 | ndev->irq, ret); |
2932 | goto err_out_unregister_clk_notifier; | 2927 | goto err_out_clk_dis_phy; |
2933 | } | 2928 | } |
2934 | 2929 | ||
2935 | if (netif_msg_probe(lp)) | 2930 | if (netif_msg_probe(lp)) |
2936 | netdev_dbg(ndev, "net_local@%p\n", lp); | 2931 | netdev_dbg(ndev, "net_local@%p\n", lp); |
2937 | 2932 | ||
2933 | netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT); | ||
2934 | |||
2935 | ret = register_netdev(ndev); | ||
2936 | if (ret) { | ||
2937 | dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); | ||
2938 | goto err_out_clk_dis_phy; | ||
2939 | } | ||
2940 | |||
2938 | return 0; | 2941 | return 0; |
2939 | 2942 | ||
2940 | err_out_unregister_clk_notifier: | 2943 | err_out_clk_dis_phy: |
2941 | clk_disable_unprepare(lp->phy_ref_clk); | 2944 | clk_disable_unprepare(lp->phy_ref_clk); |
2942 | err_out_unregister_netdev: | ||
2943 | unregister_netdev(ndev); | ||
2944 | err_out_clk_dis_aper: | 2945 | err_out_clk_dis_aper: |
2945 | clk_disable_unprepare(lp->apb_pclk); | 2946 | clk_disable_unprepare(lp->apb_pclk); |
2946 | err_out_free_netdev: | 2947 | err_out_free_netdev: |
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index 7452b5f9d024..7108c68f16d3 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c | |||
@@ -1987,7 +1987,7 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1987 | if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) { | 1987 | if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) { |
1988 | err = pci_enable_msi(pdev); | 1988 | err = pci_enable_msi(pdev); |
1989 | if (err) | 1989 | if (err) |
1990 | pr_err("Can't eneble msi. error is %d\n", err); | 1990 | pr_err("Can't enable msi. error is %d\n", err); |
1991 | else | 1991 | else |
1992 | nic->irq_type = IRQ_MSI; | 1992 | nic->irq_type = IRQ_MSI; |
1993 | } else | 1993 | } else |
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index c51f34693eae..f85d605e4560 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
@@ -734,6 +734,7 @@ static void cpsw_rx_handler(void *token, int len, int status) | |||
734 | netif_receive_skb(skb); | 734 | netif_receive_skb(skb); |
735 | ndev->stats.rx_bytes += len; | 735 | ndev->stats.rx_bytes += len; |
736 | ndev->stats.rx_packets++; | 736 | ndev->stats.rx_packets++; |
737 | kmemleak_not_leak(new_skb); | ||
737 | } else { | 738 | } else { |
738 | ndev->stats.rx_dropped++; | 739 | ndev->stats.rx_dropped++; |
739 | new_skb = skb; | 740 | new_skb = skb; |
@@ -1325,6 +1326,7 @@ static int cpsw_ndo_open(struct net_device *ndev) | |||
1325 | kfree_skb(skb); | 1326 | kfree_skb(skb); |
1326 | goto err_cleanup; | 1327 | goto err_cleanup; |
1327 | } | 1328 | } |
1329 | kmemleak_not_leak(skb); | ||
1328 | } | 1330 | } |
1329 | /* continue even if we didn't manage to submit all | 1331 | /* continue even if we didn't manage to submit all |
1330 | * receive descs | 1332 | * receive descs |
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index 01a77145a0fa..8fd131207ee1 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c | |||
@@ -166,6 +166,7 @@ static struct platform_driver tsi_eth_driver = { | |||
166 | 166 | ||
167 | static void tsi108_timed_checker(unsigned long dev_ptr); | 167 | static void tsi108_timed_checker(unsigned long dev_ptr); |
168 | 168 | ||
169 | #ifdef DEBUG | ||
169 | static void dump_eth_one(struct net_device *dev) | 170 | static void dump_eth_one(struct net_device *dev) |
170 | { | 171 | { |
171 | struct tsi108_prv_data *data = netdev_priv(dev); | 172 | struct tsi108_prv_data *data = netdev_priv(dev); |
@@ -190,6 +191,7 @@ static void dump_eth_one(struct net_device *dev) | |||
190 | TSI_READ(TSI108_EC_RXESTAT), | 191 | TSI_READ(TSI108_EC_RXESTAT), |
191 | TSI_READ(TSI108_EC_RXERR), data->rxpending); | 192 | TSI_READ(TSI108_EC_RXERR), data->rxpending); |
192 | } | 193 | } |
194 | #endif | ||
193 | 195 | ||
194 | /* Synchronization is needed between the thread and up/down events. | 196 | /* Synchronization is needed between the thread and up/down events. |
195 | * Note that the PHY is accessed through the same registers for both | 197 | * Note that the PHY is accessed through the same registers for both |
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 3cee84a24815..93dc10b10c09 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c | |||
@@ -1131,11 +1131,13 @@ static int xemaclite_of_probe(struct platform_device *ofdev) | |||
1131 | lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong"); | 1131 | lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong"); |
1132 | mac_address = of_get_mac_address(ofdev->dev.of_node); | 1132 | mac_address = of_get_mac_address(ofdev->dev.of_node); |
1133 | 1133 | ||
1134 | if (mac_address) | 1134 | if (mac_address) { |
1135 | /* Set the MAC address. */ | 1135 | /* Set the MAC address. */ |
1136 | memcpy(ndev->dev_addr, mac_address, ETH_ALEN); | 1136 | memcpy(ndev->dev_addr, mac_address, ETH_ALEN); |
1137 | else | 1137 | } else { |
1138 | dev_warn(dev, "No MAC address found\n"); | 1138 | dev_warn(dev, "No MAC address found, using random\n"); |
1139 | eth_hw_addr_random(ndev); | ||
1140 | } | ||
1139 | 1141 | ||
1140 | /* Clear the Tx CSR's in case this is a restart */ | 1142 | /* Clear the Tx CSR's in case this is a restart */ |
1141 | __raw_writel(0, lp->base_addr + XEL_TSR_OFFSET); | 1143 | __raw_writel(0, lp->base_addr + XEL_TSR_OFFSET); |
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 467fb8b4d083..591af71eae56 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h | |||
@@ -644,12 +644,6 @@ struct netvsc_reconfig { | |||
644 | u32 event; | 644 | u32 event; |
645 | }; | 645 | }; |
646 | 646 | ||
647 | struct garp_wrk { | ||
648 | struct work_struct dwrk; | ||
649 | struct net_device *netdev; | ||
650 | struct netvsc_device *netvsc_dev; | ||
651 | }; | ||
652 | |||
653 | /* The context of the netvsc device */ | 647 | /* The context of the netvsc device */ |
654 | struct net_device_context { | 648 | struct net_device_context { |
655 | /* point back to our device context */ | 649 | /* point back to our device context */ |
@@ -667,7 +661,6 @@ struct net_device_context { | |||
667 | 661 | ||
668 | struct work_struct work; | 662 | struct work_struct work; |
669 | u32 msg_enable; /* debug level */ | 663 | u32 msg_enable; /* debug level */ |
670 | struct garp_wrk gwrk; | ||
671 | 664 | ||
672 | struct netvsc_stats __percpu *tx_stats; | 665 | struct netvsc_stats __percpu *tx_stats; |
673 | struct netvsc_stats __percpu *rx_stats; | 666 | struct netvsc_stats __percpu *rx_stats; |
@@ -678,6 +671,15 @@ struct net_device_context { | |||
678 | 671 | ||
679 | /* the device is going away */ | 672 | /* the device is going away */ |
680 | bool start_remove; | 673 | bool start_remove; |
674 | |||
675 | /* State to manage the associated VF interface. */ | ||
676 | struct net_device *vf_netdev; | ||
677 | bool vf_inject; | ||
678 | atomic_t vf_use_cnt; | ||
679 | /* 1: allocated, serial number is valid. 0: not allocated */ | ||
680 | u32 vf_alloc; | ||
681 | /* Serial number of the VF to team with */ | ||
682 | u32 vf_serial; | ||
681 | }; | 683 | }; |
682 | 684 | ||
683 | /* Per netvsc device */ | 685 | /* Per netvsc device */ |
@@ -733,15 +735,7 @@ struct netvsc_device { | |||
733 | u32 max_pkt; /* max number of pkt in one send, e.g. 8 */ | 735 | u32 max_pkt; /* max number of pkt in one send, e.g. 8 */ |
734 | u32 pkt_align; /* alignment bytes, e.g. 8 */ | 736 | u32 pkt_align; /* alignment bytes, e.g. 8 */ |
735 | 737 | ||
736 | /* 1: allocated, serial number is valid. 0: not allocated */ | ||
737 | u32 vf_alloc; | ||
738 | /* Serial number of the VF to team with */ | ||
739 | u32 vf_serial; | ||
740 | atomic_t open_cnt; | 738 | atomic_t open_cnt; |
741 | /* State to manage the associated VF interface. */ | ||
742 | bool vf_inject; | ||
743 | struct net_device *vf_netdev; | ||
744 | atomic_t vf_use_cnt; | ||
745 | }; | 739 | }; |
746 | 740 | ||
747 | static inline struct netvsc_device * | 741 | static inline struct netvsc_device * |
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 20e09174ff62..410fb8e81376 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c | |||
@@ -77,13 +77,9 @@ static struct netvsc_device *alloc_net_device(void) | |||
77 | init_waitqueue_head(&net_device->wait_drain); | 77 | init_waitqueue_head(&net_device->wait_drain); |
78 | net_device->destroy = false; | 78 | net_device->destroy = false; |
79 | atomic_set(&net_device->open_cnt, 0); | 79 | atomic_set(&net_device->open_cnt, 0); |
80 | atomic_set(&net_device->vf_use_cnt, 0); | ||
81 | net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; | 80 | net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; |
82 | net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; | 81 | net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; |
83 | 82 | ||
84 | net_device->vf_netdev = NULL; | ||
85 | net_device->vf_inject = false; | ||
86 | |||
87 | return net_device; | 83 | return net_device; |
88 | } | 84 | } |
89 | 85 | ||
@@ -1106,16 +1102,16 @@ static void netvsc_send_table(struct hv_device *hdev, | |||
1106 | nvscdev->send_table[i] = tab[i]; | 1102 | nvscdev->send_table[i] = tab[i]; |
1107 | } | 1103 | } |
1108 | 1104 | ||
1109 | static void netvsc_send_vf(struct netvsc_device *nvdev, | 1105 | static void netvsc_send_vf(struct net_device_context *net_device_ctx, |
1110 | struct nvsp_message *nvmsg) | 1106 | struct nvsp_message *nvmsg) |
1111 | { | 1107 | { |
1112 | nvdev->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated; | 1108 | net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated; |
1113 | nvdev->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial; | 1109 | net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial; |
1114 | } | 1110 | } |
1115 | 1111 | ||
1116 | static inline void netvsc_receive_inband(struct hv_device *hdev, | 1112 | static inline void netvsc_receive_inband(struct hv_device *hdev, |
1117 | struct netvsc_device *nvdev, | 1113 | struct net_device_context *net_device_ctx, |
1118 | struct nvsp_message *nvmsg) | 1114 | struct nvsp_message *nvmsg) |
1119 | { | 1115 | { |
1120 | switch (nvmsg->hdr.msg_type) { | 1116 | switch (nvmsg->hdr.msg_type) { |
1121 | case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE: | 1117 | case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE: |
@@ -1123,7 +1119,7 @@ static inline void netvsc_receive_inband(struct hv_device *hdev, | |||
1123 | break; | 1119 | break; |
1124 | 1120 | ||
1125 | case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION: | 1121 | case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION: |
1126 | netvsc_send_vf(nvdev, nvmsg); | 1122 | netvsc_send_vf(net_device_ctx, nvmsg); |
1127 | break; | 1123 | break; |
1128 | } | 1124 | } |
1129 | } | 1125 | } |
@@ -1136,6 +1132,7 @@ static void netvsc_process_raw_pkt(struct hv_device *device, | |||
1136 | struct vmpacket_descriptor *desc) | 1132 | struct vmpacket_descriptor *desc) |
1137 | { | 1133 | { |
1138 | struct nvsp_message *nvmsg; | 1134 | struct nvsp_message *nvmsg; |
1135 | struct net_device_context *net_device_ctx = netdev_priv(ndev); | ||
1139 | 1136 | ||
1140 | nvmsg = (struct nvsp_message *)((unsigned long) | 1137 | nvmsg = (struct nvsp_message *)((unsigned long) |
1141 | desc + (desc->offset8 << 3)); | 1138 | desc + (desc->offset8 << 3)); |
@@ -1150,7 +1147,7 @@ static void netvsc_process_raw_pkt(struct hv_device *device, | |||
1150 | break; | 1147 | break; |
1151 | 1148 | ||
1152 | case VM_PKT_DATA_INBAND: | 1149 | case VM_PKT_DATA_INBAND: |
1153 | netvsc_receive_inband(device, net_device, nvmsg); | 1150 | netvsc_receive_inband(device, net_device_ctx, nvmsg); |
1154 | break; | 1151 | break; |
1155 | 1152 | ||
1156 | default: | 1153 | default: |
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 41bd952cc28d..3ba29fc80d05 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
@@ -658,20 +658,19 @@ int netvsc_recv_callback(struct hv_device *device_obj, | |||
658 | struct sk_buff *skb; | 658 | struct sk_buff *skb; |
659 | struct sk_buff *vf_skb; | 659 | struct sk_buff *vf_skb; |
660 | struct netvsc_stats *rx_stats; | 660 | struct netvsc_stats *rx_stats; |
661 | struct netvsc_device *netvsc_dev = net_device_ctx->nvdev; | ||
662 | u32 bytes_recvd = packet->total_data_buflen; | 661 | u32 bytes_recvd = packet->total_data_buflen; |
663 | int ret = 0; | 662 | int ret = 0; |
664 | 663 | ||
665 | if (!net || net->reg_state != NETREG_REGISTERED) | 664 | if (!net || net->reg_state != NETREG_REGISTERED) |
666 | return NVSP_STAT_FAIL; | 665 | return NVSP_STAT_FAIL; |
667 | 666 | ||
668 | if (READ_ONCE(netvsc_dev->vf_inject)) { | 667 | if (READ_ONCE(net_device_ctx->vf_inject)) { |
669 | atomic_inc(&netvsc_dev->vf_use_cnt); | 668 | atomic_inc(&net_device_ctx->vf_use_cnt); |
670 | if (!READ_ONCE(netvsc_dev->vf_inject)) { | 669 | if (!READ_ONCE(net_device_ctx->vf_inject)) { |
671 | /* | 670 | /* |
672 | * We raced; just move on. | 671 | * We raced; just move on. |
673 | */ | 672 | */ |
674 | atomic_dec(&netvsc_dev->vf_use_cnt); | 673 | atomic_dec(&net_device_ctx->vf_use_cnt); |
675 | goto vf_injection_done; | 674 | goto vf_injection_done; |
676 | } | 675 | } |
677 | 676 | ||
@@ -683,17 +682,19 @@ int netvsc_recv_callback(struct hv_device *device_obj, | |||
683 | * the host). Deliver these via the VF interface | 682 | * the host). Deliver these via the VF interface |
684 | * in the guest. | 683 | * in the guest. |
685 | */ | 684 | */ |
686 | vf_skb = netvsc_alloc_recv_skb(netvsc_dev->vf_netdev, packet, | 685 | vf_skb = netvsc_alloc_recv_skb(net_device_ctx->vf_netdev, |
687 | csum_info, *data, vlan_tci); | 686 | packet, csum_info, *data, |
687 | vlan_tci); | ||
688 | if (vf_skb != NULL) { | 688 | if (vf_skb != NULL) { |
689 | ++netvsc_dev->vf_netdev->stats.rx_packets; | 689 | ++net_device_ctx->vf_netdev->stats.rx_packets; |
690 | netvsc_dev->vf_netdev->stats.rx_bytes += bytes_recvd; | 690 | net_device_ctx->vf_netdev->stats.rx_bytes += |
691 | bytes_recvd; | ||
691 | netif_receive_skb(vf_skb); | 692 | netif_receive_skb(vf_skb); |
692 | } else { | 693 | } else { |
693 | ++net->stats.rx_dropped; | 694 | ++net->stats.rx_dropped; |
694 | ret = NVSP_STAT_FAIL; | 695 | ret = NVSP_STAT_FAIL; |
695 | } | 696 | } |
696 | atomic_dec(&netvsc_dev->vf_use_cnt); | 697 | atomic_dec(&net_device_ctx->vf_use_cnt); |
697 | return ret; | 698 | return ret; |
698 | } | 699 | } |
699 | 700 | ||
@@ -1150,17 +1151,6 @@ static void netvsc_free_netdev(struct net_device *netdev) | |||
1150 | free_netdev(netdev); | 1151 | free_netdev(netdev); |
1151 | } | 1152 | } |
1152 | 1153 | ||
1153 | static void netvsc_notify_peers(struct work_struct *wrk) | ||
1154 | { | ||
1155 | struct garp_wrk *gwrk; | ||
1156 | |||
1157 | gwrk = container_of(wrk, struct garp_wrk, dwrk); | ||
1158 | |||
1159 | netdev_notify_peers(gwrk->netdev); | ||
1160 | |||
1161 | atomic_dec(&gwrk->netvsc_dev->vf_use_cnt); | ||
1162 | } | ||
1163 | |||
1164 | static struct net_device *get_netvsc_net_device(char *mac) | 1154 | static struct net_device *get_netvsc_net_device(char *mac) |
1165 | { | 1155 | { |
1166 | struct net_device *dev, *found = NULL; | 1156 | struct net_device *dev, *found = NULL; |
@@ -1203,7 +1193,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev) | |||
1203 | 1193 | ||
1204 | net_device_ctx = netdev_priv(ndev); | 1194 | net_device_ctx = netdev_priv(ndev); |
1205 | netvsc_dev = net_device_ctx->nvdev; | 1195 | netvsc_dev = net_device_ctx->nvdev; |
1206 | if (netvsc_dev == NULL) | 1196 | if (!netvsc_dev || net_device_ctx->vf_netdev) |
1207 | return NOTIFY_DONE; | 1197 | return NOTIFY_DONE; |
1208 | 1198 | ||
1209 | netdev_info(ndev, "VF registering: %s\n", vf_netdev->name); | 1199 | netdev_info(ndev, "VF registering: %s\n", vf_netdev->name); |
@@ -1211,10 +1201,23 @@ static int netvsc_register_vf(struct net_device *vf_netdev) | |||
1211 | * Take a reference on the module. | 1201 | * Take a reference on the module. |
1212 | */ | 1202 | */ |
1213 | try_module_get(THIS_MODULE); | 1203 | try_module_get(THIS_MODULE); |
1214 | netvsc_dev->vf_netdev = vf_netdev; | 1204 | net_device_ctx->vf_netdev = vf_netdev; |
1215 | return NOTIFY_OK; | 1205 | return NOTIFY_OK; |
1216 | } | 1206 | } |
1217 | 1207 | ||
1208 | static void netvsc_inject_enable(struct net_device_context *net_device_ctx) | ||
1209 | { | ||
1210 | net_device_ctx->vf_inject = true; | ||
1211 | } | ||
1212 | |||
1213 | static void netvsc_inject_disable(struct net_device_context *net_device_ctx) | ||
1214 | { | ||
1215 | net_device_ctx->vf_inject = false; | ||
1216 | |||
1217 | /* Wait for currently active users to drain out. */ | ||
1218 | while (atomic_read(&net_device_ctx->vf_use_cnt) != 0) | ||
1219 | udelay(50); | ||
1220 | } | ||
1218 | 1221 | ||
1219 | static int netvsc_vf_up(struct net_device *vf_netdev) | 1222 | static int netvsc_vf_up(struct net_device *vf_netdev) |
1220 | { | 1223 | { |
@@ -1233,11 +1236,11 @@ static int netvsc_vf_up(struct net_device *vf_netdev) | |||
1233 | net_device_ctx = netdev_priv(ndev); | 1236 | net_device_ctx = netdev_priv(ndev); |
1234 | netvsc_dev = net_device_ctx->nvdev; | 1237 | netvsc_dev = net_device_ctx->nvdev; |
1235 | 1238 | ||
1236 | if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL)) | 1239 | if (!netvsc_dev || !net_device_ctx->vf_netdev) |
1237 | return NOTIFY_DONE; | 1240 | return NOTIFY_DONE; |
1238 | 1241 | ||
1239 | netdev_info(ndev, "VF up: %s\n", vf_netdev->name); | 1242 | netdev_info(ndev, "VF up: %s\n", vf_netdev->name); |
1240 | netvsc_dev->vf_inject = true; | 1243 | netvsc_inject_enable(net_device_ctx); |
1241 | 1244 | ||
1242 | /* | 1245 | /* |
1243 | * Open the device before switching data path. | 1246 | * Open the device before switching data path. |
@@ -1252,15 +1255,8 @@ static int netvsc_vf_up(struct net_device *vf_netdev) | |||
1252 | 1255 | ||
1253 | netif_carrier_off(ndev); | 1256 | netif_carrier_off(ndev); |
1254 | 1257 | ||
1255 | /* | 1258 | /* Now notify peers through VF device. */ |
1256 | * Now notify peers. We are scheduling work to | 1259 | call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, vf_netdev); |
1257 | * notify peers; take a reference to prevent | ||
1258 | * the VF interface from vanishing. | ||
1259 | */ | ||
1260 | atomic_inc(&netvsc_dev->vf_use_cnt); | ||
1261 | net_device_ctx->gwrk.netdev = vf_netdev; | ||
1262 | net_device_ctx->gwrk.netvsc_dev = netvsc_dev; | ||
1263 | schedule_work(&net_device_ctx->gwrk.dwrk); | ||
1264 | 1260 | ||
1265 | return NOTIFY_OK; | 1261 | return NOTIFY_OK; |
1266 | } | 1262 | } |
@@ -1283,29 +1279,18 @@ static int netvsc_vf_down(struct net_device *vf_netdev) | |||
1283 | net_device_ctx = netdev_priv(ndev); | 1279 | net_device_ctx = netdev_priv(ndev); |
1284 | netvsc_dev = net_device_ctx->nvdev; | 1280 | netvsc_dev = net_device_ctx->nvdev; |
1285 | 1281 | ||
1286 | if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL)) | 1282 | if (!netvsc_dev || !net_device_ctx->vf_netdev) |
1287 | return NOTIFY_DONE; | 1283 | return NOTIFY_DONE; |
1288 | 1284 | ||
1289 | netdev_info(ndev, "VF down: %s\n", vf_netdev->name); | 1285 | netdev_info(ndev, "VF down: %s\n", vf_netdev->name); |
1290 | netvsc_dev->vf_inject = false; | 1286 | netvsc_inject_disable(net_device_ctx); |
1291 | /* | ||
1292 | * Wait for currently active users to | ||
1293 | * drain out. | ||
1294 | */ | ||
1295 | |||
1296 | while (atomic_read(&netvsc_dev->vf_use_cnt) != 0) | ||
1297 | udelay(50); | ||
1298 | netvsc_switch_datapath(ndev, false); | 1287 | netvsc_switch_datapath(ndev, false); |
1299 | netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name); | 1288 | netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name); |
1300 | rndis_filter_close(netvsc_dev); | 1289 | rndis_filter_close(netvsc_dev); |
1301 | netif_carrier_on(ndev); | 1290 | netif_carrier_on(ndev); |
1302 | /* | 1291 | |
1303 | * Notify peers. | 1292 | /* Now notify peers through netvsc device. */ |
1304 | */ | 1293 | call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ndev); |
1305 | atomic_inc(&netvsc_dev->vf_use_cnt); | ||
1306 | net_device_ctx->gwrk.netdev = ndev; | ||
1307 | net_device_ctx->gwrk.netvsc_dev = netvsc_dev; | ||
1308 | schedule_work(&net_device_ctx->gwrk.dwrk); | ||
1309 | 1294 | ||
1310 | return NOTIFY_OK; | 1295 | return NOTIFY_OK; |
1311 | } | 1296 | } |
@@ -1327,11 +1312,11 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev) | |||
1327 | 1312 | ||
1328 | net_device_ctx = netdev_priv(ndev); | 1313 | net_device_ctx = netdev_priv(ndev); |
1329 | netvsc_dev = net_device_ctx->nvdev; | 1314 | netvsc_dev = net_device_ctx->nvdev; |
1330 | if (netvsc_dev == NULL) | 1315 | if (!netvsc_dev || !net_device_ctx->vf_netdev) |
1331 | return NOTIFY_DONE; | 1316 | return NOTIFY_DONE; |
1332 | netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name); | 1317 | netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name); |
1333 | 1318 | netvsc_inject_disable(net_device_ctx); | |
1334 | netvsc_dev->vf_netdev = NULL; | 1319 | net_device_ctx->vf_netdev = NULL; |
1335 | module_put(THIS_MODULE); | 1320 | module_put(THIS_MODULE); |
1336 | return NOTIFY_OK; | 1321 | return NOTIFY_OK; |
1337 | } | 1322 | } |
@@ -1377,11 +1362,14 @@ static int netvsc_probe(struct hv_device *dev, | |||
1377 | 1362 | ||
1378 | INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); | 1363 | INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); |
1379 | INIT_WORK(&net_device_ctx->work, do_set_multicast); | 1364 | INIT_WORK(&net_device_ctx->work, do_set_multicast); |
1380 | INIT_WORK(&net_device_ctx->gwrk.dwrk, netvsc_notify_peers); | ||
1381 | 1365 | ||
1382 | spin_lock_init(&net_device_ctx->lock); | 1366 | spin_lock_init(&net_device_ctx->lock); |
1383 | INIT_LIST_HEAD(&net_device_ctx->reconfig_events); | 1367 | INIT_LIST_HEAD(&net_device_ctx->reconfig_events); |
1384 | 1368 | ||
1369 | atomic_set(&net_device_ctx->vf_use_cnt, 0); | ||
1370 | net_device_ctx->vf_netdev = NULL; | ||
1371 | net_device_ctx->vf_inject = false; | ||
1372 | |||
1385 | net->netdev_ops = &device_ops; | 1373 | net->netdev_ops = &device_ops; |
1386 | 1374 | ||
1387 | net->hw_features = NETVSC_HW_FEATURES; | 1375 | net->hw_features = NETVSC_HW_FEATURES; |
@@ -1494,8 +1482,13 @@ static int netvsc_netdev_event(struct notifier_block *this, | |||
1494 | { | 1482 | { |
1495 | struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); | 1483 | struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); |
1496 | 1484 | ||
1497 | /* Avoid Vlan, Bonding dev with same MAC registering as VF */ | 1485 | /* Avoid Vlan dev with same MAC registering as VF */ |
1498 | if (event_dev->priv_flags & (IFF_802_1Q_VLAN | IFF_BONDING)) | 1486 | if (event_dev->priv_flags & IFF_802_1Q_VLAN) |
1487 | return NOTIFY_DONE; | ||
1488 | |||
1489 | /* Avoid Bonding master dev with same MAC registering as VF */ | ||
1490 | if (event_dev->priv_flags & IFF_BONDING && | ||
1491 | event_dev->flags & IFF_MASTER) | ||
1499 | return NOTIFY_DONE; | 1492 | return NOTIFY_DONE; |
1500 | 1493 | ||
1501 | switch (event) { | 1494 | switch (event) { |
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index d13e6e15d7b5..351e701eb043 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c | |||
@@ -270,6 +270,7 @@ struct macsec_dev { | |||
270 | struct pcpu_secy_stats __percpu *stats; | 270 | struct pcpu_secy_stats __percpu *stats; |
271 | struct list_head secys; | 271 | struct list_head secys; |
272 | struct gro_cells gro_cells; | 272 | struct gro_cells gro_cells; |
273 | unsigned int nest_level; | ||
273 | }; | 274 | }; |
274 | 275 | ||
275 | /** | 276 | /** |
@@ -2699,6 +2700,8 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, | |||
2699 | 2700 | ||
2700 | #define MACSEC_FEATURES \ | 2701 | #define MACSEC_FEATURES \ |
2701 | (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST) | 2702 | (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST) |
2703 | static struct lock_class_key macsec_netdev_addr_lock_key; | ||
2704 | |||
2702 | static int macsec_dev_init(struct net_device *dev) | 2705 | static int macsec_dev_init(struct net_device *dev) |
2703 | { | 2706 | { |
2704 | struct macsec_dev *macsec = macsec_priv(dev); | 2707 | struct macsec_dev *macsec = macsec_priv(dev); |
@@ -2910,6 +2913,13 @@ static int macsec_get_iflink(const struct net_device *dev) | |||
2910 | return macsec_priv(dev)->real_dev->ifindex; | 2913 | return macsec_priv(dev)->real_dev->ifindex; |
2911 | } | 2914 | } |
2912 | 2915 | ||
2916 | |||
2917 | static int macsec_get_nest_level(struct net_device *dev) | ||
2918 | { | ||
2919 | return macsec_priv(dev)->nest_level; | ||
2920 | } | ||
2921 | |||
2922 | |||
2913 | static const struct net_device_ops macsec_netdev_ops = { | 2923 | static const struct net_device_ops macsec_netdev_ops = { |
2914 | .ndo_init = macsec_dev_init, | 2924 | .ndo_init = macsec_dev_init, |
2915 | .ndo_uninit = macsec_dev_uninit, | 2925 | .ndo_uninit = macsec_dev_uninit, |
@@ -2923,6 +2933,7 @@ static const struct net_device_ops macsec_netdev_ops = { | |||
2923 | .ndo_start_xmit = macsec_start_xmit, | 2933 | .ndo_start_xmit = macsec_start_xmit, |
2924 | .ndo_get_stats64 = macsec_get_stats64, | 2934 | .ndo_get_stats64 = macsec_get_stats64, |
2925 | .ndo_get_iflink = macsec_get_iflink, | 2935 | .ndo_get_iflink = macsec_get_iflink, |
2936 | .ndo_get_lock_subclass = macsec_get_nest_level, | ||
2926 | }; | 2937 | }; |
2927 | 2938 | ||
2928 | static const struct device_type macsec_type = { | 2939 | static const struct device_type macsec_type = { |
@@ -3047,22 +3058,31 @@ static void macsec_del_dev(struct macsec_dev *macsec) | |||
3047 | } | 3058 | } |
3048 | } | 3059 | } |
3049 | 3060 | ||
3061 | static void macsec_common_dellink(struct net_device *dev, struct list_head *head) | ||
3062 | { | ||
3063 | struct macsec_dev *macsec = macsec_priv(dev); | ||
3064 | struct net_device *real_dev = macsec->real_dev; | ||
3065 | |||
3066 | unregister_netdevice_queue(dev, head); | ||
3067 | list_del_rcu(&macsec->secys); | ||
3068 | macsec_del_dev(macsec); | ||
3069 | netdev_upper_dev_unlink(real_dev, dev); | ||
3070 | |||
3071 | macsec_generation++; | ||
3072 | } | ||
3073 | |||
3050 | static void macsec_dellink(struct net_device *dev, struct list_head *head) | 3074 | static void macsec_dellink(struct net_device *dev, struct list_head *head) |
3051 | { | 3075 | { |
3052 | struct macsec_dev *macsec = macsec_priv(dev); | 3076 | struct macsec_dev *macsec = macsec_priv(dev); |
3053 | struct net_device *real_dev = macsec->real_dev; | 3077 | struct net_device *real_dev = macsec->real_dev; |
3054 | struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); | 3078 | struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); |
3055 | 3079 | ||
3056 | macsec_generation++; | 3080 | macsec_common_dellink(dev, head); |
3057 | 3081 | ||
3058 | unregister_netdevice_queue(dev, head); | ||
3059 | list_del_rcu(&macsec->secys); | ||
3060 | if (list_empty(&rxd->secys)) { | 3082 | if (list_empty(&rxd->secys)) { |
3061 | netdev_rx_handler_unregister(real_dev); | 3083 | netdev_rx_handler_unregister(real_dev); |
3062 | kfree(rxd); | 3084 | kfree(rxd); |
3063 | } | 3085 | } |
3064 | |||
3065 | macsec_del_dev(macsec); | ||
3066 | } | 3086 | } |
3067 | 3087 | ||
3068 | static int register_macsec_dev(struct net_device *real_dev, | 3088 | static int register_macsec_dev(struct net_device *real_dev, |
@@ -3181,6 +3201,16 @@ static int macsec_newlink(struct net *net, struct net_device *dev, | |||
3181 | 3201 | ||
3182 | dev_hold(real_dev); | 3202 | dev_hold(real_dev); |
3183 | 3203 | ||
3204 | macsec->nest_level = dev_get_nest_level(real_dev) + 1; | ||
3205 | netdev_lockdep_set_classes(dev); | ||
3206 | lockdep_set_class_and_subclass(&dev->addr_list_lock, | ||
3207 | &macsec_netdev_addr_lock_key, | ||
3208 | macsec_get_nest_level(dev)); | ||
3209 | |||
3210 | err = netdev_upper_dev_link(real_dev, dev); | ||
3211 | if (err < 0) | ||
3212 | goto unregister; | ||
3213 | |||
3184 | /* need to be already registered so that ->init has run and | 3214 | /* need to be already registered so that ->init has run and |
3185 | * the MAC addr is set | 3215 | * the MAC addr is set |
3186 | */ | 3216 | */ |
@@ -3193,12 +3223,12 @@ static int macsec_newlink(struct net *net, struct net_device *dev, | |||
3193 | 3223 | ||
3194 | if (rx_handler && sci_exists(real_dev, sci)) { | 3224 | if (rx_handler && sci_exists(real_dev, sci)) { |
3195 | err = -EBUSY; | 3225 | err = -EBUSY; |
3196 | goto unregister; | 3226 | goto unlink; |
3197 | } | 3227 | } |
3198 | 3228 | ||
3199 | err = macsec_add_dev(dev, sci, icv_len); | 3229 | err = macsec_add_dev(dev, sci, icv_len); |
3200 | if (err) | 3230 | if (err) |
3201 | goto unregister; | 3231 | goto unlink; |
3202 | 3232 | ||
3203 | if (data) | 3233 | if (data) |
3204 | macsec_changelink_common(dev, data); | 3234 | macsec_changelink_common(dev, data); |
@@ -3213,6 +3243,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev, | |||
3213 | 3243 | ||
3214 | del_dev: | 3244 | del_dev: |
3215 | macsec_del_dev(macsec); | 3245 | macsec_del_dev(macsec); |
3246 | unlink: | ||
3247 | netdev_upper_dev_unlink(real_dev, dev); | ||
3216 | unregister: | 3248 | unregister: |
3217 | unregister_netdevice(dev); | 3249 | unregister_netdevice(dev); |
3218 | return err; | 3250 | return err; |
@@ -3382,8 +3414,12 @@ static int macsec_notify(struct notifier_block *this, unsigned long event, | |||
3382 | 3414 | ||
3383 | rxd = macsec_data_rtnl(real_dev); | 3415 | rxd = macsec_data_rtnl(real_dev); |
3384 | list_for_each_entry_safe(m, n, &rxd->secys, secys) { | 3416 | list_for_each_entry_safe(m, n, &rxd->secys, secys) { |
3385 | macsec_dellink(m->secy.netdev, &head); | 3417 | macsec_common_dellink(m->secy.netdev, &head); |
3386 | } | 3418 | } |
3419 | |||
3420 | netdev_rx_handler_unregister(real_dev); | ||
3421 | kfree(rxd); | ||
3422 | |||
3387 | unregister_netdevice_many(&head); | 3423 | unregister_netdevice_many(&head); |
3388 | break; | 3424 | break; |
3389 | } | 3425 | } |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index cd9b53834bf6..3234fcdea317 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -1315,7 +1315,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, | |||
1315 | vlan->dev = dev; | 1315 | vlan->dev = dev; |
1316 | vlan->port = port; | 1316 | vlan->port = port; |
1317 | vlan->set_features = MACVLAN_FEATURES; | 1317 | vlan->set_features = MACVLAN_FEATURES; |
1318 | vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1; | 1318 | vlan->nest_level = dev_get_nest_level(lowerdev) + 1; |
1319 | 1319 | ||
1320 | vlan->mode = MACVLAN_MODE_VEPA; | 1320 | vlan->mode = MACVLAN_MODE_VEPA; |
1321 | if (data && data[IFLA_MACVLAN_MODE]) | 1321 | if (data && data[IFLA_MACVLAN_MODE]) |
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index a38c0dac514b..070e3290aa6e 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c | |||
@@ -275,7 +275,6 @@ static void macvtap_put_queue(struct macvtap_queue *q) | |||
275 | rtnl_unlock(); | 275 | rtnl_unlock(); |
276 | 276 | ||
277 | synchronize_rcu(); | 277 | synchronize_rcu(); |
278 | skb_array_cleanup(&q->skb_array); | ||
279 | sock_put(&q->sk); | 278 | sock_put(&q->sk); |
280 | } | 279 | } |
281 | 280 | ||
@@ -533,10 +532,8 @@ static void macvtap_sock_write_space(struct sock *sk) | |||
533 | static void macvtap_sock_destruct(struct sock *sk) | 532 | static void macvtap_sock_destruct(struct sock *sk) |
534 | { | 533 | { |
535 | struct macvtap_queue *q = container_of(sk, struct macvtap_queue, sk); | 534 | struct macvtap_queue *q = container_of(sk, struct macvtap_queue, sk); |
536 | struct sk_buff *skb; | ||
537 | 535 | ||
538 | while ((skb = skb_array_consume(&q->skb_array)) != NULL) | 536 | skb_array_cleanup(&q->skb_array); |
539 | kfree_skb(skb); | ||
540 | } | 537 | } |
541 | 538 | ||
542 | static int macvtap_open(struct inode *inode, struct file *file) | 539 | static int macvtap_open(struct inode *inode, struct file *file) |
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 47a64342cc16..b4863e4e522b 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig | |||
@@ -303,6 +303,7 @@ config MDIO_HISI_FEMAC | |||
303 | 303 | ||
304 | config MDIO_XGENE | 304 | config MDIO_XGENE |
305 | tristate "APM X-Gene SoC MDIO bus controller" | 305 | tristate "APM X-Gene SoC MDIO bus controller" |
306 | depends on ARCH_XGENE || COMPILE_TEST | ||
306 | help | 307 | help |
307 | This module provides a driver for the MDIO busses found in the | 308 | This module provides a driver for the MDIO busses found in the |
308 | APM X-Gene SoC's. | 309 | APM X-Gene SoC's. |
diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c index 775674808249..92af182951be 100644 --- a/drivers/net/phy/mdio-xgene.c +++ b/drivers/net/phy/mdio-xgene.c | |||
@@ -424,10 +424,8 @@ static int xgene_mdio_remove(struct platform_device *pdev) | |||
424 | mdiobus_unregister(mdio_bus); | 424 | mdiobus_unregister(mdio_bus); |
425 | mdiobus_free(mdio_bus); | 425 | mdiobus_free(mdio_bus); |
426 | 426 | ||
427 | if (dev->of_node) { | 427 | if (dev->of_node) |
428 | if (IS_ERR(pdata->clk)) | 428 | clk_disable_unprepare(pdata->clk); |
429 | clk_disable_unprepare(pdata->clk); | ||
430 | } | ||
431 | 429 | ||
432 | return 0; | 430 | return 0; |
433 | } | 431 | } |
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 1882d9828c99..885ac9cbab5a 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c | |||
@@ -677,17 +677,28 @@ static void kszphy_get_stats(struct phy_device *phydev, | |||
677 | data[i] = kszphy_get_stat(phydev, i); | 677 | data[i] = kszphy_get_stat(phydev, i); |
678 | } | 678 | } |
679 | 679 | ||
680 | static int kszphy_resume(struct phy_device *phydev) | 680 | static int kszphy_suspend(struct phy_device *phydev) |
681 | { | 681 | { |
682 | int value; | 682 | /* Disable PHY Interrupts */ |
683 | if (phy_interrupt_is_valid(phydev)) { | ||
684 | phydev->interrupts = PHY_INTERRUPT_DISABLED; | ||
685 | if (phydev->drv->config_intr) | ||
686 | phydev->drv->config_intr(phydev); | ||
687 | } | ||
683 | 688 | ||
684 | mutex_lock(&phydev->lock); | 689 | return genphy_suspend(phydev); |
690 | } | ||
685 | 691 | ||
686 | value = phy_read(phydev, MII_BMCR); | 692 | static int kszphy_resume(struct phy_device *phydev) |
687 | phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN); | 693 | { |
694 | genphy_resume(phydev); | ||
688 | 695 | ||
689 | kszphy_config_intr(phydev); | 696 | /* Enable PHY Interrupts */ |
690 | mutex_unlock(&phydev->lock); | 697 | if (phy_interrupt_is_valid(phydev)) { |
698 | phydev->interrupts = PHY_INTERRUPT_ENABLED; | ||
699 | if (phydev->drv->config_intr) | ||
700 | phydev->drv->config_intr(phydev); | ||
701 | } | ||
691 | 702 | ||
692 | return 0; | 703 | return 0; |
693 | } | 704 | } |
@@ -900,7 +911,7 @@ static struct phy_driver ksphy_driver[] = { | |||
900 | .get_sset_count = kszphy_get_sset_count, | 911 | .get_sset_count = kszphy_get_sset_count, |
901 | .get_strings = kszphy_get_strings, | 912 | .get_strings = kszphy_get_strings, |
902 | .get_stats = kszphy_get_stats, | 913 | .get_stats = kszphy_get_stats, |
903 | .suspend = genphy_suspend, | 914 | .suspend = kszphy_suspend, |
904 | .resume = kszphy_resume, | 915 | .resume = kszphy_resume, |
905 | }, { | 916 | }, { |
906 | .phy_id = PHY_ID_KSZ8061, | 917 | .phy_id = PHY_ID_KSZ8061, |
@@ -953,7 +964,7 @@ static struct phy_driver ksphy_driver[] = { | |||
953 | .get_strings = kszphy_get_strings, | 964 | .get_strings = kszphy_get_strings, |
954 | .get_stats = kszphy_get_stats, | 965 | .get_stats = kszphy_get_stats, |
955 | .suspend = genphy_suspend, | 966 | .suspend = genphy_suspend, |
956 | .resume = genphy_resume, | 967 | .resume = kszphy_resume, |
957 | }, { | 968 | }, { |
958 | .phy_id = PHY_ID_KSZ8873MLL, | 969 | .phy_id = PHY_ID_KSZ8873MLL, |
959 | .phy_id_mask = MICREL_PHY_ID_MASK, | 970 | .phy_id_mask = MICREL_PHY_ID_MASK, |
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index c5dc2c363f96..c6f66832a1a6 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
@@ -722,8 +722,10 @@ phy_err: | |||
722 | int phy_start_interrupts(struct phy_device *phydev) | 722 | int phy_start_interrupts(struct phy_device *phydev) |
723 | { | 723 | { |
724 | atomic_set(&phydev->irq_disable, 0); | 724 | atomic_set(&phydev->irq_disable, 0); |
725 | if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt", | 725 | if (request_irq(phydev->irq, phy_interrupt, |
726 | phydev) < 0) { | 726 | IRQF_SHARED, |
727 | "phy_interrupt", | ||
728 | phydev) < 0) { | ||
727 | pr_warn("%s: Can't get IRQ %d (PHY)\n", | 729 | pr_warn("%s: Can't get IRQ %d (PHY)\n", |
728 | phydev->mdio.bus->name, phydev->irq); | 730 | phydev->mdio.bus->name, phydev->irq); |
729 | phydev->irq = PHY_POLL; | 731 | phydev->irq = PHY_POLL; |
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c index cdb19b385d42..b228bea7931f 100644 --- a/drivers/net/team/team_mode_loadbalance.c +++ b/drivers/net/team/team_mode_loadbalance.c | |||
@@ -14,9 +14,23 @@ | |||
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/errno.h> | 15 | #include <linux/errno.h> |
16 | #include <linux/netdevice.h> | 16 | #include <linux/netdevice.h> |
17 | #include <linux/etherdevice.h> | ||
17 | #include <linux/filter.h> | 18 | #include <linux/filter.h> |
18 | #include <linux/if_team.h> | 19 | #include <linux/if_team.h> |
19 | 20 | ||
21 | static rx_handler_result_t lb_receive(struct team *team, struct team_port *port, | ||
22 | struct sk_buff *skb) | ||
23 | { | ||
24 | if (unlikely(skb->protocol == htons(ETH_P_SLOW))) { | ||
25 | /* LACPDU packets should go to exact delivery */ | ||
26 | const unsigned char *dest = eth_hdr(skb)->h_dest; | ||
27 | |||
28 | if (is_link_local_ether_addr(dest) && dest[5] == 0x02) | ||
29 | return RX_HANDLER_EXACT; | ||
30 | } | ||
31 | return RX_HANDLER_ANOTHER; | ||
32 | } | ||
33 | |||
20 | struct lb_priv; | 34 | struct lb_priv; |
21 | 35 | ||
22 | typedef struct team_port *lb_select_tx_port_func_t(struct team *, | 36 | typedef struct team_port *lb_select_tx_port_func_t(struct team *, |
@@ -652,6 +666,7 @@ static const struct team_mode_ops lb_mode_ops = { | |||
652 | .port_enter = lb_port_enter, | 666 | .port_enter = lb_port_enter, |
653 | .port_leave = lb_port_leave, | 667 | .port_leave = lb_port_leave, |
654 | .port_disabled = lb_port_disabled, | 668 | .port_disabled = lb_port_disabled, |
669 | .receive = lb_receive, | ||
655 | .transmit = lb_transmit, | 670 | .transmit = lb_transmit, |
656 | }; | 671 | }; |
657 | 672 | ||
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 9c8b5bc2b9d8..6f9df375c5d4 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -894,11 +894,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) | |||
894 | if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) | 894 | if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) |
895 | goto drop; | 895 | goto drop; |
896 | 896 | ||
897 | if (skb->sk && sk_fullsock(skb->sk)) { | 897 | skb_tx_timestamp(skb); |
898 | sock_tx_timestamp(skb->sk, skb->sk->sk_tsflags, | ||
899 | &skb_shinfo(skb)->tx_flags); | ||
900 | sw_tx_timestamp(skb); | ||
901 | } | ||
902 | 898 | ||
903 | /* Orphan the skb - required as we might hang on to it | 899 | /* Orphan the skb - required as we might hang on to it |
904 | * for indefinite time. | 900 | * for indefinite time. |
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c index 770212baaf05..528b9c9c4e60 100644 --- a/drivers/net/usb/kaweth.c +++ b/drivers/net/usb/kaweth.c | |||
@@ -1009,6 +1009,7 @@ static int kaweth_probe( | |||
1009 | struct net_device *netdev; | 1009 | struct net_device *netdev; |
1010 | const eth_addr_t bcast_addr = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; | 1010 | const eth_addr_t bcast_addr = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; |
1011 | int result = 0; | 1011 | int result = 0; |
1012 | int rv = -EIO; | ||
1012 | 1013 | ||
1013 | dev_dbg(dev, | 1014 | dev_dbg(dev, |
1014 | "Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x\n", | 1015 | "Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x\n", |
@@ -1029,6 +1030,7 @@ static int kaweth_probe( | |||
1029 | kaweth = netdev_priv(netdev); | 1030 | kaweth = netdev_priv(netdev); |
1030 | kaweth->dev = udev; | 1031 | kaweth->dev = udev; |
1031 | kaweth->net = netdev; | 1032 | kaweth->net = netdev; |
1033 | kaweth->intf = intf; | ||
1032 | 1034 | ||
1033 | spin_lock_init(&kaweth->device_lock); | 1035 | spin_lock_init(&kaweth->device_lock); |
1034 | init_waitqueue_head(&kaweth->term_wait); | 1036 | init_waitqueue_head(&kaweth->term_wait); |
@@ -1048,6 +1050,10 @@ static int kaweth_probe( | |||
1048 | /* Download the firmware */ | 1050 | /* Download the firmware */ |
1049 | dev_info(dev, "Downloading firmware...\n"); | 1051 | dev_info(dev, "Downloading firmware...\n"); |
1050 | kaweth->firmware_buf = (__u8 *)__get_free_page(GFP_KERNEL); | 1052 | kaweth->firmware_buf = (__u8 *)__get_free_page(GFP_KERNEL); |
1053 | if (!kaweth->firmware_buf) { | ||
1054 | rv = -ENOMEM; | ||
1055 | goto err_free_netdev; | ||
1056 | } | ||
1051 | if ((result = kaweth_download_firmware(kaweth, | 1057 | if ((result = kaweth_download_firmware(kaweth, |
1052 | "kaweth/new_code.bin", | 1058 | "kaweth/new_code.bin", |
1053 | 100, | 1059 | 100, |
@@ -1139,8 +1145,6 @@ err_fw: | |||
1139 | 1145 | ||
1140 | dev_dbg(dev, "Initializing net device.\n"); | 1146 | dev_dbg(dev, "Initializing net device.\n"); |
1141 | 1147 | ||
1142 | kaweth->intf = intf; | ||
1143 | |||
1144 | kaweth->tx_urb = usb_alloc_urb(0, GFP_KERNEL); | 1148 | kaweth->tx_urb = usb_alloc_urb(0, GFP_KERNEL); |
1145 | if (!kaweth->tx_urb) | 1149 | if (!kaweth->tx_urb) |
1146 | goto err_free_netdev; | 1150 | goto err_free_netdev; |
@@ -1204,7 +1208,7 @@ err_only_tx: | |||
1204 | err_free_netdev: | 1208 | err_free_netdev: |
1205 | free_netdev(netdev); | 1209 | free_netdev(netdev); |
1206 | 1210 | ||
1207 | return -EIO; | 1211 | return rv; |
1208 | } | 1212 | } |
1209 | 1213 | ||
1210 | /**************************************************************** | 1214 | /**************************************************************** |
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index f41a8ad4740e..c254248863d4 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
@@ -32,7 +32,7 @@ | |||
32 | #define NETNEXT_VERSION "08" | 32 | #define NETNEXT_VERSION "08" |
33 | 33 | ||
34 | /* Information for net */ | 34 | /* Information for net */ |
35 | #define NET_VERSION "5" | 35 | #define NET_VERSION "6" |
36 | 36 | ||
37 | #define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION | 37 | #define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION |
38 | #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" | 38 | #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" |
@@ -2552,6 +2552,77 @@ static void r8152_aldps_en(struct r8152 *tp, bool enable) | |||
2552 | } | 2552 | } |
2553 | } | 2553 | } |
2554 | 2554 | ||
2555 | static inline void r8152_mmd_indirect(struct r8152 *tp, u16 dev, u16 reg) | ||
2556 | { | ||
2557 | ocp_reg_write(tp, OCP_EEE_AR, FUN_ADDR | dev); | ||
2558 | ocp_reg_write(tp, OCP_EEE_DATA, reg); | ||
2559 | ocp_reg_write(tp, OCP_EEE_AR, FUN_DATA | dev); | ||
2560 | } | ||
2561 | |||
2562 | static u16 r8152_mmd_read(struct r8152 *tp, u16 dev, u16 reg) | ||
2563 | { | ||
2564 | u16 data; | ||
2565 | |||
2566 | r8152_mmd_indirect(tp, dev, reg); | ||
2567 | data = ocp_reg_read(tp, OCP_EEE_DATA); | ||
2568 | ocp_reg_write(tp, OCP_EEE_AR, 0x0000); | ||
2569 | |||
2570 | return data; | ||
2571 | } | ||
2572 | |||
2573 | static void r8152_mmd_write(struct r8152 *tp, u16 dev, u16 reg, u16 data) | ||
2574 | { | ||
2575 | r8152_mmd_indirect(tp, dev, reg); | ||
2576 | ocp_reg_write(tp, OCP_EEE_DATA, data); | ||
2577 | ocp_reg_write(tp, OCP_EEE_AR, 0x0000); | ||
2578 | } | ||
2579 | |||
2580 | static void r8152_eee_en(struct r8152 *tp, bool enable) | ||
2581 | { | ||
2582 | u16 config1, config2, config3; | ||
2583 | u32 ocp_data; | ||
2584 | |||
2585 | ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR); | ||
2586 | config1 = ocp_reg_read(tp, OCP_EEE_CONFIG1) & ~sd_rise_time_mask; | ||
2587 | config2 = ocp_reg_read(tp, OCP_EEE_CONFIG2); | ||
2588 | config3 = ocp_reg_read(tp, OCP_EEE_CONFIG3) & ~fast_snr_mask; | ||
2589 | |||
2590 | if (enable) { | ||
2591 | ocp_data |= EEE_RX_EN | EEE_TX_EN; | ||
2592 | config1 |= EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN | RX_QUIET_EN; | ||
2593 | config1 |= sd_rise_time(1); | ||
2594 | config2 |= RG_DACQUIET_EN | RG_LDVQUIET_EN; | ||
2595 | config3 |= fast_snr(42); | ||
2596 | } else { | ||
2597 | ocp_data &= ~(EEE_RX_EN | EEE_TX_EN); | ||
2598 | config1 &= ~(EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN | | ||
2599 | RX_QUIET_EN); | ||
2600 | config1 |= sd_rise_time(7); | ||
2601 | config2 &= ~(RG_DACQUIET_EN | RG_LDVQUIET_EN); | ||
2602 | config3 |= fast_snr(511); | ||
2603 | } | ||
2604 | |||
2605 | ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data); | ||
2606 | ocp_reg_write(tp, OCP_EEE_CONFIG1, config1); | ||
2607 | ocp_reg_write(tp, OCP_EEE_CONFIG2, config2); | ||
2608 | ocp_reg_write(tp, OCP_EEE_CONFIG3, config3); | ||
2609 | } | ||
2610 | |||
2611 | static void r8152b_enable_eee(struct r8152 *tp) | ||
2612 | { | ||
2613 | r8152_eee_en(tp, true); | ||
2614 | r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, MDIO_EEE_100TX); | ||
2615 | } | ||
2616 | |||
2617 | static void r8152b_enable_fc(struct r8152 *tp) | ||
2618 | { | ||
2619 | u16 anar; | ||
2620 | |||
2621 | anar = r8152_mdio_read(tp, MII_ADVERTISE); | ||
2622 | anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; | ||
2623 | r8152_mdio_write(tp, MII_ADVERTISE, anar); | ||
2624 | } | ||
2625 | |||
2555 | static void rtl8152_disable(struct r8152 *tp) | 2626 | static void rtl8152_disable(struct r8152 *tp) |
2556 | { | 2627 | { |
2557 | r8152_aldps_en(tp, false); | 2628 | r8152_aldps_en(tp, false); |
@@ -2561,13 +2632,9 @@ static void rtl8152_disable(struct r8152 *tp) | |||
2561 | 2632 | ||
2562 | static void r8152b_hw_phy_cfg(struct r8152 *tp) | 2633 | static void r8152b_hw_phy_cfg(struct r8152 *tp) |
2563 | { | 2634 | { |
2564 | u16 data; | 2635 | r8152b_enable_eee(tp); |
2565 | 2636 | r8152_aldps_en(tp, true); | |
2566 | data = r8152_mdio_read(tp, MII_BMCR); | 2637 | r8152b_enable_fc(tp); |
2567 | if (data & BMCR_PDOWN) { | ||
2568 | data &= ~BMCR_PDOWN; | ||
2569 | r8152_mdio_write(tp, MII_BMCR, data); | ||
2570 | } | ||
2571 | 2638 | ||
2572 | set_bit(PHY_RESET, &tp->flags); | 2639 | set_bit(PHY_RESET, &tp->flags); |
2573 | } | 2640 | } |
@@ -2701,20 +2768,52 @@ static void r8152b_enter_oob(struct r8152 *tp) | |||
2701 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); | 2768 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); |
2702 | } | 2769 | } |
2703 | 2770 | ||
2771 | static void r8153_aldps_en(struct r8152 *tp, bool enable) | ||
2772 | { | ||
2773 | u16 data; | ||
2774 | |||
2775 | data = ocp_reg_read(tp, OCP_POWER_CFG); | ||
2776 | if (enable) { | ||
2777 | data |= EN_ALDPS; | ||
2778 | ocp_reg_write(tp, OCP_POWER_CFG, data); | ||
2779 | } else { | ||
2780 | data &= ~EN_ALDPS; | ||
2781 | ocp_reg_write(tp, OCP_POWER_CFG, data); | ||
2782 | msleep(20); | ||
2783 | } | ||
2784 | } | ||
2785 | |||
2786 | static void r8153_eee_en(struct r8152 *tp, bool enable) | ||
2787 | { | ||
2788 | u32 ocp_data; | ||
2789 | u16 config; | ||
2790 | |||
2791 | ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR); | ||
2792 | config = ocp_reg_read(tp, OCP_EEE_CFG); | ||
2793 | |||
2794 | if (enable) { | ||
2795 | ocp_data |= EEE_RX_EN | EEE_TX_EN; | ||
2796 | config |= EEE10_EN; | ||
2797 | } else { | ||
2798 | ocp_data &= ~(EEE_RX_EN | EEE_TX_EN); | ||
2799 | config &= ~EEE10_EN; | ||
2800 | } | ||
2801 | |||
2802 | ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data); | ||
2803 | ocp_reg_write(tp, OCP_EEE_CFG, config); | ||
2804 | } | ||
2805 | |||
2704 | static void r8153_hw_phy_cfg(struct r8152 *tp) | 2806 | static void r8153_hw_phy_cfg(struct r8152 *tp) |
2705 | { | 2807 | { |
2706 | u32 ocp_data; | 2808 | u32 ocp_data; |
2707 | u16 data; | 2809 | u16 data; |
2708 | 2810 | ||
2709 | if (tp->version == RTL_VER_03 || tp->version == RTL_VER_04 || | 2811 | /* disable ALDPS before updating the PHY parameters */ |
2710 | tp->version == RTL_VER_05) | 2812 | r8153_aldps_en(tp, false); |
2711 | ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L); | ||
2712 | 2813 | ||
2713 | data = r8152_mdio_read(tp, MII_BMCR); | 2814 | /* disable EEE before updating the PHY parameters */ |
2714 | if (data & BMCR_PDOWN) { | 2815 | r8153_eee_en(tp, false); |
2715 | data &= ~BMCR_PDOWN; | 2816 | ocp_reg_write(tp, OCP_EEE_ADV, 0); |
2716 | r8152_mdio_write(tp, MII_BMCR, data); | ||
2717 | } | ||
2718 | 2817 | ||
2719 | if (tp->version == RTL_VER_03) { | 2818 | if (tp->version == RTL_VER_03) { |
2720 | data = ocp_reg_read(tp, OCP_EEE_CFG); | 2819 | data = ocp_reg_read(tp, OCP_EEE_CFG); |
@@ -2745,6 +2844,12 @@ static void r8153_hw_phy_cfg(struct r8152 *tp) | |||
2745 | sram_write(tp, SRAM_10M_AMP1, 0x00af); | 2844 | sram_write(tp, SRAM_10M_AMP1, 0x00af); |
2746 | sram_write(tp, SRAM_10M_AMP2, 0x0208); | 2845 | sram_write(tp, SRAM_10M_AMP2, 0x0208); |
2747 | 2846 | ||
2847 | r8153_eee_en(tp, true); | ||
2848 | ocp_reg_write(tp, OCP_EEE_ADV, MDIO_EEE_1000T | MDIO_EEE_100TX); | ||
2849 | |||
2850 | r8153_aldps_en(tp, true); | ||
2851 | r8152b_enable_fc(tp); | ||
2852 | |||
2748 | set_bit(PHY_RESET, &tp->flags); | 2853 | set_bit(PHY_RESET, &tp->flags); |
2749 | } | 2854 | } |
2750 | 2855 | ||
@@ -2866,21 +2971,6 @@ static void r8153_enter_oob(struct r8152 *tp) | |||
2866 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); | 2971 | ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); |
2867 | } | 2972 | } |
2868 | 2973 | ||
2869 | static void r8153_aldps_en(struct r8152 *tp, bool enable) | ||
2870 | { | ||
2871 | u16 data; | ||
2872 | |||
2873 | data = ocp_reg_read(tp, OCP_POWER_CFG); | ||
2874 | if (enable) { | ||
2875 | data |= EN_ALDPS; | ||
2876 | ocp_reg_write(tp, OCP_POWER_CFG, data); | ||
2877 | } else { | ||
2878 | data &= ~EN_ALDPS; | ||
2879 | ocp_reg_write(tp, OCP_POWER_CFG, data); | ||
2880 | msleep(20); | ||
2881 | } | ||
2882 | } | ||
2883 | |||
2884 | static void rtl8153_disable(struct r8152 *tp) | 2974 | static void rtl8153_disable(struct r8152 *tp) |
2885 | { | 2975 | { |
2886 | r8153_aldps_en(tp, false); | 2976 | r8153_aldps_en(tp, false); |
@@ -3246,103 +3336,6 @@ static int rtl8152_close(struct net_device *netdev) | |||
3246 | return res; | 3336 | return res; |
3247 | } | 3337 | } |
3248 | 3338 | ||
3249 | static inline void r8152_mmd_indirect(struct r8152 *tp, u16 dev, u16 reg) | ||
3250 | { | ||
3251 | ocp_reg_write(tp, OCP_EEE_AR, FUN_ADDR | dev); | ||
3252 | ocp_reg_write(tp, OCP_EEE_DATA, reg); | ||
3253 | ocp_reg_write(tp, OCP_EEE_AR, FUN_DATA | dev); | ||
3254 | } | ||
3255 | |||
3256 | static u16 r8152_mmd_read(struct r8152 *tp, u16 dev, u16 reg) | ||
3257 | { | ||
3258 | u16 data; | ||
3259 | |||
3260 | r8152_mmd_indirect(tp, dev, reg); | ||
3261 | data = ocp_reg_read(tp, OCP_EEE_DATA); | ||
3262 | ocp_reg_write(tp, OCP_EEE_AR, 0x0000); | ||
3263 | |||
3264 | return data; | ||
3265 | } | ||
3266 | |||
3267 | static void r8152_mmd_write(struct r8152 *tp, u16 dev, u16 reg, u16 data) | ||
3268 | { | ||
3269 | r8152_mmd_indirect(tp, dev, reg); | ||
3270 | ocp_reg_write(tp, OCP_EEE_DATA, data); | ||
3271 | ocp_reg_write(tp, OCP_EEE_AR, 0x0000); | ||
3272 | } | ||
3273 | |||
3274 | static void r8152_eee_en(struct r8152 *tp, bool enable) | ||
3275 | { | ||
3276 | u16 config1, config2, config3; | ||
3277 | u32 ocp_data; | ||
3278 | |||
3279 | ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR); | ||
3280 | config1 = ocp_reg_read(tp, OCP_EEE_CONFIG1) & ~sd_rise_time_mask; | ||
3281 | config2 = ocp_reg_read(tp, OCP_EEE_CONFIG2); | ||
3282 | config3 = ocp_reg_read(tp, OCP_EEE_CONFIG3) & ~fast_snr_mask; | ||
3283 | |||
3284 | if (enable) { | ||
3285 | ocp_data |= EEE_RX_EN | EEE_TX_EN; | ||
3286 | config1 |= EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN | RX_QUIET_EN; | ||
3287 | config1 |= sd_rise_time(1); | ||
3288 | config2 |= RG_DACQUIET_EN | RG_LDVQUIET_EN; | ||
3289 | config3 |= fast_snr(42); | ||
3290 | } else { | ||
3291 | ocp_data &= ~(EEE_RX_EN | EEE_TX_EN); | ||
3292 | config1 &= ~(EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN | | ||
3293 | RX_QUIET_EN); | ||
3294 | config1 |= sd_rise_time(7); | ||
3295 | config2 &= ~(RG_DACQUIET_EN | RG_LDVQUIET_EN); | ||
3296 | config3 |= fast_snr(511); | ||
3297 | } | ||
3298 | |||
3299 | ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data); | ||
3300 | ocp_reg_write(tp, OCP_EEE_CONFIG1, config1); | ||
3301 | ocp_reg_write(tp, OCP_EEE_CONFIG2, config2); | ||
3302 | ocp_reg_write(tp, OCP_EEE_CONFIG3, config3); | ||
3303 | } | ||
3304 | |||
3305 | static void r8152b_enable_eee(struct r8152 *tp) | ||
3306 | { | ||
3307 | r8152_eee_en(tp, true); | ||
3308 | r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, MDIO_EEE_100TX); | ||
3309 | } | ||
3310 | |||
3311 | static void r8153_eee_en(struct r8152 *tp, bool enable) | ||
3312 | { | ||
3313 | u32 ocp_data; | ||
3314 | u16 config; | ||
3315 | |||
3316 | ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR); | ||
3317 | config = ocp_reg_read(tp, OCP_EEE_CFG); | ||
3318 | |||
3319 | if (enable) { | ||
3320 | ocp_data |= EEE_RX_EN | EEE_TX_EN; | ||
3321 | config |= EEE10_EN; | ||
3322 | } else { | ||
3323 | ocp_data &= ~(EEE_RX_EN | EEE_TX_EN); | ||
3324 | config &= ~EEE10_EN; | ||
3325 | } | ||
3326 | |||
3327 | ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data); | ||
3328 | ocp_reg_write(tp, OCP_EEE_CFG, config); | ||
3329 | } | ||
3330 | |||
3331 | static void r8153_enable_eee(struct r8152 *tp) | ||
3332 | { | ||
3333 | r8153_eee_en(tp, true); | ||
3334 | ocp_reg_write(tp, OCP_EEE_ADV, MDIO_EEE_1000T | MDIO_EEE_100TX); | ||
3335 | } | ||
3336 | |||
3337 | static void r8152b_enable_fc(struct r8152 *tp) | ||
3338 | { | ||
3339 | u16 anar; | ||
3340 | |||
3341 | anar = r8152_mdio_read(tp, MII_ADVERTISE); | ||
3342 | anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; | ||
3343 | r8152_mdio_write(tp, MII_ADVERTISE, anar); | ||
3344 | } | ||
3345 | |||
3346 | static void rtl_tally_reset(struct r8152 *tp) | 3339 | static void rtl_tally_reset(struct r8152 *tp) |
3347 | { | 3340 | { |
3348 | u32 ocp_data; | 3341 | u32 ocp_data; |
@@ -3355,10 +3348,17 @@ static void rtl_tally_reset(struct r8152 *tp) | |||
3355 | static void r8152b_init(struct r8152 *tp) | 3348 | static void r8152b_init(struct r8152 *tp) |
3356 | { | 3349 | { |
3357 | u32 ocp_data; | 3350 | u32 ocp_data; |
3351 | u16 data; | ||
3358 | 3352 | ||
3359 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) | 3353 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) |
3360 | return; | 3354 | return; |
3361 | 3355 | ||
3356 | data = r8152_mdio_read(tp, MII_BMCR); | ||
3357 | if (data & BMCR_PDOWN) { | ||
3358 | data &= ~BMCR_PDOWN; | ||
3359 | r8152_mdio_write(tp, MII_BMCR, data); | ||
3360 | } | ||
3361 | |||
3362 | r8152_aldps_en(tp, false); | 3362 | r8152_aldps_en(tp, false); |
3363 | 3363 | ||
3364 | if (tp->version == RTL_VER_01) { | 3364 | if (tp->version == RTL_VER_01) { |
@@ -3380,9 +3380,6 @@ static void r8152b_init(struct r8152 *tp) | |||
3380 | SPDWN_RXDV_MSK | SPDWN_LINKCHG_MSK; | 3380 | SPDWN_RXDV_MSK | SPDWN_LINKCHG_MSK; |
3381 | ocp_write_word(tp, MCU_TYPE_PLA, PLA_GPHY_INTR_IMR, ocp_data); | 3381 | ocp_write_word(tp, MCU_TYPE_PLA, PLA_GPHY_INTR_IMR, ocp_data); |
3382 | 3382 | ||
3383 | r8152b_enable_eee(tp); | ||
3384 | r8152_aldps_en(tp, true); | ||
3385 | r8152b_enable_fc(tp); | ||
3386 | rtl_tally_reset(tp); | 3383 | rtl_tally_reset(tp); |
3387 | 3384 | ||
3388 | /* enable rx aggregation */ | 3385 | /* enable rx aggregation */ |
@@ -3394,12 +3391,12 @@ static void r8152b_init(struct r8152 *tp) | |||
3394 | static void r8153_init(struct r8152 *tp) | 3391 | static void r8153_init(struct r8152 *tp) |
3395 | { | 3392 | { |
3396 | u32 ocp_data; | 3393 | u32 ocp_data; |
3394 | u16 data; | ||
3397 | int i; | 3395 | int i; |
3398 | 3396 | ||
3399 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) | 3397 | if (test_bit(RTL8152_UNPLUG, &tp->flags)) |
3400 | return; | 3398 | return; |
3401 | 3399 | ||
3402 | r8153_aldps_en(tp, false); | ||
3403 | r8153_u1u2en(tp, false); | 3400 | r8153_u1u2en(tp, false); |
3404 | 3401 | ||
3405 | for (i = 0; i < 500; i++) { | 3402 | for (i = 0; i < 500; i++) { |
@@ -3416,6 +3413,23 @@ static void r8153_init(struct r8152 *tp) | |||
3416 | msleep(20); | 3413 | msleep(20); |
3417 | } | 3414 | } |
3418 | 3415 | ||
3416 | if (tp->version == RTL_VER_03 || tp->version == RTL_VER_04 || | ||
3417 | tp->version == RTL_VER_05) | ||
3418 | ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L); | ||
3419 | |||
3420 | data = r8152_mdio_read(tp, MII_BMCR); | ||
3421 | if (data & BMCR_PDOWN) { | ||
3422 | data &= ~BMCR_PDOWN; | ||
3423 | r8152_mdio_write(tp, MII_BMCR, data); | ||
3424 | } | ||
3425 | |||
3426 | for (i = 0; i < 500; i++) { | ||
3427 | ocp_data = ocp_reg_read(tp, OCP_PHY_STATUS) & PHY_STAT_MASK; | ||
3428 | if (ocp_data == PHY_STAT_LAN_ON) | ||
3429 | break; | ||
3430 | msleep(20); | ||
3431 | } | ||
3432 | |||
3419 | usb_disable_lpm(tp->udev); | 3433 | usb_disable_lpm(tp->udev); |
3420 | r8153_u2p3en(tp, false); | 3434 | r8153_u2p3en(tp, false); |
3421 | 3435 | ||
@@ -3483,9 +3497,6 @@ static void r8153_init(struct r8152 *tp) | |||
3483 | ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0); | 3497 | ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0); |
3484 | ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0); | 3498 | ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0); |
3485 | 3499 | ||
3486 | r8153_enable_eee(tp); | ||
3487 | r8153_aldps_en(tp, true); | ||
3488 | r8152b_enable_fc(tp); | ||
3489 | rtl_tally_reset(tp); | 3500 | rtl_tally_reset(tp); |
3490 | r8153_u2p3en(tp, true); | 3501 | r8153_u2p3en(tp, true); |
3491 | } | 3502 | } |
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index c68fe495d3f9..4244b9d4418e 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c | |||
@@ -914,7 +914,9 @@ vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, | |||
914 | { | 914 | { |
915 | struct Vmxnet3_TxDataDesc *tdd; | 915 | struct Vmxnet3_TxDataDesc *tdd; |
916 | 916 | ||
917 | tdd = tq->data_ring.base + tq->tx_ring.next2fill; | 917 | tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base + |
918 | tq->tx_ring.next2fill * | ||
919 | tq->txdata_desc_size); | ||
918 | 920 | ||
919 | memcpy(tdd->data, skb->data, ctx->copy_size); | 921 | memcpy(tdd->data, skb->data, ctx->copy_size); |
920 | netdev_dbg(adapter->netdev, | 922 | netdev_dbg(adapter->netdev, |
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 74fc03072b87..7dc37a090549 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h | |||
@@ -69,10 +69,10 @@ | |||
69 | /* | 69 | /* |
70 | * Version numbers | 70 | * Version numbers |
71 | */ | 71 | */ |
72 | #define VMXNET3_DRIVER_VERSION_STRING "1.4.9.0-k" | 72 | #define VMXNET3_DRIVER_VERSION_STRING "1.4.a.0-k" |
73 | 73 | ||
74 | /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ | 74 | /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ |
75 | #define VMXNET3_DRIVER_VERSION_NUM 0x01040900 | 75 | #define VMXNET3_DRIVER_VERSION_NUM 0x01040a00 |
76 | 76 | ||
77 | #if defined(CONFIG_PCI_MSI) | 77 | #if defined(CONFIG_PCI_MSI) |
78 | /* RSS only makes sense if MSI-X is supported. */ | 78 | /* RSS only makes sense if MSI-X is supported. */ |
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index da4e3d6632f6..6e65832051d6 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -1811,7 +1811,7 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, | |||
1811 | fl4.flowi4_mark = skb->mark; | 1811 | fl4.flowi4_mark = skb->mark; |
1812 | fl4.flowi4_proto = IPPROTO_UDP; | 1812 | fl4.flowi4_proto = IPPROTO_UDP; |
1813 | fl4.daddr = daddr; | 1813 | fl4.daddr = daddr; |
1814 | fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr; | 1814 | fl4.saddr = *saddr; |
1815 | 1815 | ||
1816 | rt = ip_route_output_key(vxlan->net, &fl4); | 1816 | rt = ip_route_output_key(vxlan->net, &fl4); |
1817 | if (!IS_ERR(rt)) { | 1817 | if (!IS_ERR(rt)) { |
@@ -1847,7 +1847,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, | |||
1847 | memset(&fl6, 0, sizeof(fl6)); | 1847 | memset(&fl6, 0, sizeof(fl6)); |
1848 | fl6.flowi6_oif = oif; | 1848 | fl6.flowi6_oif = oif; |
1849 | fl6.daddr = *daddr; | 1849 | fl6.daddr = *daddr; |
1850 | fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr; | 1850 | fl6.saddr = *saddr; |
1851 | fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label); | 1851 | fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label); |
1852 | fl6.flowi6_mark = skb->mark; | 1852 | fl6.flowi6_mark = skb->mark; |
1853 | fl6.flowi6_proto = IPPROTO_UDP; | 1853 | fl6.flowi6_proto = IPPROTO_UDP; |
@@ -1920,7 +1920,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
1920 | struct rtable *rt = NULL; | 1920 | struct rtable *rt = NULL; |
1921 | const struct iphdr *old_iph; | 1921 | const struct iphdr *old_iph; |
1922 | union vxlan_addr *dst; | 1922 | union vxlan_addr *dst; |
1923 | union vxlan_addr remote_ip; | 1923 | union vxlan_addr remote_ip, local_ip; |
1924 | union vxlan_addr *src; | ||
1924 | struct vxlan_metadata _md; | 1925 | struct vxlan_metadata _md; |
1925 | struct vxlan_metadata *md = &_md; | 1926 | struct vxlan_metadata *md = &_md; |
1926 | __be16 src_port = 0, dst_port; | 1927 | __be16 src_port = 0, dst_port; |
@@ -1938,6 +1939,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
1938 | dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port; | 1939 | dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port; |
1939 | vni = rdst->remote_vni; | 1940 | vni = rdst->remote_vni; |
1940 | dst = &rdst->remote_ip; | 1941 | dst = &rdst->remote_ip; |
1942 | src = &vxlan->cfg.saddr; | ||
1941 | dst_cache = &rdst->dst_cache; | 1943 | dst_cache = &rdst->dst_cache; |
1942 | } else { | 1944 | } else { |
1943 | if (!info) { | 1945 | if (!info) { |
@@ -1948,11 +1950,15 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
1948 | dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port; | 1950 | dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port; |
1949 | vni = vxlan_tun_id_to_vni(info->key.tun_id); | 1951 | vni = vxlan_tun_id_to_vni(info->key.tun_id); |
1950 | remote_ip.sa.sa_family = ip_tunnel_info_af(info); | 1952 | remote_ip.sa.sa_family = ip_tunnel_info_af(info); |
1951 | if (remote_ip.sa.sa_family == AF_INET) | 1953 | if (remote_ip.sa.sa_family == AF_INET) { |
1952 | remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst; | 1954 | remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst; |
1953 | else | 1955 | local_ip.sin.sin_addr.s_addr = info->key.u.ipv4.src; |
1956 | } else { | ||
1954 | remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst; | 1957 | remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst; |
1958 | local_ip.sin6.sin6_addr = info->key.u.ipv6.src; | ||
1959 | } | ||
1955 | dst = &remote_ip; | 1960 | dst = &remote_ip; |
1961 | src = &local_ip; | ||
1956 | dst_cache = &info->dst_cache; | 1962 | dst_cache = &info->dst_cache; |
1957 | } | 1963 | } |
1958 | 1964 | ||
@@ -1992,15 +1998,14 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
1992 | } | 1998 | } |
1993 | 1999 | ||
1994 | if (dst->sa.sa_family == AF_INET) { | 2000 | if (dst->sa.sa_family == AF_INET) { |
1995 | __be32 saddr; | ||
1996 | |||
1997 | if (!vxlan->vn4_sock) | 2001 | if (!vxlan->vn4_sock) |
1998 | goto drop; | 2002 | goto drop; |
1999 | sk = vxlan->vn4_sock->sock->sk; | 2003 | sk = vxlan->vn4_sock->sock->sk; |
2000 | 2004 | ||
2001 | rt = vxlan_get_route(vxlan, skb, | 2005 | rt = vxlan_get_route(vxlan, skb, |
2002 | rdst ? rdst->remote_ifindex : 0, tos, | 2006 | rdst ? rdst->remote_ifindex : 0, tos, |
2003 | dst->sin.sin_addr.s_addr, &saddr, | 2007 | dst->sin.sin_addr.s_addr, |
2008 | &src->sin.sin_addr.s_addr, | ||
2004 | dst_cache, info); | 2009 | dst_cache, info); |
2005 | if (IS_ERR(rt)) { | 2010 | if (IS_ERR(rt)) { |
2006 | netdev_dbg(dev, "no route to %pI4\n", | 2011 | netdev_dbg(dev, "no route to %pI4\n", |
@@ -2017,7 +2022,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
2017 | } | 2022 | } |
2018 | 2023 | ||
2019 | /* Bypass encapsulation if the destination is local */ | 2024 | /* Bypass encapsulation if the destination is local */ |
2020 | if (rt->rt_flags & RTCF_LOCAL && | 2025 | if (!info && rt->rt_flags & RTCF_LOCAL && |
2021 | !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { | 2026 | !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { |
2022 | struct vxlan_dev *dst_vxlan; | 2027 | struct vxlan_dev *dst_vxlan; |
2023 | 2028 | ||
@@ -2043,13 +2048,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
2043 | if (err < 0) | 2048 | if (err < 0) |
2044 | goto xmit_tx_error; | 2049 | goto xmit_tx_error; |
2045 | 2050 | ||
2046 | udp_tunnel_xmit_skb(rt, sk, skb, saddr, | 2051 | udp_tunnel_xmit_skb(rt, sk, skb, src->sin.sin_addr.s_addr, |
2047 | dst->sin.sin_addr.s_addr, tos, ttl, df, | 2052 | dst->sin.sin_addr.s_addr, tos, ttl, df, |
2048 | src_port, dst_port, xnet, !udp_sum); | 2053 | src_port, dst_port, xnet, !udp_sum); |
2049 | #if IS_ENABLED(CONFIG_IPV6) | 2054 | #if IS_ENABLED(CONFIG_IPV6) |
2050 | } else { | 2055 | } else { |
2051 | struct dst_entry *ndst; | 2056 | struct dst_entry *ndst; |
2052 | struct in6_addr saddr; | ||
2053 | u32 rt6i_flags; | 2057 | u32 rt6i_flags; |
2054 | 2058 | ||
2055 | if (!vxlan->vn6_sock) | 2059 | if (!vxlan->vn6_sock) |
@@ -2058,7 +2062,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
2058 | 2062 | ||
2059 | ndst = vxlan6_get_route(vxlan, skb, | 2063 | ndst = vxlan6_get_route(vxlan, skb, |
2060 | rdst ? rdst->remote_ifindex : 0, tos, | 2064 | rdst ? rdst->remote_ifindex : 0, tos, |
2061 | label, &dst->sin6.sin6_addr, &saddr, | 2065 | label, &dst->sin6.sin6_addr, |
2066 | &src->sin6.sin6_addr, | ||
2062 | dst_cache, info); | 2067 | dst_cache, info); |
2063 | if (IS_ERR(ndst)) { | 2068 | if (IS_ERR(ndst)) { |
2064 | netdev_dbg(dev, "no route to %pI6\n", | 2069 | netdev_dbg(dev, "no route to %pI6\n", |
@@ -2077,7 +2082,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
2077 | 2082 | ||
2078 | /* Bypass encapsulation if the destination is local */ | 2083 | /* Bypass encapsulation if the destination is local */ |
2079 | rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags; | 2084 | rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags; |
2080 | if (rt6i_flags & RTF_LOCAL && | 2085 | if (!info && rt6i_flags & RTF_LOCAL && |
2081 | !(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { | 2086 | !(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { |
2082 | struct vxlan_dev *dst_vxlan; | 2087 | struct vxlan_dev *dst_vxlan; |
2083 | 2088 | ||
@@ -2104,7 +2109,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
2104 | return; | 2109 | return; |
2105 | } | 2110 | } |
2106 | udp_tunnel6_xmit_skb(ndst, sk, skb, dev, | 2111 | udp_tunnel6_xmit_skb(ndst, sk, skb, dev, |
2107 | &saddr, &dst->sin6.sin6_addr, tos, ttl, | 2112 | &src->sin6.sin6_addr, |
2113 | &dst->sin6.sin6_addr, tos, ttl, | ||
2108 | label, src_port, dst_port, !udp_sum); | 2114 | label, src_port, dst_port, !udp_sum); |
2109 | #endif | 2115 | #endif |
2110 | } | 2116 | } |
@@ -2776,14 +2782,15 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, | |||
2776 | struct net_device *lowerdev = NULL; | 2782 | struct net_device *lowerdev = NULL; |
2777 | 2783 | ||
2778 | if (conf->flags & VXLAN_F_GPE) { | 2784 | if (conf->flags & VXLAN_F_GPE) { |
2779 | if (conf->flags & ~VXLAN_F_ALLOWED_GPE) | ||
2780 | return -EINVAL; | ||
2781 | /* For now, allow GPE only together with COLLECT_METADATA. | 2785 | /* For now, allow GPE only together with COLLECT_METADATA. |
2782 | * This can be relaxed later; in such case, the other side | 2786 | * This can be relaxed later; in such case, the other side |
2783 | * of the PtP link will have to be provided. | 2787 | * of the PtP link will have to be provided. |
2784 | */ | 2788 | */ |
2785 | if (!(conf->flags & VXLAN_F_COLLECT_METADATA)) | 2789 | if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) || |
2790 | !(conf->flags & VXLAN_F_COLLECT_METADATA)) { | ||
2791 | pr_info("unsupported combination of extensions\n"); | ||
2786 | return -EINVAL; | 2792 | return -EINVAL; |
2793 | } | ||
2787 | 2794 | ||
2788 | vxlan_raw_setup(dev); | 2795 | vxlan_raw_setup(dev); |
2789 | } else { | 2796 | } else { |
@@ -2836,6 +2843,9 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, | |||
2836 | dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); | 2843 | dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); |
2837 | 2844 | ||
2838 | needed_headroom = lowerdev->hard_header_len; | 2845 | needed_headroom = lowerdev->hard_header_len; |
2846 | } else if (vxlan_addr_multicast(&dst->remote_ip)) { | ||
2847 | pr_info("multicast destination requires interface to be specified\n"); | ||
2848 | return -EINVAL; | ||
2839 | } | 2849 | } |
2840 | 2850 | ||
2841 | if (conf->mtu) { | 2851 | if (conf->mtu) { |
@@ -2868,8 +2878,10 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, | |||
2868 | tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 && | 2878 | tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 && |
2869 | tmp->cfg.dst_port == vxlan->cfg.dst_port && | 2879 | tmp->cfg.dst_port == vxlan->cfg.dst_port && |
2870 | (tmp->flags & VXLAN_F_RCV_FLAGS) == | 2880 | (tmp->flags & VXLAN_F_RCV_FLAGS) == |
2871 | (vxlan->flags & VXLAN_F_RCV_FLAGS)) | 2881 | (vxlan->flags & VXLAN_F_RCV_FLAGS)) { |
2872 | return -EEXIST; | 2882 | pr_info("duplicate VNI %u\n", be32_to_cpu(conf->vni)); |
2883 | return -EEXIST; | ||
2884 | } | ||
2873 | } | 2885 | } |
2874 | 2886 | ||
2875 | dev->ethtool_ops = &vxlan_ethtool_ops; | 2887 | dev->ethtool_ops = &vxlan_ethtool_ops; |
@@ -2903,7 +2915,6 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev, | |||
2903 | struct nlattr *tb[], struct nlattr *data[]) | 2915 | struct nlattr *tb[], struct nlattr *data[]) |
2904 | { | 2916 | { |
2905 | struct vxlan_config conf; | 2917 | struct vxlan_config conf; |
2906 | int err; | ||
2907 | 2918 | ||
2908 | memset(&conf, 0, sizeof(conf)); | 2919 | memset(&conf, 0, sizeof(conf)); |
2909 | 2920 | ||
@@ -3012,26 +3023,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev, | |||
3012 | if (tb[IFLA_MTU]) | 3023 | if (tb[IFLA_MTU]) |
3013 | conf.mtu = nla_get_u32(tb[IFLA_MTU]); | 3024 | conf.mtu = nla_get_u32(tb[IFLA_MTU]); |
3014 | 3025 | ||
3015 | err = vxlan_dev_configure(src_net, dev, &conf); | 3026 | return vxlan_dev_configure(src_net, dev, &conf); |
3016 | switch (err) { | ||
3017 | case -ENODEV: | ||
3018 | pr_info("ifindex %d does not exist\n", conf.remote_ifindex); | ||
3019 | break; | ||
3020 | |||
3021 | case -EPERM: | ||
3022 | pr_info("IPv6 is disabled via sysctl\n"); | ||
3023 | break; | ||
3024 | |||
3025 | case -EEXIST: | ||
3026 | pr_info("duplicate VNI %u\n", be32_to_cpu(conf.vni)); | ||
3027 | break; | ||
3028 | |||
3029 | case -EINVAL: | ||
3030 | pr_info("unsupported combination of extensions\n"); | ||
3031 | break; | ||
3032 | } | ||
3033 | |||
3034 | return err; | ||
3035 | } | 3027 | } |
3036 | 3028 | ||
3037 | static void vxlan_dellink(struct net_device *dev, struct list_head *head) | 3029 | static void vxlan_dellink(struct net_device *dev, struct list_head *head) |
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 78db5d679f19..24c8d65bcf34 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c | |||
@@ -1525,7 +1525,7 @@ static void ath10k_htt_rx_h_filter(struct ath10k *ar, | |||
1525 | static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt) | 1525 | static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt) |
1526 | { | 1526 | { |
1527 | struct ath10k *ar = htt->ar; | 1527 | struct ath10k *ar = htt->ar; |
1528 | static struct ieee80211_rx_status rx_status; | 1528 | struct ieee80211_rx_status *rx_status = &htt->rx_status; |
1529 | struct sk_buff_head amsdu; | 1529 | struct sk_buff_head amsdu; |
1530 | int ret; | 1530 | int ret; |
1531 | 1531 | ||
@@ -1549,11 +1549,11 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt) | |||
1549 | return ret; | 1549 | return ret; |
1550 | } | 1550 | } |
1551 | 1551 | ||
1552 | ath10k_htt_rx_h_ppdu(ar, &amsdu, &rx_status, 0xffff); | 1552 | ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff); |
1553 | ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0); | 1553 | ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0); |
1554 | ath10k_htt_rx_h_filter(ar, &amsdu, &rx_status); | 1554 | ath10k_htt_rx_h_filter(ar, &amsdu, rx_status); |
1555 | ath10k_htt_rx_h_mpdu(ar, &amsdu, &rx_status); | 1555 | ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status); |
1556 | ath10k_htt_rx_h_deliver(ar, &amsdu, &rx_status); | 1556 | ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status); |
1557 | 1557 | ||
1558 | return 0; | 1558 | return 0; |
1559 | } | 1559 | } |
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 9a22c478dd1b..07933c51a850 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c | |||
@@ -3162,7 +3162,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev, | |||
3162 | pci_hard_reset = ath10k_pci_qca988x_chip_reset; | 3162 | pci_hard_reset = ath10k_pci_qca988x_chip_reset; |
3163 | break; | 3163 | break; |
3164 | case QCA9887_1_0_DEVICE_ID: | 3164 | case QCA9887_1_0_DEVICE_ID: |
3165 | dev_warn(&pdev->dev, "QCA9887 support is still experimental, there are likely bugs. You have been warned.\n"); | ||
3166 | hw_rev = ATH10K_HW_QCA9887; | 3165 | hw_rev = ATH10K_HW_QCA9887; |
3167 | pci_ps = false; | 3166 | pci_ps = false; |
3168 | pci_soft_reset = ath10k_pci_warm_reset; | 3167 | pci_soft_reset = ath10k_pci_warm_reset; |
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index d1d0c06d627c..14b13f07cd1f 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c | |||
@@ -2482,6 +2482,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) | |||
2482 | return -EINVAL; | 2482 | return -EINVAL; |
2483 | } | 2483 | } |
2484 | 2484 | ||
2485 | ath9k_gpio_cap_init(ah); | ||
2486 | |||
2485 | if (AR_SREV_9485(ah) || | 2487 | if (AR_SREV_9485(ah) || |
2486 | AR_SREV_9285(ah) || | 2488 | AR_SREV_9285(ah) || |
2487 | AR_SREV_9330(ah) || | 2489 | AR_SREV_9330(ah) || |
@@ -2531,8 +2533,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) | |||
2531 | else | 2533 | else |
2532 | pCap->hw_caps &= ~ATH9K_HW_CAP_HT; | 2534 | pCap->hw_caps &= ~ATH9K_HW_CAP_HT; |
2533 | 2535 | ||
2534 | ath9k_gpio_cap_init(ah); | ||
2535 | |||
2536 | if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah)) | 2536 | if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah)) |
2537 | pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX; | 2537 | pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX; |
2538 | else | 2538 | else |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index a394622c9022..7cb65c303f8d 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
@@ -718,9 +718,12 @@ static int ath9k_start(struct ieee80211_hw *hw) | |||
718 | if (!ath_complete_reset(sc, false)) | 718 | if (!ath_complete_reset(sc, false)) |
719 | ah->reset_power_on = false; | 719 | ah->reset_power_on = false; |
720 | 720 | ||
721 | if (ah->led_pin >= 0) | 721 | if (ah->led_pin >= 0) { |
722 | ath9k_hw_set_gpio(ah, ah->led_pin, | 722 | ath9k_hw_set_gpio(ah, ah->led_pin, |
723 | (ah->config.led_active_high) ? 1 : 0); | 723 | (ah->config.led_active_high) ? 1 : 0); |
724 | ath9k_hw_gpio_request_out(ah, ah->led_pin, NULL, | ||
725 | AR_GPIO_OUTPUT_MUX_AS_OUTPUT); | ||
726 | } | ||
724 | 727 | ||
725 | /* | 728 | /* |
726 | * Reset key cache to sane defaults (all entries cleared) instead of | 729 | * Reset key cache to sane defaults (all entries cleared) instead of |
@@ -864,9 +867,11 @@ static void ath9k_stop(struct ieee80211_hw *hw) | |||
864 | 867 | ||
865 | spin_lock_bh(&sc->sc_pcu_lock); | 868 | spin_lock_bh(&sc->sc_pcu_lock); |
866 | 869 | ||
867 | if (ah->led_pin >= 0) | 870 | if (ah->led_pin >= 0) { |
868 | ath9k_hw_set_gpio(ah, ah->led_pin, | 871 | ath9k_hw_set_gpio(ah, ah->led_pin, |
869 | (ah->config.led_active_high) ? 0 : 1); | 872 | (ah->config.led_active_high) ? 0 : 1); |
873 | ath9k_hw_gpio_request_in(ah, ah->led_pin, NULL); | ||
874 | } | ||
870 | 875 | ||
871 | ath_prepare_reset(sc); | 876 | ath_prepare_reset(sc); |
872 | 877 | ||
@@ -1154,6 +1159,7 @@ void ath9k_calculate_summary_state(struct ath_softc *sc, | |||
1154 | bool changed = (iter_data.primary_sta != ctx->primary_sta); | 1159 | bool changed = (iter_data.primary_sta != ctx->primary_sta); |
1155 | 1160 | ||
1156 | if (iter_data.primary_sta) { | 1161 | if (iter_data.primary_sta) { |
1162 | iter_data.primary_beacon_vif = iter_data.primary_sta; | ||
1157 | iter_data.beacons = true; | 1163 | iter_data.beacons = true; |
1158 | ath9k_set_assoc_state(sc, iter_data.primary_sta, | 1164 | ath9k_set_assoc_state(sc, iter_data.primary_sta, |
1159 | changed); | 1165 | changed); |
@@ -1563,13 +1569,13 @@ static int ath9k_sta_state(struct ieee80211_hw *hw, | |||
1563 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); | 1569 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); |
1564 | int ret = 0; | 1570 | int ret = 0; |
1565 | 1571 | ||
1566 | if (old_state == IEEE80211_STA_AUTH && | 1572 | if (old_state == IEEE80211_STA_NOTEXIST && |
1567 | new_state == IEEE80211_STA_ASSOC) { | 1573 | new_state == IEEE80211_STA_NONE) { |
1568 | ret = ath9k_sta_add(hw, vif, sta); | 1574 | ret = ath9k_sta_add(hw, vif, sta); |
1569 | ath_dbg(common, CONFIG, | 1575 | ath_dbg(common, CONFIG, |
1570 | "Add station: %pM\n", sta->addr); | 1576 | "Add station: %pM\n", sta->addr); |
1571 | } else if (old_state == IEEE80211_STA_ASSOC && | 1577 | } else if (old_state == IEEE80211_STA_NONE && |
1572 | new_state == IEEE80211_STA_AUTH) { | 1578 | new_state == IEEE80211_STA_NOTEXIST) { |
1573 | ret = ath9k_sta_remove(hw, vif, sta); | 1579 | ret = ath9k_sta_remove(hw, vif, sta); |
1574 | ath_dbg(common, CONFIG, | 1580 | ath_dbg(common, CONFIG, |
1575 | "Remove station: %pM\n", sta->addr); | 1581 | "Remove station: %pM\n", sta->addr); |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 2628d5e12c64..b8aec5e5ef93 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | |||
@@ -4527,7 +4527,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, | |||
4527 | (u8 *)&settings->beacon.head[ie_offset], | 4527 | (u8 *)&settings->beacon.head[ie_offset], |
4528 | settings->beacon.head_len - ie_offset, | 4528 | settings->beacon.head_len - ie_offset, |
4529 | WLAN_EID_SSID); | 4529 | WLAN_EID_SSID); |
4530 | if (!ssid_ie) | 4530 | if (!ssid_ie || ssid_ie->len > IEEE80211_MAX_SSID_LEN) |
4531 | return -EINVAL; | 4531 | return -EINVAL; |
4532 | 4532 | ||
4533 | memcpy(ssid_le.SSID, ssid_ie->data, ssid_ie->len); | 4533 | memcpy(ssid_le.SSID, ssid_ie->data, ssid_ie->len); |
@@ -5635,7 +5635,7 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp, | |||
5635 | ifevent->action, ifevent->flags, ifevent->ifidx, | 5635 | ifevent->action, ifevent->flags, ifevent->ifidx, |
5636 | ifevent->bsscfgidx); | 5636 | ifevent->bsscfgidx); |
5637 | 5637 | ||
5638 | mutex_lock(&event->vif_event_lock); | 5638 | spin_lock(&event->vif_event_lock); |
5639 | event->action = ifevent->action; | 5639 | event->action = ifevent->action; |
5640 | vif = event->vif; | 5640 | vif = event->vif; |
5641 | 5641 | ||
@@ -5643,7 +5643,7 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp, | |||
5643 | case BRCMF_E_IF_ADD: | 5643 | case BRCMF_E_IF_ADD: |
5644 | /* waiting process may have timed out */ | 5644 | /* waiting process may have timed out */ |
5645 | if (!cfg->vif_event.vif) { | 5645 | if (!cfg->vif_event.vif) { |
5646 | mutex_unlock(&event->vif_event_lock); | 5646 | spin_unlock(&event->vif_event_lock); |
5647 | return -EBADF; | 5647 | return -EBADF; |
5648 | } | 5648 | } |
5649 | 5649 | ||
@@ -5654,24 +5654,24 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp, | |||
5654 | ifp->ndev->ieee80211_ptr = &vif->wdev; | 5654 | ifp->ndev->ieee80211_ptr = &vif->wdev; |
5655 | SET_NETDEV_DEV(ifp->ndev, wiphy_dev(cfg->wiphy)); | 5655 | SET_NETDEV_DEV(ifp->ndev, wiphy_dev(cfg->wiphy)); |
5656 | } | 5656 | } |
5657 | mutex_unlock(&event->vif_event_lock); | 5657 | spin_unlock(&event->vif_event_lock); |
5658 | wake_up(&event->vif_wq); | 5658 | wake_up(&event->vif_wq); |
5659 | return 0; | 5659 | return 0; |
5660 | 5660 | ||
5661 | case BRCMF_E_IF_DEL: | 5661 | case BRCMF_E_IF_DEL: |
5662 | mutex_unlock(&event->vif_event_lock); | 5662 | spin_unlock(&event->vif_event_lock); |
5663 | /* event may not be upon user request */ | 5663 | /* event may not be upon user request */ |
5664 | if (brcmf_cfg80211_vif_event_armed(cfg)) | 5664 | if (brcmf_cfg80211_vif_event_armed(cfg)) |
5665 | wake_up(&event->vif_wq); | 5665 | wake_up(&event->vif_wq); |
5666 | return 0; | 5666 | return 0; |
5667 | 5667 | ||
5668 | case BRCMF_E_IF_CHANGE: | 5668 | case BRCMF_E_IF_CHANGE: |
5669 | mutex_unlock(&event->vif_event_lock); | 5669 | spin_unlock(&event->vif_event_lock); |
5670 | wake_up(&event->vif_wq); | 5670 | wake_up(&event->vif_wq); |
5671 | return 0; | 5671 | return 0; |
5672 | 5672 | ||
5673 | default: | 5673 | default: |
5674 | mutex_unlock(&event->vif_event_lock); | 5674 | spin_unlock(&event->vif_event_lock); |
5675 | break; | 5675 | break; |
5676 | } | 5676 | } |
5677 | return -EINVAL; | 5677 | return -EINVAL; |
@@ -5792,7 +5792,7 @@ static void wl_deinit_priv(struct brcmf_cfg80211_info *cfg) | |||
5792 | static void init_vif_event(struct brcmf_cfg80211_vif_event *event) | 5792 | static void init_vif_event(struct brcmf_cfg80211_vif_event *event) |
5793 | { | 5793 | { |
5794 | init_waitqueue_head(&event->vif_wq); | 5794 | init_waitqueue_head(&event->vif_wq); |
5795 | mutex_init(&event->vif_event_lock); | 5795 | spin_lock_init(&event->vif_event_lock); |
5796 | } | 5796 | } |
5797 | 5797 | ||
5798 | static s32 brcmf_dongle_roam(struct brcmf_if *ifp) | 5798 | static s32 brcmf_dongle_roam(struct brcmf_if *ifp) |
@@ -6691,9 +6691,9 @@ static inline bool vif_event_equals(struct brcmf_cfg80211_vif_event *event, | |||
6691 | { | 6691 | { |
6692 | u8 evt_action; | 6692 | u8 evt_action; |
6693 | 6693 | ||
6694 | mutex_lock(&event->vif_event_lock); | 6694 | spin_lock(&event->vif_event_lock); |
6695 | evt_action = event->action; | 6695 | evt_action = event->action; |
6696 | mutex_unlock(&event->vif_event_lock); | 6696 | spin_unlock(&event->vif_event_lock); |
6697 | return evt_action == action; | 6697 | return evt_action == action; |
6698 | } | 6698 | } |
6699 | 6699 | ||
@@ -6702,10 +6702,10 @@ void brcmf_cfg80211_arm_vif_event(struct brcmf_cfg80211_info *cfg, | |||
6702 | { | 6702 | { |
6703 | struct brcmf_cfg80211_vif_event *event = &cfg->vif_event; | 6703 | struct brcmf_cfg80211_vif_event *event = &cfg->vif_event; |
6704 | 6704 | ||
6705 | mutex_lock(&event->vif_event_lock); | 6705 | spin_lock(&event->vif_event_lock); |
6706 | event->vif = vif; | 6706 | event->vif = vif; |
6707 | event->action = 0; | 6707 | event->action = 0; |
6708 | mutex_unlock(&event->vif_event_lock); | 6708 | spin_unlock(&event->vif_event_lock); |
6709 | } | 6709 | } |
6710 | 6710 | ||
6711 | bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg) | 6711 | bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg) |
@@ -6713,9 +6713,9 @@ bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg) | |||
6713 | struct brcmf_cfg80211_vif_event *event = &cfg->vif_event; | 6713 | struct brcmf_cfg80211_vif_event *event = &cfg->vif_event; |
6714 | bool armed; | 6714 | bool armed; |
6715 | 6715 | ||
6716 | mutex_lock(&event->vif_event_lock); | 6716 | spin_lock(&event->vif_event_lock); |
6717 | armed = event->vif != NULL; | 6717 | armed = event->vif != NULL; |
6718 | mutex_unlock(&event->vif_event_lock); | 6718 | spin_unlock(&event->vif_event_lock); |
6719 | 6719 | ||
6720 | return armed; | 6720 | return armed; |
6721 | } | 6721 | } |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h index 7d77f869b7f1..8889832c17e0 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h | |||
@@ -227,7 +227,7 @@ struct escan_info { | |||
227 | */ | 227 | */ |
228 | struct brcmf_cfg80211_vif_event { | 228 | struct brcmf_cfg80211_vif_event { |
229 | wait_queue_head_t vif_wq; | 229 | wait_queue_head_t vif_wq; |
230 | struct mutex vif_event_lock; | 230 | spinlock_t vif_event_lock; |
231 | u8 action; | 231 | u8 action; |
232 | struct brcmf_cfg80211_vif *vif; | 232 | struct brcmf_cfg80211_vif *vif; |
233 | }; | 233 | }; |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index 8d16f0204985..65e8c8766441 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | |||
@@ -743,7 +743,7 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx, | |||
743 | * serious troublesome side effects. The p2p module will clean | 743 | * serious troublesome side effects. The p2p module will clean |
744 | * up the ifp if needed. | 744 | * up the ifp if needed. |
745 | */ | 745 | */ |
746 | brcmf_p2p_ifp_removed(ifp); | 746 | brcmf_p2p_ifp_removed(ifp, rtnl_locked); |
747 | kfree(ifp); | 747 | kfree(ifp); |
748 | } | 748 | } |
749 | } | 749 | } |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c index 66f942f7448e..de19c7c92bc6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c | |||
@@ -2297,7 +2297,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev) | |||
2297 | return err; | 2297 | return err; |
2298 | } | 2298 | } |
2299 | 2299 | ||
2300 | void brcmf_p2p_ifp_removed(struct brcmf_if *ifp) | 2300 | void brcmf_p2p_ifp_removed(struct brcmf_if *ifp, bool rtnl_locked) |
2301 | { | 2301 | { |
2302 | struct brcmf_cfg80211_info *cfg; | 2302 | struct brcmf_cfg80211_info *cfg; |
2303 | struct brcmf_cfg80211_vif *vif; | 2303 | struct brcmf_cfg80211_vif *vif; |
@@ -2306,9 +2306,11 @@ void brcmf_p2p_ifp_removed(struct brcmf_if *ifp) | |||
2306 | vif = ifp->vif; | 2306 | vif = ifp->vif; |
2307 | cfg = wdev_to_cfg(&vif->wdev); | 2307 | cfg = wdev_to_cfg(&vif->wdev); |
2308 | cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL; | 2308 | cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL; |
2309 | rtnl_lock(); | 2309 | if (!rtnl_locked) |
2310 | rtnl_lock(); | ||
2310 | cfg80211_unregister_wdev(&vif->wdev); | 2311 | cfg80211_unregister_wdev(&vif->wdev); |
2311 | rtnl_unlock(); | 2312 | if (!rtnl_locked) |
2313 | rtnl_unlock(); | ||
2312 | brcmf_free_vif(vif); | 2314 | brcmf_free_vif(vif); |
2313 | } | 2315 | } |
2314 | 2316 | ||
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h index a3bd18c2360b..8ce9447533ef 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h | |||
@@ -155,7 +155,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name, | |||
155 | int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev); | 155 | int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev); |
156 | int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg, | 156 | int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg, |
157 | enum brcmf_fil_p2p_if_types if_type); | 157 | enum brcmf_fil_p2p_if_types if_type); |
158 | void brcmf_p2p_ifp_removed(struct brcmf_if *ifp); | 158 | void brcmf_p2p_ifp_removed(struct brcmf_if *ifp, bool rtnl_locked); |
159 | int brcmf_p2p_start_device(struct wiphy *wiphy, struct wireless_dev *wdev); | 159 | int brcmf_p2p_start_device(struct wiphy *wiphy, struct wireless_dev *wdev); |
160 | void brcmf_p2p_stop_device(struct wiphy *wiphy, struct wireless_dev *wdev); | 160 | void brcmf_p2p_stop_device(struct wiphy *wiphy, struct wireless_dev *wdev); |
161 | int brcmf_p2p_scan_prep(struct wiphy *wiphy, | 161 | int brcmf_p2p_scan_prep(struct wiphy *wiphy, |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c index 1abcabb9b6cd..46b52bf705fb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c | |||
@@ -960,5 +960,6 @@ int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id) | |||
960 | } | 960 | } |
961 | 961 | ||
962 | mvm->fw_dbg_conf = conf_id; | 962 | mvm->fw_dbg_conf = conf_id; |
963 | return ret; | 963 | |
964 | return 0; | ||
964 | } | 965 | } |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h index f7dff7612c9c..e9f1be9da7d4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h | |||
@@ -105,7 +105,8 @@ iwl_fw_dbg_trigger_vif_match(struct iwl_fw_dbg_trigger_tlv *trig, | |||
105 | { | 105 | { |
106 | u32 trig_vif = le32_to_cpu(trig->vif_type); | 106 | u32 trig_vif = le32_to_cpu(trig->vif_type); |
107 | 107 | ||
108 | return trig_vif == IWL_FW_DBG_CONF_VIF_ANY || vif->type == trig_vif; | 108 | return trig_vif == IWL_FW_DBG_CONF_VIF_ANY || |
109 | ieee80211_vif_type_p2p(vif) == trig_vif; | ||
109 | } | 110 | } |
110 | 111 | ||
111 | static inline bool | 112 | static inline bool |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 6d6064534d59..5dd77e336617 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | |||
@@ -624,6 +624,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) | |||
624 | hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN | | 624 | hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN | |
625 | NL80211_FEATURE_LOW_PRIORITY_SCAN | | 625 | NL80211_FEATURE_LOW_PRIORITY_SCAN | |
626 | NL80211_FEATURE_P2P_GO_OPPPS | | 626 | NL80211_FEATURE_P2P_GO_OPPPS | |
627 | NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | | ||
627 | NL80211_FEATURE_DYNAMIC_SMPS | | 628 | NL80211_FEATURE_DYNAMIC_SMPS | |
628 | NL80211_FEATURE_STATIC_SMPS | | 629 | NL80211_FEATURE_STATIC_SMPS | |
629 | NL80211_FEATURE_SUPPORTS_WMM_ADMISSION; | 630 | NL80211_FEATURE_SUPPORTS_WMM_ADMISSION; |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index b4fc86d5d7ef..6a615bb73042 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | |||
@@ -467,6 +467,8 @@ struct iwl_mvm_vif { | |||
467 | static inline struct iwl_mvm_vif * | 467 | static inline struct iwl_mvm_vif * |
468 | iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif) | 468 | iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif) |
469 | { | 469 | { |
470 | if (!vif) | ||
471 | return NULL; | ||
470 | return (void *)vif->drv_priv; | 472 | return (void *)vif->drv_priv; |
471 | } | 473 | } |
472 | 474 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index c6585ab48df3..b3a87a31de30 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c | |||
@@ -513,6 +513,15 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) | |||
513 | int hdrlen = ieee80211_hdrlen(hdr->frame_control); | 513 | int hdrlen = ieee80211_hdrlen(hdr->frame_control); |
514 | int queue; | 514 | int queue; |
515 | 515 | ||
516 | /* IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used | ||
517 | * in 2 different types of vifs, P2P & STATION. P2P uses the offchannel | ||
518 | * queue. STATION (HS2.0) uses the auxiliary context of the FW, | ||
519 | * and hence needs to be sent on the aux queue | ||
520 | */ | ||
521 | if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE && | ||
522 | skb_info->control.vif->type == NL80211_IFTYPE_STATION) | ||
523 | IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue; | ||
524 | |||
516 | memcpy(&info, skb->cb, sizeof(info)); | 525 | memcpy(&info, skb->cb, sizeof(info)); |
517 | 526 | ||
518 | if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU)) | 527 | if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU)) |
@@ -526,16 +535,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) | |||
526 | /* This holds the amsdu headers length */ | 535 | /* This holds the amsdu headers length */ |
527 | skb_info->driver_data[0] = (void *)(uintptr_t)0; | 536 | skb_info->driver_data[0] = (void *)(uintptr_t)0; |
528 | 537 | ||
529 | /* | ||
530 | * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used | ||
531 | * in 2 different types of vifs, P2P & STATION. P2P uses the offchannel | ||
532 | * queue. STATION (HS2.0) uses the auxiliary context of the FW, | ||
533 | * and hence needs to be sent on the aux queue | ||
534 | */ | ||
535 | if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE && | ||
536 | info.control.vif->type == NL80211_IFTYPE_STATION) | ||
537 | IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue; | ||
538 | |||
539 | queue = info.hw_queue; | 538 | queue = info.hw_queue; |
540 | 539 | ||
541 | /* | 540 | /* |
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c index dc49c3de1f25..c47d6366875d 100644 --- a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c +++ b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c | |||
@@ -205,7 +205,8 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, | |||
205 | 205 | ||
206 | do { | 206 | do { |
207 | /* Check if AMSDU can accommodate this MSDU */ | 207 | /* Check if AMSDU can accommodate this MSDU */ |
208 | if (skb_tailroom(skb_aggr) < (skb_src->len + LLC_SNAP_LEN)) | 208 | if ((skb_aggr->len + skb_src->len + LLC_SNAP_LEN) > |
209 | adapter->tx_buf_size) | ||
209 | break; | 210 | break; |
210 | 211 | ||
211 | skb_src = skb_dequeue(&pra_list->skb_head); | 212 | skb_src = skb_dequeue(&pra_list->skb_head); |
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 1d689169da76..9e1f2d9c9865 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c | |||
@@ -5700,10 +5700,11 @@ out: | |||
5700 | mutex_unlock(&wl->mutex); | 5700 | mutex_unlock(&wl->mutex); |
5701 | } | 5701 | } |
5702 | 5702 | ||
5703 | static u32 wlcore_op_get_expected_throughput(struct ieee80211_sta *sta) | 5703 | static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw, |
5704 | struct ieee80211_sta *sta) | ||
5704 | { | 5705 | { |
5705 | struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv; | 5706 | struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv; |
5706 | struct wl1271 *wl = wl_sta->wl; | 5707 | struct wl1271 *wl = hw->priv; |
5707 | u8 hlid = wl_sta->hlid; | 5708 | u8 hlid = wl_sta->hlid; |
5708 | 5709 | ||
5709 | /* return in units of Kbps */ | 5710 | /* return in units of Kbps */ |
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index 6a31f2610c23..daf4c7867102 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c | |||
@@ -271,6 +271,11 @@ static int netback_probe(struct xenbus_device *dev, | |||
271 | be->dev = dev; | 271 | be->dev = dev; |
272 | dev_set_drvdata(&dev->dev, be); | 272 | dev_set_drvdata(&dev->dev, be); |
273 | 273 | ||
274 | be->state = XenbusStateInitialising; | ||
275 | err = xenbus_switch_state(dev, XenbusStateInitialising); | ||
276 | if (err) | ||
277 | goto fail; | ||
278 | |||
274 | sg = 1; | 279 | sg = 1; |
275 | 280 | ||
276 | do { | 281 | do { |
@@ -383,11 +388,6 @@ static int netback_probe(struct xenbus_device *dev, | |||
383 | 388 | ||
384 | be->hotplug_script = script; | 389 | be->hotplug_script = script; |
385 | 390 | ||
386 | err = xenbus_switch_state(dev, XenbusStateInitWait); | ||
387 | if (err) | ||
388 | goto fail; | ||
389 | |||
390 | be->state = XenbusStateInitWait; | ||
391 | 391 | ||
392 | /* This kicks hotplug scripts, so do it immediately. */ | 392 | /* This kicks hotplug scripts, so do it immediately. */ |
393 | err = backend_create_xenvif(be); | 393 | err = backend_create_xenvif(be); |
@@ -492,20 +492,20 @@ static inline void backend_switch_state(struct backend_info *be, | |||
492 | 492 | ||
493 | /* Handle backend state transitions: | 493 | /* Handle backend state transitions: |
494 | * | 494 | * |
495 | * The backend state starts in InitWait and the following transitions are | 495 | * The backend state starts in Initialising and the following transitions are |
496 | * allowed. | 496 | * allowed. |
497 | * | 497 | * |
498 | * InitWait -> Connected | 498 | * Initialising -> InitWait -> Connected |
499 | * | 499 | * \ |
500 | * ^ \ | | 500 | * \ ^ \ | |
501 | * | \ | | 501 | * \ | \ | |
502 | * | \ | | 502 | * \ | \ | |
503 | * | \ | | 503 | * \ | \ | |
504 | * | \ | | 504 | * \ | \ | |
505 | * | \ | | 505 | * \ | \ | |
506 | * | V V | 506 | * V | V V |
507 | * | 507 | * |
508 | * Closed <-> Closing | 508 | * Closed <-> Closing |
509 | * | 509 | * |
510 | * The state argument specifies the eventual state of the backend and the | 510 | * The state argument specifies the eventual state of the backend and the |
511 | * function transitions to that state via the shortest path. | 511 | * function transitions to that state via the shortest path. |
@@ -515,6 +515,20 @@ static void set_backend_state(struct backend_info *be, | |||
515 | { | 515 | { |
516 | while (be->state != state) { | 516 | while (be->state != state) { |
517 | switch (be->state) { | 517 | switch (be->state) { |
518 | case XenbusStateInitialising: | ||
519 | switch (state) { | ||
520 | case XenbusStateInitWait: | ||
521 | case XenbusStateConnected: | ||
522 | case XenbusStateClosing: | ||
523 | backend_switch_state(be, XenbusStateInitWait); | ||
524 | break; | ||
525 | case XenbusStateClosed: | ||
526 | backend_switch_state(be, XenbusStateClosed); | ||
527 | break; | ||
528 | default: | ||
529 | BUG(); | ||
530 | } | ||
531 | break; | ||
518 | case XenbusStateClosed: | 532 | case XenbusStateClosed: |
519 | switch (state) { | 533 | switch (state) { |
520 | case XenbusStateInitWait: | 534 | case XenbusStateInitWait: |
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index 458daf927336..935866fe5ec2 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c | |||
@@ -185,8 +185,12 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys, | |||
185 | return -ENXIO; | 185 | return -ENXIO; |
186 | 186 | ||
187 | nd_desc = nvdimm_bus->nd_desc; | 187 | nd_desc = nvdimm_bus->nd_desc; |
188 | /* | ||
189 | * if ndctl does not exist, it's PMEM_LEGACY and | ||
190 | * we want to just pretend everything is handled. | ||
191 | */ | ||
188 | if (!nd_desc->ndctl) | 192 | if (!nd_desc->ndctl) |
189 | return -ENXIO; | 193 | return len; |
190 | 194 | ||
191 | memset(&ars_cap, 0, sizeof(ars_cap)); | 195 | memset(&ars_cap, 0, sizeof(ars_cap)); |
192 | ars_cap.address = phys; | 196 | ars_cap.address = phys; |
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig index db39d53cdfb9..f7d37a62f874 100644 --- a/drivers/nvme/host/Kconfig +++ b/drivers/nvme/host/Kconfig | |||
@@ -30,8 +30,8 @@ config NVME_FABRICS | |||
30 | 30 | ||
31 | config NVME_RDMA | 31 | config NVME_RDMA |
32 | tristate "NVM Express over Fabrics RDMA host driver" | 32 | tristate "NVM Express over Fabrics RDMA host driver" |
33 | depends on INFINIBAND | 33 | depends on INFINIBAND && BLOCK |
34 | depends on BLK_DEV_NVME | 34 | select NVME_CORE |
35 | select NVME_FABRICS | 35 | select NVME_FABRICS |
36 | select SG_POOL | 36 | select SG_POOL |
37 | help | 37 | help |
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 7ff2e820bbf4..2feacc70bf61 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c | |||
@@ -81,10 +81,12 @@ EXPORT_SYMBOL_GPL(nvme_cancel_request); | |||
81 | bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, | 81 | bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, |
82 | enum nvme_ctrl_state new_state) | 82 | enum nvme_ctrl_state new_state) |
83 | { | 83 | { |
84 | enum nvme_ctrl_state old_state = ctrl->state; | 84 | enum nvme_ctrl_state old_state; |
85 | bool changed = false; | 85 | bool changed = false; |
86 | 86 | ||
87 | spin_lock_irq(&ctrl->lock); | 87 | spin_lock_irq(&ctrl->lock); |
88 | |||
89 | old_state = ctrl->state; | ||
88 | switch (new_state) { | 90 | switch (new_state) { |
89 | case NVME_CTRL_LIVE: | 91 | case NVME_CTRL_LIVE: |
90 | switch (old_state) { | 92 | switch (old_state) { |
@@ -140,11 +142,12 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, | |||
140 | default: | 142 | default: |
141 | break; | 143 | break; |
142 | } | 144 | } |
143 | spin_unlock_irq(&ctrl->lock); | ||
144 | 145 | ||
145 | if (changed) | 146 | if (changed) |
146 | ctrl->state = new_state; | 147 | ctrl->state = new_state; |
147 | 148 | ||
149 | spin_unlock_irq(&ctrl->lock); | ||
150 | |||
148 | return changed; | 151 | return changed; |
149 | } | 152 | } |
150 | EXPORT_SYMBOL_GPL(nvme_change_ctrl_state); | 153 | EXPORT_SYMBOL_GPL(nvme_change_ctrl_state); |
@@ -608,7 +611,7 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid, | |||
608 | 611 | ||
609 | ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0, | 612 | ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0, |
610 | NVME_QID_ANY, 0, 0); | 613 | NVME_QID_ANY, 0, 0); |
611 | if (ret >= 0) | 614 | if (ret >= 0 && result) |
612 | *result = le32_to_cpu(cqe.result); | 615 | *result = le32_to_cpu(cqe.result); |
613 | return ret; | 616 | return ret; |
614 | } | 617 | } |
@@ -628,7 +631,7 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11, | |||
628 | 631 | ||
629 | ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0, | 632 | ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0, |
630 | NVME_QID_ANY, 0, 0); | 633 | NVME_QID_ANY, 0, 0); |
631 | if (ret >= 0) | 634 | if (ret >= 0 && result) |
632 | *result = le32_to_cpu(cqe.result); | 635 | *result = le32_to_cpu(cqe.result); |
633 | return ret; | 636 | return ret; |
634 | } | 637 | } |
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index dc996761042f..4eff49174466 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c | |||
@@ -47,8 +47,10 @@ static struct nvmf_host *nvmf_host_add(const char *hostnqn) | |||
47 | 47 | ||
48 | mutex_lock(&nvmf_hosts_mutex); | 48 | mutex_lock(&nvmf_hosts_mutex); |
49 | host = __nvmf_host_find(hostnqn); | 49 | host = __nvmf_host_find(hostnqn); |
50 | if (host) | 50 | if (host) { |
51 | kref_get(&host->ref); | ||
51 | goto out_unlock; | 52 | goto out_unlock; |
53 | } | ||
52 | 54 | ||
53 | host = kmalloc(sizeof(*host), GFP_KERNEL); | 55 | host = kmalloc(sizeof(*host), GFP_KERNEL); |
54 | if (!host) | 56 | if (!host) |
@@ -56,7 +58,7 @@ static struct nvmf_host *nvmf_host_add(const char *hostnqn) | |||
56 | 58 | ||
57 | kref_init(&host->ref); | 59 | kref_init(&host->ref); |
58 | memcpy(host->nqn, hostnqn, NVMF_NQN_SIZE); | 60 | memcpy(host->nqn, hostnqn, NVMF_NQN_SIZE); |
59 | uuid_le_gen(&host->id); | 61 | uuid_be_gen(&host->id); |
60 | 62 | ||
61 | list_add_tail(&host->list, &nvmf_hosts); | 63 | list_add_tail(&host->list, &nvmf_hosts); |
62 | out_unlock: | 64 | out_unlock: |
@@ -73,9 +75,9 @@ static struct nvmf_host *nvmf_host_default(void) | |||
73 | return NULL; | 75 | return NULL; |
74 | 76 | ||
75 | kref_init(&host->ref); | 77 | kref_init(&host->ref); |
76 | uuid_le_gen(&host->id); | 78 | uuid_be_gen(&host->id); |
77 | snprintf(host->nqn, NVMF_NQN_SIZE, | 79 | snprintf(host->nqn, NVMF_NQN_SIZE, |
78 | "nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUl", &host->id); | 80 | "nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUb", &host->id); |
79 | 81 | ||
80 | mutex_lock(&nvmf_hosts_mutex); | 82 | mutex_lock(&nvmf_hosts_mutex); |
81 | list_add_tail(&host->list, &nvmf_hosts); | 83 | list_add_tail(&host->list, &nvmf_hosts); |
@@ -363,7 +365,14 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl) | |||
363 | cmd.connect.opcode = nvme_fabrics_command; | 365 | cmd.connect.opcode = nvme_fabrics_command; |
364 | cmd.connect.fctype = nvme_fabrics_type_connect; | 366 | cmd.connect.fctype = nvme_fabrics_type_connect; |
365 | cmd.connect.qid = 0; | 367 | cmd.connect.qid = 0; |
366 | cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize); | 368 | |
369 | /* | ||
370 | * fabrics spec sets a minimum of depth 32 for admin queue, | ||
371 | * so set the queue with this depth always until | ||
372 | * justification otherwise. | ||
373 | */ | ||
374 | cmd.connect.sqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1); | ||
375 | |||
367 | /* | 376 | /* |
368 | * Set keep-alive timeout in seconds granularity (ms * 1000) | 377 | * Set keep-alive timeout in seconds granularity (ms * 1000) |
369 | * and add a grace period for controller kato enforcement | 378 | * and add a grace period for controller kato enforcement |
@@ -375,7 +384,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl) | |||
375 | if (!data) | 384 | if (!data) |
376 | return -ENOMEM; | 385 | return -ENOMEM; |
377 | 386 | ||
378 | memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_le)); | 387 | memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_be)); |
379 | data->cntlid = cpu_to_le16(0xffff); | 388 | data->cntlid = cpu_to_le16(0xffff); |
380 | strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE); | 389 | strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE); |
381 | strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE); | 390 | strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE); |
@@ -434,7 +443,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid) | |||
434 | if (!data) | 443 | if (!data) |
435 | return -ENOMEM; | 444 | return -ENOMEM; |
436 | 445 | ||
437 | memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_le)); | 446 | memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_be)); |
438 | data->cntlid = cpu_to_le16(ctrl->cntlid); | 447 | data->cntlid = cpu_to_le16(ctrl->cntlid); |
439 | strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE); | 448 | strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE); |
440 | strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE); | 449 | strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE); |
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h index 89df52c8be97..46e460aee52d 100644 --- a/drivers/nvme/host/fabrics.h +++ b/drivers/nvme/host/fabrics.h | |||
@@ -34,7 +34,7 @@ struct nvmf_host { | |||
34 | struct kref ref; | 34 | struct kref ref; |
35 | struct list_head list; | 35 | struct list_head list; |
36 | char nqn[NVMF_NQN_SIZE]; | 36 | char nqn[NVMF_NQN_SIZE]; |
37 | uuid_le id; | 37 | uuid_be id; |
38 | }; | 38 | }; |
39 | 39 | ||
40 | /** | 40 | /** |
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 8dcf5a960951..60f7eab11865 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
@@ -1693,7 +1693,12 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) | |||
1693 | nvme_suspend_queue(dev->queues[i]); | 1693 | nvme_suspend_queue(dev->queues[i]); |
1694 | 1694 | ||
1695 | if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) { | 1695 | if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) { |
1696 | nvme_suspend_queue(dev->queues[0]); | 1696 | /* A device might become IO incapable very soon during |
1697 | * probe, before the admin queue is configured. Thus, | ||
1698 | * queue_count can be 0 here. | ||
1699 | */ | ||
1700 | if (dev->queue_count) | ||
1701 | nvme_suspend_queue(dev->queues[0]); | ||
1697 | } else { | 1702 | } else { |
1698 | nvme_disable_io_queues(dev); | 1703 | nvme_disable_io_queues(dev); |
1699 | nvme_disable_admin_queue(dev, shutdown); | 1704 | nvme_disable_admin_queue(dev, shutdown); |
@@ -2112,6 +2117,8 @@ static const struct pci_device_id nvme_id_table[] = { | |||
2112 | .driver_data = NVME_QUIRK_IDENTIFY_CNS, }, | 2117 | .driver_data = NVME_QUIRK_IDENTIFY_CNS, }, |
2113 | { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ | 2118 | { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ |
2114 | .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, | 2119 | .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, |
2120 | { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */ | ||
2121 | .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, | ||
2115 | { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, | 2122 | { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, |
2116 | { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) }, | 2123 | { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) }, |
2117 | { 0, } | 2124 | { 0, } |
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 8d2875b4c56d..fbdb2267e460 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c | |||
@@ -43,10 +43,6 @@ | |||
43 | 43 | ||
44 | #define NVME_RDMA_MAX_INLINE_SEGMENTS 1 | 44 | #define NVME_RDMA_MAX_INLINE_SEGMENTS 1 |
45 | 45 | ||
46 | #define NVME_RDMA_MAX_PAGES_PER_MR 512 | ||
47 | |||
48 | #define NVME_RDMA_DEF_RECONNECT_DELAY 20 | ||
49 | |||
50 | /* | 46 | /* |
51 | * We handle AEN commands ourselves and don't even let the | 47 | * We handle AEN commands ourselves and don't even let the |
52 | * block layer know about them. | 48 | * block layer know about them. |
@@ -77,7 +73,6 @@ struct nvme_rdma_request { | |||
77 | u32 num_sge; | 73 | u32 num_sge; |
78 | int nents; | 74 | int nents; |
79 | bool inline_data; | 75 | bool inline_data; |
80 | bool need_inval; | ||
81 | struct ib_reg_wr reg_wr; | 76 | struct ib_reg_wr reg_wr; |
82 | struct ib_cqe reg_cqe; | 77 | struct ib_cqe reg_cqe; |
83 | struct nvme_rdma_queue *queue; | 78 | struct nvme_rdma_queue *queue; |
@@ -87,6 +82,8 @@ struct nvme_rdma_request { | |||
87 | 82 | ||
88 | enum nvme_rdma_queue_flags { | 83 | enum nvme_rdma_queue_flags { |
89 | NVME_RDMA_Q_CONNECTED = (1 << 0), | 84 | NVME_RDMA_Q_CONNECTED = (1 << 0), |
85 | NVME_RDMA_IB_QUEUE_ALLOCATED = (1 << 1), | ||
86 | NVME_RDMA_Q_DELETING = (1 << 2), | ||
90 | }; | 87 | }; |
91 | 88 | ||
92 | struct nvme_rdma_queue { | 89 | struct nvme_rdma_queue { |
@@ -286,7 +283,7 @@ static int nvme_rdma_reinit_request(void *data, struct request *rq) | |||
286 | struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); | 283 | struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); |
287 | int ret = 0; | 284 | int ret = 0; |
288 | 285 | ||
289 | if (!req->need_inval) | 286 | if (!req->mr->need_inval) |
290 | goto out; | 287 | goto out; |
291 | 288 | ||
292 | ib_dereg_mr(req->mr); | 289 | ib_dereg_mr(req->mr); |
@@ -296,9 +293,10 @@ static int nvme_rdma_reinit_request(void *data, struct request *rq) | |||
296 | if (IS_ERR(req->mr)) { | 293 | if (IS_ERR(req->mr)) { |
297 | ret = PTR_ERR(req->mr); | 294 | ret = PTR_ERR(req->mr); |
298 | req->mr = NULL; | 295 | req->mr = NULL; |
296 | goto out; | ||
299 | } | 297 | } |
300 | 298 | ||
301 | req->need_inval = false; | 299 | req->mr->need_inval = false; |
302 | 300 | ||
303 | out: | 301 | out: |
304 | return ret; | 302 | return ret; |
@@ -485,9 +483,14 @@ out_err: | |||
485 | 483 | ||
486 | static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue) | 484 | static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue) |
487 | { | 485 | { |
488 | struct nvme_rdma_device *dev = queue->device; | 486 | struct nvme_rdma_device *dev; |
489 | struct ib_device *ibdev = dev->dev; | 487 | struct ib_device *ibdev; |
488 | |||
489 | if (!test_and_clear_bit(NVME_RDMA_IB_QUEUE_ALLOCATED, &queue->flags)) | ||
490 | return; | ||
490 | 491 | ||
492 | dev = queue->device; | ||
493 | ibdev = dev->dev; | ||
491 | rdma_destroy_qp(queue->cm_id); | 494 | rdma_destroy_qp(queue->cm_id); |
492 | ib_free_cq(queue->ib_cq); | 495 | ib_free_cq(queue->ib_cq); |
493 | 496 | ||
@@ -538,6 +541,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue, | |||
538 | ret = -ENOMEM; | 541 | ret = -ENOMEM; |
539 | goto out_destroy_qp; | 542 | goto out_destroy_qp; |
540 | } | 543 | } |
544 | set_bit(NVME_RDMA_IB_QUEUE_ALLOCATED, &queue->flags); | ||
541 | 545 | ||
542 | return 0; | 546 | return 0; |
543 | 547 | ||
@@ -590,11 +594,13 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl, | |||
590 | goto out_destroy_cm_id; | 594 | goto out_destroy_cm_id; |
591 | } | 595 | } |
592 | 596 | ||
597 | clear_bit(NVME_RDMA_Q_DELETING, &queue->flags); | ||
593 | set_bit(NVME_RDMA_Q_CONNECTED, &queue->flags); | 598 | set_bit(NVME_RDMA_Q_CONNECTED, &queue->flags); |
594 | 599 | ||
595 | return 0; | 600 | return 0; |
596 | 601 | ||
597 | out_destroy_cm_id: | 602 | out_destroy_cm_id: |
603 | nvme_rdma_destroy_queue_ib(queue); | ||
598 | rdma_destroy_id(queue->cm_id); | 604 | rdma_destroy_id(queue->cm_id); |
599 | return ret; | 605 | return ret; |
600 | } | 606 | } |
@@ -613,7 +619,7 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue) | |||
613 | 619 | ||
614 | static void nvme_rdma_stop_and_free_queue(struct nvme_rdma_queue *queue) | 620 | static void nvme_rdma_stop_and_free_queue(struct nvme_rdma_queue *queue) |
615 | { | 621 | { |
616 | if (!test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags)) | 622 | if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags)) |
617 | return; | 623 | return; |
618 | nvme_rdma_stop_queue(queue); | 624 | nvme_rdma_stop_queue(queue); |
619 | nvme_rdma_free_queue(queue); | 625 | nvme_rdma_free_queue(queue); |
@@ -645,7 +651,8 @@ static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl) | |||
645 | int i, ret; | 651 | int i, ret; |
646 | 652 | ||
647 | for (i = 1; i < ctrl->queue_count; i++) { | 653 | for (i = 1; i < ctrl->queue_count; i++) { |
648 | ret = nvme_rdma_init_queue(ctrl, i, ctrl->ctrl.sqsize); | 654 | ret = nvme_rdma_init_queue(ctrl, i, |
655 | ctrl->ctrl.opts->queue_size); | ||
649 | if (ret) { | 656 | if (ret) { |
650 | dev_info(ctrl->ctrl.device, | 657 | dev_info(ctrl->ctrl.device, |
651 | "failed to initialize i/o queue: %d\n", ret); | 658 | "failed to initialize i/o queue: %d\n", ret); |
@@ -656,7 +663,7 @@ static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl) | |||
656 | return 0; | 663 | return 0; |
657 | 664 | ||
658 | out_free_queues: | 665 | out_free_queues: |
659 | for (; i >= 1; i--) | 666 | for (i--; i >= 1; i--) |
660 | nvme_rdma_stop_and_free_queue(&ctrl->queues[i]); | 667 | nvme_rdma_stop_and_free_queue(&ctrl->queues[i]); |
661 | 668 | ||
662 | return ret; | 669 | return ret; |
@@ -765,8 +772,13 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work) | |||
765 | { | 772 | { |
766 | struct nvme_rdma_ctrl *ctrl = container_of(work, | 773 | struct nvme_rdma_ctrl *ctrl = container_of(work, |
767 | struct nvme_rdma_ctrl, err_work); | 774 | struct nvme_rdma_ctrl, err_work); |
775 | int i; | ||
768 | 776 | ||
769 | nvme_stop_keep_alive(&ctrl->ctrl); | 777 | nvme_stop_keep_alive(&ctrl->ctrl); |
778 | |||
779 | for (i = 0; i < ctrl->queue_count; i++) | ||
780 | clear_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[i].flags); | ||
781 | |||
770 | if (ctrl->queue_count > 1) | 782 | if (ctrl->queue_count > 1) |
771 | nvme_stop_queues(&ctrl->ctrl); | 783 | nvme_stop_queues(&ctrl->ctrl); |
772 | blk_mq_stop_hw_queues(ctrl->ctrl.admin_q); | 784 | blk_mq_stop_hw_queues(ctrl->ctrl.admin_q); |
@@ -849,7 +861,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue, | |||
849 | if (!blk_rq_bytes(rq)) | 861 | if (!blk_rq_bytes(rq)) |
850 | return; | 862 | return; |
851 | 863 | ||
852 | if (req->need_inval) { | 864 | if (req->mr->need_inval) { |
853 | res = nvme_rdma_inv_rkey(queue, req); | 865 | res = nvme_rdma_inv_rkey(queue, req); |
854 | if (res < 0) { | 866 | if (res < 0) { |
855 | dev_err(ctrl->ctrl.device, | 867 | dev_err(ctrl->ctrl.device, |
@@ -935,7 +947,7 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue, | |||
935 | IB_ACCESS_REMOTE_READ | | 947 | IB_ACCESS_REMOTE_READ | |
936 | IB_ACCESS_REMOTE_WRITE; | 948 | IB_ACCESS_REMOTE_WRITE; |
937 | 949 | ||
938 | req->need_inval = true; | 950 | req->mr->need_inval = true; |
939 | 951 | ||
940 | sg->addr = cpu_to_le64(req->mr->iova); | 952 | sg->addr = cpu_to_le64(req->mr->iova); |
941 | put_unaligned_le24(req->mr->length, sg->length); | 953 | put_unaligned_le24(req->mr->length, sg->length); |
@@ -958,7 +970,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue, | |||
958 | 970 | ||
959 | req->num_sge = 1; | 971 | req->num_sge = 1; |
960 | req->inline_data = false; | 972 | req->inline_data = false; |
961 | req->need_inval = false; | 973 | req->mr->need_inval = false; |
962 | 974 | ||
963 | c->common.flags |= NVME_CMD_SGL_METABUF; | 975 | c->common.flags |= NVME_CMD_SGL_METABUF; |
964 | 976 | ||
@@ -1145,7 +1157,7 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue, | |||
1145 | 1157 | ||
1146 | if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) && | 1158 | if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) && |
1147 | wc->ex.invalidate_rkey == req->mr->rkey) | 1159 | wc->ex.invalidate_rkey == req->mr->rkey) |
1148 | req->need_inval = false; | 1160 | req->mr->need_inval = false; |
1149 | 1161 | ||
1150 | blk_mq_complete_request(rq, status); | 1162 | blk_mq_complete_request(rq, status); |
1151 | 1163 | ||
@@ -1278,8 +1290,22 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue) | |||
1278 | 1290 | ||
1279 | priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0); | 1291 | priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0); |
1280 | priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue)); | 1292 | priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue)); |
1281 | priv.hrqsize = cpu_to_le16(queue->queue_size); | 1293 | /* |
1282 | priv.hsqsize = cpu_to_le16(queue->queue_size); | 1294 | * set the admin queue depth to the minimum size |
1295 | * specified by the Fabrics standard. | ||
1296 | */ | ||
1297 | if (priv.qid == 0) { | ||
1298 | priv.hrqsize = cpu_to_le16(NVMF_AQ_DEPTH); | ||
1299 | priv.hsqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1); | ||
1300 | } else { | ||
1301 | /* | ||
1302 | * current interpretation of the fabrics spec | ||
1303 | * is at minimum you make hrqsize sqsize+1, or a | ||
1304 | * 1's based representation of sqsize. | ||
1305 | */ | ||
1306 | priv.hrqsize = cpu_to_le16(queue->queue_size); | ||
1307 | priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize); | ||
1308 | } | ||
1283 | 1309 | ||
1284 | ret = rdma_connect(queue->cm_id, ¶m); | 1310 | ret = rdma_connect(queue->cm_id, ¶m); |
1285 | if (ret) { | 1311 | if (ret) { |
@@ -1295,58 +1321,6 @@ out_destroy_queue_ib: | |||
1295 | return ret; | 1321 | return ret; |
1296 | } | 1322 | } |
1297 | 1323 | ||
1298 | /** | ||
1299 | * nvme_rdma_device_unplug() - Handle RDMA device unplug | ||
1300 | * @queue: Queue that owns the cm_id that caught the event | ||
1301 | * | ||
1302 | * DEVICE_REMOVAL event notifies us that the RDMA device is about | ||
1303 | * to unplug so we should take care of destroying our RDMA resources. | ||
1304 | * This event will be generated for each allocated cm_id. | ||
1305 | * | ||
1306 | * In our case, the RDMA resources are managed per controller and not | ||
1307 | * only per queue. So the way we handle this is we trigger an implicit | ||
1308 | * controller deletion upon the first DEVICE_REMOVAL event we see, and | ||
1309 | * hold the event inflight until the controller deletion is completed. | ||
1310 | * | ||
1311 | * One exception that we need to handle is the destruction of the cm_id | ||
1312 | * that caught the event. Since we hold the callout until the controller | ||
1313 | * deletion is completed, we'll deadlock if the controller deletion will | ||
1314 | * call rdma_destroy_id on this queue's cm_id. Thus, we claim ownership | ||
1315 | * of destroying this queue before-hand, destroy the queue resources, | ||
1316 | * then queue the controller deletion which won't destroy this queue and | ||
1317 | * we destroy the cm_id implicitely by returning a non-zero rc to the callout. | ||
1318 | */ | ||
1319 | static int nvme_rdma_device_unplug(struct nvme_rdma_queue *queue) | ||
1320 | { | ||
1321 | struct nvme_rdma_ctrl *ctrl = queue->ctrl; | ||
1322 | int ret; | ||
1323 | |||
1324 | /* Own the controller deletion */ | ||
1325 | if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING)) | ||
1326 | return 0; | ||
1327 | |||
1328 | dev_warn(ctrl->ctrl.device, | ||
1329 | "Got rdma device removal event, deleting ctrl\n"); | ||
1330 | |||
1331 | /* Get rid of reconnect work if its running */ | ||
1332 | cancel_delayed_work_sync(&ctrl->reconnect_work); | ||
1333 | |||
1334 | /* Disable the queue so ctrl delete won't free it */ | ||
1335 | if (test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags)) { | ||
1336 | /* Free this queue ourselves */ | ||
1337 | nvme_rdma_stop_queue(queue); | ||
1338 | nvme_rdma_destroy_queue_ib(queue); | ||
1339 | |||
1340 | /* Return non-zero so the cm_id will destroy implicitly */ | ||
1341 | ret = 1; | ||
1342 | } | ||
1343 | |||
1344 | /* Queue controller deletion */ | ||
1345 | queue_work(nvme_rdma_wq, &ctrl->delete_work); | ||
1346 | flush_work(&ctrl->delete_work); | ||
1347 | return ret; | ||
1348 | } | ||
1349 | |||
1350 | static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, | 1324 | static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, |
1351 | struct rdma_cm_event *ev) | 1325 | struct rdma_cm_event *ev) |
1352 | { | 1326 | { |
@@ -1388,8 +1362,8 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, | |||
1388 | nvme_rdma_error_recovery(queue->ctrl); | 1362 | nvme_rdma_error_recovery(queue->ctrl); |
1389 | break; | 1363 | break; |
1390 | case RDMA_CM_EVENT_DEVICE_REMOVAL: | 1364 | case RDMA_CM_EVENT_DEVICE_REMOVAL: |
1391 | /* return 1 means impliciy CM ID destroy */ | 1365 | /* device removal is handled via the ib_client API */ |
1392 | return nvme_rdma_device_unplug(queue); | 1366 | break; |
1393 | default: | 1367 | default: |
1394 | dev_err(queue->ctrl->ctrl.device, | 1368 | dev_err(queue->ctrl->ctrl.device, |
1395 | "Unexpected RDMA CM event (%d)\n", ev->event); | 1369 | "Unexpected RDMA CM event (%d)\n", ev->event); |
@@ -1461,7 +1435,7 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
1461 | if (rq->cmd_type == REQ_TYPE_FS && req_op(rq) == REQ_OP_FLUSH) | 1435 | if (rq->cmd_type == REQ_TYPE_FS && req_op(rq) == REQ_OP_FLUSH) |
1462 | flush = true; | 1436 | flush = true; |
1463 | ret = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge, | 1437 | ret = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge, |
1464 | req->need_inval ? &req->reg_wr.wr : NULL, flush); | 1438 | req->mr->need_inval ? &req->reg_wr.wr : NULL, flush); |
1465 | if (ret) { | 1439 | if (ret) { |
1466 | nvme_rdma_unmap_data(queue, rq); | 1440 | nvme_rdma_unmap_data(queue, rq); |
1467 | goto err; | 1441 | goto err; |
@@ -1690,15 +1664,19 @@ static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl) | |||
1690 | static int nvme_rdma_del_ctrl(struct nvme_ctrl *nctrl) | 1664 | static int nvme_rdma_del_ctrl(struct nvme_ctrl *nctrl) |
1691 | { | 1665 | { |
1692 | struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl); | 1666 | struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl); |
1693 | int ret; | 1667 | int ret = 0; |
1694 | 1668 | ||
1669 | /* | ||
1670 | * Keep a reference until all work is flushed since | ||
1671 | * __nvme_rdma_del_ctrl can free the ctrl mem | ||
1672 | */ | ||
1673 | if (!kref_get_unless_zero(&ctrl->ctrl.kref)) | ||
1674 | return -EBUSY; | ||
1695 | ret = __nvme_rdma_del_ctrl(ctrl); | 1675 | ret = __nvme_rdma_del_ctrl(ctrl); |
1696 | if (ret) | 1676 | if (!ret) |
1697 | return ret; | 1677 | flush_work(&ctrl->delete_work); |
1698 | 1678 | nvme_put_ctrl(&ctrl->ctrl); | |
1699 | flush_work(&ctrl->delete_work); | 1679 | return ret; |
1700 | |||
1701 | return 0; | ||
1702 | } | 1680 | } |
1703 | 1681 | ||
1704 | static void nvme_rdma_remove_ctrl_work(struct work_struct *work) | 1682 | static void nvme_rdma_remove_ctrl_work(struct work_struct *work) |
@@ -1816,7 +1794,7 @@ static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl) | |||
1816 | 1794 | ||
1817 | memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); | 1795 | memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); |
1818 | ctrl->tag_set.ops = &nvme_rdma_mq_ops; | 1796 | ctrl->tag_set.ops = &nvme_rdma_mq_ops; |
1819 | ctrl->tag_set.queue_depth = ctrl->ctrl.sqsize; | 1797 | ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; |
1820 | ctrl->tag_set.reserved_tags = 1; /* fabric connect */ | 1798 | ctrl->tag_set.reserved_tags = 1; /* fabric connect */ |
1821 | ctrl->tag_set.numa_node = NUMA_NO_NODE; | 1799 | ctrl->tag_set.numa_node = NUMA_NO_NODE; |
1822 | ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; | 1800 | ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; |
@@ -1914,7 +1892,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, | |||
1914 | spin_lock_init(&ctrl->lock); | 1892 | spin_lock_init(&ctrl->lock); |
1915 | 1893 | ||
1916 | ctrl->queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */ | 1894 | ctrl->queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */ |
1917 | ctrl->ctrl.sqsize = opts->queue_size; | 1895 | ctrl->ctrl.sqsize = opts->queue_size - 1; |
1918 | ctrl->ctrl.kato = opts->kato; | 1896 | ctrl->ctrl.kato = opts->kato; |
1919 | 1897 | ||
1920 | ret = -ENOMEM; | 1898 | ret = -ENOMEM; |
@@ -1995,27 +1973,57 @@ static struct nvmf_transport_ops nvme_rdma_transport = { | |||
1995 | .create_ctrl = nvme_rdma_create_ctrl, | 1973 | .create_ctrl = nvme_rdma_create_ctrl, |
1996 | }; | 1974 | }; |
1997 | 1975 | ||
1976 | static void nvme_rdma_add_one(struct ib_device *ib_device) | ||
1977 | { | ||
1978 | } | ||
1979 | |||
1980 | static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data) | ||
1981 | { | ||
1982 | struct nvme_rdma_ctrl *ctrl; | ||
1983 | |||
1984 | /* Delete all controllers using this device */ | ||
1985 | mutex_lock(&nvme_rdma_ctrl_mutex); | ||
1986 | list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) { | ||
1987 | if (ctrl->device->dev != ib_device) | ||
1988 | continue; | ||
1989 | dev_info(ctrl->ctrl.device, | ||
1990 | "Removing ctrl: NQN \"%s\", addr %pISp\n", | ||
1991 | ctrl->ctrl.opts->subsysnqn, &ctrl->addr); | ||
1992 | __nvme_rdma_del_ctrl(ctrl); | ||
1993 | } | ||
1994 | mutex_unlock(&nvme_rdma_ctrl_mutex); | ||
1995 | |||
1996 | flush_workqueue(nvme_rdma_wq); | ||
1997 | } | ||
1998 | |||
1999 | static struct ib_client nvme_rdma_ib_client = { | ||
2000 | .name = "nvme_rdma", | ||
2001 | .add = nvme_rdma_add_one, | ||
2002 | .remove = nvme_rdma_remove_one | ||
2003 | }; | ||
2004 | |||
1998 | static int __init nvme_rdma_init_module(void) | 2005 | static int __init nvme_rdma_init_module(void) |
1999 | { | 2006 | { |
2007 | int ret; | ||
2008 | |||
2000 | nvme_rdma_wq = create_workqueue("nvme_rdma_wq"); | 2009 | nvme_rdma_wq = create_workqueue("nvme_rdma_wq"); |
2001 | if (!nvme_rdma_wq) | 2010 | if (!nvme_rdma_wq) |
2002 | return -ENOMEM; | 2011 | return -ENOMEM; |
2003 | 2012 | ||
2013 | ret = ib_register_client(&nvme_rdma_ib_client); | ||
2014 | if (ret) { | ||
2015 | destroy_workqueue(nvme_rdma_wq); | ||
2016 | return ret; | ||
2017 | } | ||
2018 | |||
2004 | nvmf_register_transport(&nvme_rdma_transport); | 2019 | nvmf_register_transport(&nvme_rdma_transport); |
2005 | return 0; | 2020 | return 0; |
2006 | } | 2021 | } |
2007 | 2022 | ||
2008 | static void __exit nvme_rdma_cleanup_module(void) | 2023 | static void __exit nvme_rdma_cleanup_module(void) |
2009 | { | 2024 | { |
2010 | struct nvme_rdma_ctrl *ctrl; | ||
2011 | |||
2012 | nvmf_unregister_transport(&nvme_rdma_transport); | 2025 | nvmf_unregister_transport(&nvme_rdma_transport); |
2013 | 2026 | ib_unregister_client(&nvme_rdma_ib_client); | |
2014 | mutex_lock(&nvme_rdma_ctrl_mutex); | ||
2015 | list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) | ||
2016 | __nvme_rdma_del_ctrl(ctrl); | ||
2017 | mutex_unlock(&nvme_rdma_ctrl_mutex); | ||
2018 | |||
2019 | destroy_workqueue(nvme_rdma_wq); | 2027 | destroy_workqueue(nvme_rdma_wq); |
2020 | } | 2028 | } |
2021 | 2029 | ||
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig index a5c31cbeb481..3a5b9d0576cb 100644 --- a/drivers/nvme/target/Kconfig +++ b/drivers/nvme/target/Kconfig | |||
@@ -15,8 +15,8 @@ config NVME_TARGET | |||
15 | 15 | ||
16 | config NVME_TARGET_LOOP | 16 | config NVME_TARGET_LOOP |
17 | tristate "NVMe loopback device support" | 17 | tristate "NVMe loopback device support" |
18 | depends on BLK_DEV_NVME | ||
19 | depends on NVME_TARGET | 18 | depends on NVME_TARGET |
19 | select NVME_CORE | ||
20 | select NVME_FABRICS | 20 | select NVME_FABRICS |
21 | select SG_POOL | 21 | select SG_POOL |
22 | help | 22 | help |
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 7affd40a6b33..395e60dad835 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c | |||
@@ -556,7 +556,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl) | |||
556 | 556 | ||
557 | memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); | 557 | memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); |
558 | ctrl->tag_set.ops = &nvme_loop_mq_ops; | 558 | ctrl->tag_set.ops = &nvme_loop_mq_ops; |
559 | ctrl->tag_set.queue_depth = ctrl->ctrl.sqsize; | 559 | ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; |
560 | ctrl->tag_set.reserved_tags = 1; /* fabric connect */ | 560 | ctrl->tag_set.reserved_tags = 1; /* fabric connect */ |
561 | ctrl->tag_set.numa_node = NUMA_NO_NODE; | 561 | ctrl->tag_set.numa_node = NUMA_NO_NODE; |
562 | ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; | 562 | ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; |
@@ -620,7 +620,7 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev, | |||
620 | 620 | ||
621 | ret = -ENOMEM; | 621 | ret = -ENOMEM; |
622 | 622 | ||
623 | ctrl->ctrl.sqsize = opts->queue_size; | 623 | ctrl->ctrl.sqsize = opts->queue_size - 1; |
624 | ctrl->ctrl.kato = opts->kato; | 624 | ctrl->ctrl.kato = opts->kato; |
625 | 625 | ||
626 | ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues), | 626 | ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues), |
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index b4d648536c3e..1cbe6e053b5b 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c | |||
@@ -978,10 +978,11 @@ static void nvmet_rdma_release_queue_work(struct work_struct *w) | |||
978 | container_of(w, struct nvmet_rdma_queue, release_work); | 978 | container_of(w, struct nvmet_rdma_queue, release_work); |
979 | struct rdma_cm_id *cm_id = queue->cm_id; | 979 | struct rdma_cm_id *cm_id = queue->cm_id; |
980 | struct nvmet_rdma_device *dev = queue->dev; | 980 | struct nvmet_rdma_device *dev = queue->dev; |
981 | enum nvmet_rdma_queue_state state = queue->state; | ||
981 | 982 | ||
982 | nvmet_rdma_free_queue(queue); | 983 | nvmet_rdma_free_queue(queue); |
983 | 984 | ||
984 | if (queue->state != NVMET_RDMA_IN_DEVICE_REMOVAL) | 985 | if (state != NVMET_RDMA_IN_DEVICE_REMOVAL) |
985 | rdma_destroy_id(cm_id); | 986 | rdma_destroy_id(cm_id); |
986 | 987 | ||
987 | kref_put(&dev->ref, nvmet_rdma_free_dev); | 988 | kref_put(&dev->ref, nvmet_rdma_free_dev); |
@@ -1003,10 +1004,10 @@ nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn, | |||
1003 | queue->host_qid = le16_to_cpu(req->qid); | 1004 | queue->host_qid = le16_to_cpu(req->qid); |
1004 | 1005 | ||
1005 | /* | 1006 | /* |
1006 | * req->hsqsize corresponds to our recv queue size | 1007 | * req->hsqsize corresponds to our recv queue size plus 1 |
1007 | * req->hrqsize corresponds to our send queue size | 1008 | * req->hrqsize corresponds to our send queue size |
1008 | */ | 1009 | */ |
1009 | queue->recv_queue_size = le16_to_cpu(req->hsqsize); | 1010 | queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1; |
1010 | queue->send_queue_size = le16_to_cpu(req->hrqsize); | 1011 | queue->send_queue_size = le16_to_cpu(req->hrqsize); |
1011 | 1012 | ||
1012 | if (!queue->host_qid && queue->recv_queue_size > NVMF_AQ_DEPTH) | 1013 | if (!queue->host_qid && queue->recv_queue_size > NVMF_AQ_DEPTH) |
diff --git a/drivers/of/base.c b/drivers/of/base.c index 7792266db259..3ce69536a7b3 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c | |||
@@ -1631,8 +1631,7 @@ static int __of_parse_phandle_with_args(const struct device_node *np, | |||
1631 | */ | 1631 | */ |
1632 | 1632 | ||
1633 | err: | 1633 | err: |
1634 | if (it.node) | 1634 | of_node_put(it.node); |
1635 | of_node_put(it.node); | ||
1636 | return rc; | 1635 | return rc; |
1637 | } | 1636 | } |
1638 | 1637 | ||
@@ -2343,20 +2342,13 @@ struct device_node *of_graph_get_endpoint_by_regs( | |||
2343 | const struct device_node *parent, int port_reg, int reg) | 2342 | const struct device_node *parent, int port_reg, int reg) |
2344 | { | 2343 | { |
2345 | struct of_endpoint endpoint; | 2344 | struct of_endpoint endpoint; |
2346 | struct device_node *node, *prev_node = NULL; | 2345 | struct device_node *node = NULL; |
2347 | |||
2348 | while (1) { | ||
2349 | node = of_graph_get_next_endpoint(parent, prev_node); | ||
2350 | of_node_put(prev_node); | ||
2351 | if (!node) | ||
2352 | break; | ||
2353 | 2346 | ||
2347 | for_each_endpoint_of_node(parent, node) { | ||
2354 | of_graph_parse_endpoint(node, &endpoint); | 2348 | of_graph_parse_endpoint(node, &endpoint); |
2355 | if (((port_reg == -1) || (endpoint.port == port_reg)) && | 2349 | if (((port_reg == -1) || (endpoint.port == port_reg)) && |
2356 | ((reg == -1) || (endpoint.id == reg))) | 2350 | ((reg == -1) || (endpoint.id == reg))) |
2357 | return node; | 2351 | return node; |
2358 | |||
2359 | prev_node = node; | ||
2360 | } | 2352 | } |
2361 | 2353 | ||
2362 | return NULL; | 2354 | return NULL; |
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 55f1b8391149..085c6389afd1 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c | |||
@@ -517,7 +517,7 @@ static void *__unflatten_device_tree(const void *blob, | |||
517 | pr_warning("End of tree marker overwritten: %08x\n", | 517 | pr_warning("End of tree marker overwritten: %08x\n", |
518 | be32_to_cpup(mem + size)); | 518 | be32_to_cpup(mem + size)); |
519 | 519 | ||
520 | if (detached) { | 520 | if (detached && mynodes) { |
521 | of_node_set_flag(*mynodes, OF_DETACHED); | 521 | of_node_set_flag(*mynodes, OF_DETACHED); |
522 | pr_debug("unflattened tree is detached\n"); | 522 | pr_debug("unflattened tree is detached\n"); |
523 | } | 523 | } |
diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 89a71c6074fc..a2e68f740eda 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c | |||
@@ -544,12 +544,15 @@ void __init of_irq_init(const struct of_device_id *matches) | |||
544 | 544 | ||
545 | list_del(&desc->list); | 545 | list_del(&desc->list); |
546 | 546 | ||
547 | of_node_set_flag(desc->dev, OF_POPULATED); | ||
548 | |||
547 | pr_debug("of_irq_init: init %s (%p), parent %p\n", | 549 | pr_debug("of_irq_init: init %s (%p), parent %p\n", |
548 | desc->dev->full_name, | 550 | desc->dev->full_name, |
549 | desc->dev, desc->interrupt_parent); | 551 | desc->dev, desc->interrupt_parent); |
550 | ret = desc->irq_init_cb(desc->dev, | 552 | ret = desc->irq_init_cb(desc->dev, |
551 | desc->interrupt_parent); | 553 | desc->interrupt_parent); |
552 | if (ret) { | 554 | if (ret) { |
555 | of_node_clear_flag(desc->dev, OF_POPULATED); | ||
553 | kfree(desc); | 556 | kfree(desc); |
554 | continue; | 557 | continue; |
555 | } | 558 | } |
@@ -559,8 +562,6 @@ void __init of_irq_init(const struct of_device_id *matches) | |||
559 | * its children can get processed in a subsequent pass. | 562 | * its children can get processed in a subsequent pass. |
560 | */ | 563 | */ |
561 | list_add_tail(&desc->list, &intc_parent_list); | 564 | list_add_tail(&desc->list, &intc_parent_list); |
562 | |||
563 | of_node_set_flag(desc->dev, OF_POPULATED); | ||
564 | } | 565 | } |
565 | 566 | ||
566 | /* Get the next pending parent that might have children */ | 567 | /* Get the next pending parent that might have children */ |
diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 8aa197691074..f39ccd5aa701 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c | |||
@@ -497,6 +497,7 @@ int of_platform_default_populate(struct device_node *root, | |||
497 | } | 497 | } |
498 | EXPORT_SYMBOL_GPL(of_platform_default_populate); | 498 | EXPORT_SYMBOL_GPL(of_platform_default_populate); |
499 | 499 | ||
500 | #ifndef CONFIG_PPC | ||
500 | static int __init of_platform_default_populate_init(void) | 501 | static int __init of_platform_default_populate_init(void) |
501 | { | 502 | { |
502 | struct device_node *node; | 503 | struct device_node *node; |
@@ -521,6 +522,7 @@ static int __init of_platform_default_populate_init(void) | |||
521 | return 0; | 522 | return 0; |
522 | } | 523 | } |
523 | arch_initcall_sync(of_platform_default_populate_init); | 524 | arch_initcall_sync(of_platform_default_populate_init); |
525 | #endif | ||
524 | 526 | ||
525 | static int of_platform_device_destroy(struct device *dev, void *data) | 527 | static int of_platform_device_destroy(struct device *dev, void *data) |
526 | { | 528 | { |
diff --git a/drivers/pci/host-bridge.c b/drivers/pci/host-bridge.c index 5f4a2e04c8d7..add66236215c 100644 --- a/drivers/pci/host-bridge.c +++ b/drivers/pci/host-bridge.c | |||
@@ -44,6 +44,7 @@ void pci_set_host_bridge_release(struct pci_host_bridge *bridge, | |||
44 | bridge->release_fn = release_fn; | 44 | bridge->release_fn = release_fn; |
45 | bridge->release_data = release_data; | 45 | bridge->release_data = release_data; |
46 | } | 46 | } |
47 | EXPORT_SYMBOL_GPL(pci_set_host_bridge_release); | ||
47 | 48 | ||
48 | void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region, | 49 | void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region, |
49 | struct resource *res) | 50 | struct resource *res) |
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index eafa6138a6b8..98f12223c734 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c | |||
@@ -1069,7 +1069,7 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, | |||
1069 | nvec = maxvec; | 1069 | nvec = maxvec; |
1070 | 1070 | ||
1071 | for (;;) { | 1071 | for (;;) { |
1072 | if (!(flags & PCI_IRQ_NOAFFINITY)) { | 1072 | if (flags & PCI_IRQ_AFFINITY) { |
1073 | dev->irq_affinity = irq_create_affinity_mask(&nvec); | 1073 | dev->irq_affinity = irq_create_affinity_mask(&nvec); |
1074 | if (nvec < minvec) | 1074 | if (nvec < minvec) |
1075 | return -ENOSPC; | 1075 | return -ENOSPC; |
@@ -1105,7 +1105,7 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, | |||
1105 | **/ | 1105 | **/ |
1106 | int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec) | 1106 | int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec) |
1107 | { | 1107 | { |
1108 | return __pci_enable_msi_range(dev, minvec, maxvec, PCI_IRQ_NOAFFINITY); | 1108 | return __pci_enable_msi_range(dev, minvec, maxvec, 0); |
1109 | } | 1109 | } |
1110 | EXPORT_SYMBOL(pci_enable_msi_range); | 1110 | EXPORT_SYMBOL(pci_enable_msi_range); |
1111 | 1111 | ||
@@ -1120,7 +1120,7 @@ static int __pci_enable_msix_range(struct pci_dev *dev, | |||
1120 | return -ERANGE; | 1120 | return -ERANGE; |
1121 | 1121 | ||
1122 | for (;;) { | 1122 | for (;;) { |
1123 | if (!(flags & PCI_IRQ_NOAFFINITY)) { | 1123 | if (flags & PCI_IRQ_AFFINITY) { |
1124 | dev->irq_affinity = irq_create_affinity_mask(&nvec); | 1124 | dev->irq_affinity = irq_create_affinity_mask(&nvec); |
1125 | if (nvec < minvec) | 1125 | if (nvec < minvec) |
1126 | return -ENOSPC; | 1126 | return -ENOSPC; |
@@ -1160,8 +1160,7 @@ static int __pci_enable_msix_range(struct pci_dev *dev, | |||
1160 | int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, | 1160 | int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, |
1161 | int minvec, int maxvec) | 1161 | int minvec, int maxvec) |
1162 | { | 1162 | { |
1163 | return __pci_enable_msix_range(dev, entries, minvec, maxvec, | 1163 | return __pci_enable_msix_range(dev, entries, minvec, maxvec, 0); |
1164 | PCI_IRQ_NOAFFINITY); | ||
1165 | } | 1164 | } |
1166 | EXPORT_SYMBOL(pci_enable_msix_range); | 1165 | EXPORT_SYMBOL(pci_enable_msix_range); |
1167 | 1166 | ||
@@ -1187,22 +1186,25 @@ int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, | |||
1187 | { | 1186 | { |
1188 | int vecs = -ENOSPC; | 1187 | int vecs = -ENOSPC; |
1189 | 1188 | ||
1190 | if (!(flags & PCI_IRQ_NOMSIX)) { | 1189 | if (flags & PCI_IRQ_MSIX) { |
1191 | vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs, | 1190 | vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs, |
1192 | flags); | 1191 | flags); |
1193 | if (vecs > 0) | 1192 | if (vecs > 0) |
1194 | return vecs; | 1193 | return vecs; |
1195 | } | 1194 | } |
1196 | 1195 | ||
1197 | if (!(flags & PCI_IRQ_NOMSI)) { | 1196 | if (flags & PCI_IRQ_MSI) { |
1198 | vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, flags); | 1197 | vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, flags); |
1199 | if (vecs > 0) | 1198 | if (vecs > 0) |
1200 | return vecs; | 1199 | return vecs; |
1201 | } | 1200 | } |
1202 | 1201 | ||
1203 | /* use legacy irq if allowed */ | 1202 | /* use legacy irq if allowed */ |
1204 | if (!(flags & PCI_IRQ_NOLEGACY) && min_vecs == 1) | 1203 | if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1) { |
1204 | pci_intx(dev, 1); | ||
1205 | return 1; | 1205 | return 1; |
1206 | } | ||
1207 | |||
1206 | return vecs; | 1208 | return vecs; |
1207 | } | 1209 | } |
1208 | EXPORT_SYMBOL(pci_alloc_irq_vectors); | 1210 | EXPORT_SYMBOL(pci_alloc_irq_vectors); |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 37ff0158e45f..44e0ff37480b 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -3327,9 +3327,9 @@ static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev) | |||
3327 | if (nhi->vendor != PCI_VENDOR_ID_INTEL | 3327 | if (nhi->vendor != PCI_VENDOR_ID_INTEL |
3328 | || (nhi->device != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE && | 3328 | || (nhi->device != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE && |
3329 | nhi->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C && | 3329 | nhi->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C && |
3330 | nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI && | ||
3330 | nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI) | 3331 | nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI) |
3331 | || nhi->subsystem_vendor != 0x2222 | 3332 | || nhi->class != PCI_CLASS_SYSTEM_OTHER << 8) |
3332 | || nhi->subsystem_device != 0x1111) | ||
3333 | goto out; | 3333 | goto out; |
3334 | dev_info(&dev->dev, "quirk: waiting for thunderbolt to reestablish PCI tunnels...\n"); | 3334 | dev_info(&dev->dev, "quirk: waiting for thunderbolt to reestablish PCI tunnels...\n"); |
3335 | device_pm_wait_for_dev(&dev->dev, &nhi->dev); | 3335 | device_pm_wait_for_dev(&dev->dev, &nhi->dev); |
@@ -3344,6 +3344,9 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, | |||
3344 | PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C, | 3344 | PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C, |
3345 | quirk_apple_wait_for_thunderbolt); | 3345 | quirk_apple_wait_for_thunderbolt); |
3346 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, | 3346 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, |
3347 | PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE, | ||
3348 | quirk_apple_wait_for_thunderbolt); | ||
3349 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, | ||
3347 | PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE, | 3350 | PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE, |
3348 | quirk_apple_wait_for_thunderbolt); | 3351 | quirk_apple_wait_for_thunderbolt); |
3349 | #endif | 3352 | #endif |
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c index d1ef7acf6930..f9357e09e9b3 100644 --- a/drivers/pci/remove.c +++ b/drivers/pci/remove.c | |||
@@ -40,6 +40,7 @@ static void pci_destroy_dev(struct pci_dev *dev) | |||
40 | list_del(&dev->bus_list); | 40 | list_del(&dev->bus_list); |
41 | up_write(&pci_bus_sem); | 41 | up_write(&pci_bus_sem); |
42 | 42 | ||
43 | pci_bridge_d3_device_removed(dev); | ||
43 | pci_free_resources(dev); | 44 | pci_free_resources(dev); |
44 | put_device(&dev->dev); | 45 | put_device(&dev->dev); |
45 | } | 46 | } |
@@ -96,8 +97,6 @@ static void pci_remove_bus_device(struct pci_dev *dev) | |||
96 | dev->subordinate = NULL; | 97 | dev->subordinate = NULL; |
97 | } | 98 | } |
98 | 99 | ||
99 | pci_bridge_d3_device_removed(dev); | ||
100 | |||
101 | pci_destroy_dev(dev); | 100 | pci_destroy_dev(dev); |
102 | } | 101 | } |
103 | 102 | ||
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c index 489ea1098c96..69b5e811ea2b 100644 --- a/drivers/pcmcia/ds.c +++ b/drivers/pcmcia/ds.c | |||
@@ -977,7 +977,7 @@ static int pcmcia_bus_uevent(struct device *dev, struct kobj_uevent_env *env) | |||
977 | 977 | ||
978 | /************************ runtime PM support ***************************/ | 978 | /************************ runtime PM support ***************************/ |
979 | 979 | ||
980 | static int pcmcia_dev_suspend(struct device *dev, pm_message_t state); | 980 | static int pcmcia_dev_suspend(struct device *dev); |
981 | static int pcmcia_dev_resume(struct device *dev); | 981 | static int pcmcia_dev_resume(struct device *dev); |
982 | 982 | ||
983 | static int runtime_suspend(struct device *dev) | 983 | static int runtime_suspend(struct device *dev) |
@@ -985,7 +985,7 @@ static int runtime_suspend(struct device *dev) | |||
985 | int rc; | 985 | int rc; |
986 | 986 | ||
987 | device_lock(dev); | 987 | device_lock(dev); |
988 | rc = pcmcia_dev_suspend(dev, PMSG_SUSPEND); | 988 | rc = pcmcia_dev_suspend(dev); |
989 | device_unlock(dev); | 989 | device_unlock(dev); |
990 | return rc; | 990 | return rc; |
991 | } | 991 | } |
@@ -1135,7 +1135,7 @@ ATTRIBUTE_GROUPS(pcmcia_dev); | |||
1135 | 1135 | ||
1136 | /* PM support, also needed for reset */ | 1136 | /* PM support, also needed for reset */ |
1137 | 1137 | ||
1138 | static int pcmcia_dev_suspend(struct device *dev, pm_message_t state) | 1138 | static int pcmcia_dev_suspend(struct device *dev) |
1139 | { | 1139 | { |
1140 | struct pcmcia_device *p_dev = to_pcmcia_dev(dev); | 1140 | struct pcmcia_device *p_dev = to_pcmcia_dev(dev); |
1141 | struct pcmcia_driver *p_drv = NULL; | 1141 | struct pcmcia_driver *p_drv = NULL; |
@@ -1410,6 +1410,9 @@ static struct class_interface pcmcia_bus_interface __refdata = { | |||
1410 | .remove_dev = &pcmcia_bus_remove_socket, | 1410 | .remove_dev = &pcmcia_bus_remove_socket, |
1411 | }; | 1411 | }; |
1412 | 1412 | ||
1413 | static const struct dev_pm_ops pcmcia_bus_pm_ops = { | ||
1414 | SET_SYSTEM_SLEEP_PM_OPS(pcmcia_dev_suspend, pcmcia_dev_resume) | ||
1415 | }; | ||
1413 | 1416 | ||
1414 | struct bus_type pcmcia_bus_type = { | 1417 | struct bus_type pcmcia_bus_type = { |
1415 | .name = "pcmcia", | 1418 | .name = "pcmcia", |
@@ -1418,8 +1421,7 @@ struct bus_type pcmcia_bus_type = { | |||
1418 | .dev_groups = pcmcia_dev_groups, | 1421 | .dev_groups = pcmcia_dev_groups, |
1419 | .probe = pcmcia_device_probe, | 1422 | .probe = pcmcia_device_probe, |
1420 | .remove = pcmcia_device_remove, | 1423 | .remove = pcmcia_device_remove, |
1421 | .suspend = pcmcia_dev_suspend, | 1424 | .pm = &pcmcia_bus_pm_ops, |
1422 | .resume = pcmcia_dev_resume, | ||
1423 | }; | 1425 | }; |
1424 | 1426 | ||
1425 | 1427 | ||
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c index 483f919e0d2e..91b5f5724cba 100644 --- a/drivers/pcmcia/pxa2xx_base.c +++ b/drivers/pcmcia/pxa2xx_base.c | |||
@@ -214,9 +214,8 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt, | |||
214 | } | 214 | } |
215 | #endif | 215 | #endif |
216 | 216 | ||
217 | void pxa2xx_configure_sockets(struct device *dev) | 217 | void pxa2xx_configure_sockets(struct device *dev, struct pcmcia_low_level *ops) |
218 | { | 218 | { |
219 | struct pcmcia_low_level *ops = dev->platform_data; | ||
220 | /* | 219 | /* |
221 | * We have at least one socket, so set MECR:CIT | 220 | * We have at least one socket, so set MECR:CIT |
222 | * (Card Is There) | 221 | * (Card Is There) |
@@ -322,7 +321,7 @@ static int pxa2xx_drv_pcmcia_probe(struct platform_device *dev) | |||
322 | goto err1; | 321 | goto err1; |
323 | } | 322 | } |
324 | 323 | ||
325 | pxa2xx_configure_sockets(&dev->dev); | 324 | pxa2xx_configure_sockets(&dev->dev, ops); |
326 | dev_set_drvdata(&dev->dev, sinfo); | 325 | dev_set_drvdata(&dev->dev, sinfo); |
327 | 326 | ||
328 | return 0; | 327 | return 0; |
@@ -348,7 +347,9 @@ static int pxa2xx_drv_pcmcia_remove(struct platform_device *dev) | |||
348 | 347 | ||
349 | static int pxa2xx_drv_pcmcia_resume(struct device *dev) | 348 | static int pxa2xx_drv_pcmcia_resume(struct device *dev) |
350 | { | 349 | { |
351 | pxa2xx_configure_sockets(dev); | 350 | struct pcmcia_low_level *ops = (struct pcmcia_low_level *)dev->platform_data; |
351 | |||
352 | pxa2xx_configure_sockets(dev, ops); | ||
352 | return 0; | 353 | return 0; |
353 | } | 354 | } |
354 | 355 | ||
diff --git a/drivers/pcmcia/pxa2xx_base.h b/drivers/pcmcia/pxa2xx_base.h index b609b45469ed..e58c7a415418 100644 --- a/drivers/pcmcia/pxa2xx_base.h +++ b/drivers/pcmcia/pxa2xx_base.h | |||
@@ -1,4 +1,4 @@ | |||
1 | int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt); | 1 | int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt); |
2 | void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops); | 2 | void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops); |
3 | void pxa2xx_configure_sockets(struct device *dev); | 3 | void pxa2xx_configure_sockets(struct device *dev, struct pcmcia_low_level *ops); |
4 | 4 | ||
diff --git a/drivers/pcmcia/sa1111_badge4.c b/drivers/pcmcia/sa1111_badge4.c index 12f0dd091477..2f490930430d 100644 --- a/drivers/pcmcia/sa1111_badge4.c +++ b/drivers/pcmcia/sa1111_badge4.c | |||
@@ -134,20 +134,14 @@ static struct pcmcia_low_level badge4_pcmcia_ops = { | |||
134 | 134 | ||
135 | int pcmcia_badge4_init(struct sa1111_dev *dev) | 135 | int pcmcia_badge4_init(struct sa1111_dev *dev) |
136 | { | 136 | { |
137 | int ret = -ENODEV; | 137 | printk(KERN_INFO |
138 | 138 | "%s: badge4_pcmvcc=%d, badge4_pcmvpp=%d, badge4_cfvcc=%d\n", | |
139 | if (machine_is_badge4()) { | 139 | __func__, |
140 | printk(KERN_INFO | 140 | badge4_pcmvcc, badge4_pcmvpp, badge4_cfvcc); |
141 | "%s: badge4_pcmvcc=%d, badge4_pcmvpp=%d, badge4_cfvcc=%d\n", | 141 | |
142 | __func__, | 142 | sa11xx_drv_pcmcia_ops(&badge4_pcmcia_ops); |
143 | badge4_pcmvcc, badge4_pcmvpp, badge4_cfvcc); | 143 | return sa1111_pcmcia_add(dev, &badge4_pcmcia_ops, |
144 | 144 | sa11xx_drv_pcmcia_add_one); | |
145 | sa11xx_drv_pcmcia_ops(&badge4_pcmcia_ops); | ||
146 | ret = sa1111_pcmcia_add(dev, &badge4_pcmcia_ops, | ||
147 | sa11xx_drv_pcmcia_add_one); | ||
148 | } | ||
149 | |||
150 | return ret; | ||
151 | } | 145 | } |
152 | 146 | ||
153 | static int __init pcmv_setup(char *s) | 147 | static int __init pcmv_setup(char *s) |
diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c index a1531feb8460..3d95dffcff7a 100644 --- a/drivers/pcmcia/sa1111_generic.c +++ b/drivers/pcmcia/sa1111_generic.c | |||
@@ -18,6 +18,7 @@ | |||
18 | 18 | ||
19 | #include <mach/hardware.h> | 19 | #include <mach/hardware.h> |
20 | #include <asm/hardware/sa1111.h> | 20 | #include <asm/hardware/sa1111.h> |
21 | #include <asm/mach-types.h> | ||
21 | #include <asm/irq.h> | 22 | #include <asm/irq.h> |
22 | 23 | ||
23 | #include "sa1111_generic.h" | 24 | #include "sa1111_generic.h" |
@@ -203,19 +204,30 @@ static int pcmcia_probe(struct sa1111_dev *dev) | |||
203 | sa1111_writel(PCSSR_S0_SLEEP | PCSSR_S1_SLEEP, base + PCSSR); | 204 | sa1111_writel(PCSSR_S0_SLEEP | PCSSR_S1_SLEEP, base + PCSSR); |
204 | sa1111_writel(PCCR_S0_FLT | PCCR_S1_FLT, base + PCCR); | 205 | sa1111_writel(PCCR_S0_FLT | PCCR_S1_FLT, base + PCCR); |
205 | 206 | ||
207 | ret = -ENODEV; | ||
206 | #ifdef CONFIG_SA1100_BADGE4 | 208 | #ifdef CONFIG_SA1100_BADGE4 |
207 | pcmcia_badge4_init(dev); | 209 | if (machine_is_badge4()) |
210 | ret = pcmcia_badge4_init(dev); | ||
208 | #endif | 211 | #endif |
209 | #ifdef CONFIG_SA1100_JORNADA720 | 212 | #ifdef CONFIG_SA1100_JORNADA720 |
210 | pcmcia_jornada720_init(dev); | 213 | if (machine_is_jornada720()) |
214 | ret = pcmcia_jornada720_init(dev); | ||
211 | #endif | 215 | #endif |
212 | #ifdef CONFIG_ARCH_LUBBOCK | 216 | #ifdef CONFIG_ARCH_LUBBOCK |
213 | pcmcia_lubbock_init(dev); | 217 | if (machine_is_lubbock()) |
218 | ret = pcmcia_lubbock_init(dev); | ||
214 | #endif | 219 | #endif |
215 | #ifdef CONFIG_ASSABET_NEPONSET | 220 | #ifdef CONFIG_ASSABET_NEPONSET |
216 | pcmcia_neponset_init(dev); | 221 | if (machine_is_assabet()) |
222 | ret = pcmcia_neponset_init(dev); | ||
217 | #endif | 223 | #endif |
218 | return 0; | 224 | |
225 | if (ret) { | ||
226 | release_mem_region(dev->res.start, 512); | ||
227 | sa1111_disable_device(dev); | ||
228 | } | ||
229 | |||
230 | return ret; | ||
219 | } | 231 | } |
220 | 232 | ||
221 | static int pcmcia_remove(struct sa1111_dev *dev) | 233 | static int pcmcia_remove(struct sa1111_dev *dev) |
diff --git a/drivers/pcmcia/sa1111_jornada720.c b/drivers/pcmcia/sa1111_jornada720.c index c2c30580c83f..480a3ede27c8 100644 --- a/drivers/pcmcia/sa1111_jornada720.c +++ b/drivers/pcmcia/sa1111_jornada720.c | |||
@@ -94,22 +94,17 @@ static struct pcmcia_low_level jornada720_pcmcia_ops = { | |||
94 | 94 | ||
95 | int pcmcia_jornada720_init(struct sa1111_dev *sadev) | 95 | int pcmcia_jornada720_init(struct sa1111_dev *sadev) |
96 | { | 96 | { |
97 | int ret = -ENODEV; | 97 | unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3; |
98 | 98 | ||
99 | if (machine_is_jornada720()) { | 99 | /* Fixme: why messing around with SA11x0's GPIO1? */ |
100 | unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3; | 100 | GRER |= 0x00000002; |
101 | 101 | ||
102 | GRER |= 0x00000002; | 102 | /* Set GPIO_A<3:1> to be outputs for PCMCIA/CF power controller: */ |
103 | sa1111_set_io_dir(sadev, pin, 0, 0); | ||
104 | sa1111_set_io(sadev, pin, 0); | ||
105 | sa1111_set_sleep_io(sadev, pin, 0); | ||
103 | 106 | ||
104 | /* Set GPIO_A<3:1> to be outputs for PCMCIA/CF power controller: */ | 107 | sa11xx_drv_pcmcia_ops(&jornada720_pcmcia_ops); |
105 | sa1111_set_io_dir(sadev, pin, 0, 0); | 108 | return sa1111_pcmcia_add(sadev, &jornada720_pcmcia_ops, |
106 | sa1111_set_io(sadev, pin, 0); | 109 | sa11xx_drv_pcmcia_add_one); |
107 | sa1111_set_sleep_io(sadev, pin, 0); | ||
108 | |||
109 | sa11xx_drv_pcmcia_ops(&jornada720_pcmcia_ops); | ||
110 | ret = sa1111_pcmcia_add(sadev, &jornada720_pcmcia_ops, | ||
111 | sa11xx_drv_pcmcia_add_one); | ||
112 | } | ||
113 | |||
114 | return ret; | ||
115 | } | 110 | } |
diff --git a/drivers/pcmcia/sa1111_lubbock.c b/drivers/pcmcia/sa1111_lubbock.c index c5caf5790451..e741f499c875 100644 --- a/drivers/pcmcia/sa1111_lubbock.c +++ b/drivers/pcmcia/sa1111_lubbock.c | |||
@@ -210,27 +210,21 @@ static struct pcmcia_low_level lubbock_pcmcia_ops = { | |||
210 | 210 | ||
211 | int pcmcia_lubbock_init(struct sa1111_dev *sadev) | 211 | int pcmcia_lubbock_init(struct sa1111_dev *sadev) |
212 | { | 212 | { |
213 | int ret = -ENODEV; | 213 | /* |
214 | 214 | * Set GPIO_A<3:0> to be outputs for the MAX1600, | |
215 | if (machine_is_lubbock()) { | 215 | * and switch to standby mode. |
216 | /* | 216 | */ |
217 | * Set GPIO_A<3:0> to be outputs for the MAX1600, | 217 | sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0); |
218 | * and switch to standby mode. | 218 | sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0); |
219 | */ | 219 | sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0); |
220 | sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0); | ||
221 | sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0); | ||
222 | sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0); | ||
223 | |||
224 | /* Set CF Socket 1 power to standby mode. */ | ||
225 | lubbock_set_misc_wr((1 << 15) | (1 << 14), 0); | ||
226 | 220 | ||
227 | pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops); | 221 | /* Set CF Socket 1 power to standby mode. */ |
228 | pxa2xx_configure_sockets(&sadev->dev); | 222 | lubbock_set_misc_wr((1 << 15) | (1 << 14), 0); |
229 | ret = sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops, | ||
230 | pxa2xx_drv_pcmcia_add_one); | ||
231 | } | ||
232 | 223 | ||
233 | return ret; | 224 | pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops); |
225 | pxa2xx_configure_sockets(&sadev->dev, &lubbock_pcmcia_ops); | ||
226 | return sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops, | ||
227 | pxa2xx_drv_pcmcia_add_one); | ||
234 | } | 228 | } |
235 | 229 | ||
236 | MODULE_LICENSE("GPL"); | 230 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/pcmcia/sa1111_neponset.c b/drivers/pcmcia/sa1111_neponset.c index 1d78739c4c07..019c395eb4bf 100644 --- a/drivers/pcmcia/sa1111_neponset.c +++ b/drivers/pcmcia/sa1111_neponset.c | |||
@@ -110,20 +110,14 @@ static struct pcmcia_low_level neponset_pcmcia_ops = { | |||
110 | 110 | ||
111 | int pcmcia_neponset_init(struct sa1111_dev *sadev) | 111 | int pcmcia_neponset_init(struct sa1111_dev *sadev) |
112 | { | 112 | { |
113 | int ret = -ENODEV; | 113 | /* |
114 | 114 | * Set GPIO_A<3:0> to be outputs for the MAX1600, | |
115 | if (machine_is_assabet()) { | 115 | * and switch to standby mode. |
116 | /* | 116 | */ |
117 | * Set GPIO_A<3:0> to be outputs for the MAX1600, | 117 | sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0); |
118 | * and switch to standby mode. | 118 | sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0); |
119 | */ | 119 | sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0); |
120 | sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0); | 120 | sa11xx_drv_pcmcia_ops(&neponset_pcmcia_ops); |
121 | sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0); | 121 | return sa1111_pcmcia_add(sadev, &neponset_pcmcia_ops, |
122 | sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0); | 122 | sa11xx_drv_pcmcia_add_one); |
123 | sa11xx_drv_pcmcia_ops(&neponset_pcmcia_ops); | ||
124 | ret = sa1111_pcmcia_add(sadev, &neponset_pcmcia_ops, | ||
125 | sa11xx_drv_pcmcia_add_one); | ||
126 | } | ||
127 | |||
128 | return ret; | ||
129 | } | 123 | } |
diff --git a/drivers/pcmcia/sa11xx_base.c b/drivers/pcmcia/sa11xx_base.c index 9f6ec87b9f9e..48140ac73ed6 100644 --- a/drivers/pcmcia/sa11xx_base.c +++ b/drivers/pcmcia/sa11xx_base.c | |||
@@ -144,19 +144,19 @@ static int | |||
144 | sa1100_pcmcia_show_timing(struct soc_pcmcia_socket *skt, char *buf) | 144 | sa1100_pcmcia_show_timing(struct soc_pcmcia_socket *skt, char *buf) |
145 | { | 145 | { |
146 | struct soc_pcmcia_timing timing; | 146 | struct soc_pcmcia_timing timing; |
147 | unsigned int clock = clk_get_rate(skt->clk); | 147 | unsigned int clock = clk_get_rate(skt->clk) / 1000; |
148 | unsigned long mecr = MECR; | 148 | unsigned long mecr = MECR; |
149 | char *p = buf; | 149 | char *p = buf; |
150 | 150 | ||
151 | soc_common_pcmcia_get_timing(skt, &timing); | 151 | soc_common_pcmcia_get_timing(skt, &timing); |
152 | 152 | ||
153 | p+=sprintf(p, "I/O : %u (%u)\n", timing.io, | 153 | p+=sprintf(p, "I/O : %uns (%uns)\n", timing.io, |
154 | sa1100_pcmcia_cmd_time(clock, MECR_BSIO_GET(mecr, skt->nr))); | 154 | sa1100_pcmcia_cmd_time(clock, MECR_BSIO_GET(mecr, skt->nr))); |
155 | 155 | ||
156 | p+=sprintf(p, "attribute: %u (%u)\n", timing.attr, | 156 | p+=sprintf(p, "attribute: %uns (%uns)\n", timing.attr, |
157 | sa1100_pcmcia_cmd_time(clock, MECR_BSA_GET(mecr, skt->nr))); | 157 | sa1100_pcmcia_cmd_time(clock, MECR_BSA_GET(mecr, skt->nr))); |
158 | 158 | ||
159 | p+=sprintf(p, "common : %u (%u)\n", timing.mem, | 159 | p+=sprintf(p, "common : %uns (%uns)\n", timing.mem, |
160 | sa1100_pcmcia_cmd_time(clock, MECR_BSM_GET(mecr, skt->nr))); | 160 | sa1100_pcmcia_cmd_time(clock, MECR_BSM_GET(mecr, skt->nr))); |
161 | 161 | ||
162 | return p - buf; | 162 | return p - buf; |
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c index eed5e9c05353..d5ca760c4eb2 100644 --- a/drivers/pcmcia/soc_common.c +++ b/drivers/pcmcia/soc_common.c | |||
@@ -235,7 +235,7 @@ static unsigned int soc_common_pcmcia_skt_state(struct soc_pcmcia_socket *skt) | |||
235 | stat |= skt->cs_state.Vcc ? SS_POWERON : 0; | 235 | stat |= skt->cs_state.Vcc ? SS_POWERON : 0; |
236 | 236 | ||
237 | if (skt->cs_state.flags & SS_IOCARD) | 237 | if (skt->cs_state.flags & SS_IOCARD) |
238 | stat |= state.bvd1 ? SS_STSCHG : 0; | 238 | stat |= state.bvd1 ? 0 : SS_STSCHG; |
239 | else { | 239 | else { |
240 | if (state.bvd1 == 0) | 240 | if (state.bvd1 == 0) |
241 | stat |= SS_BATDEAD; | 241 | stat |= SS_BATDEAD; |
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index c494613c1909..f5e1008a223d 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c | |||
@@ -925,6 +925,7 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu) | |||
925 | if (i > 0 && spi != using_spi) { | 925 | if (i > 0 && spi != using_spi) { |
926 | pr_err("PPI/SPI IRQ type mismatch for %s!\n", | 926 | pr_err("PPI/SPI IRQ type mismatch for %s!\n", |
927 | dn->name); | 927 | dn->name); |
928 | of_node_put(dn); | ||
928 | kfree(irqs); | 929 | kfree(irqs); |
929 | return -EINVAL; | 930 | return -EINVAL; |
930 | } | 931 | } |
@@ -969,7 +970,7 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu) | |||
969 | if (cpumask_weight(&pmu->supported_cpus) == 0) { | 970 | if (cpumask_weight(&pmu->supported_cpus) == 0) { |
970 | int irq = platform_get_irq(pdev, 0); | 971 | int irq = platform_get_irq(pdev, 0); |
971 | 972 | ||
972 | if (irq_is_percpu(irq)) { | 973 | if (irq >= 0 && irq_is_percpu(irq)) { |
973 | /* If using PPIs, check the affinity of the partition */ | 974 | /* If using PPIs, check the affinity of the partition */ |
974 | int ret; | 975 | int ret; |
975 | 976 | ||
diff --git a/drivers/phy/phy-brcm-sata.c b/drivers/phy/phy-brcm-sata.c index 18d662610075..8ffc44afdb75 100644 --- a/drivers/phy/phy-brcm-sata.c +++ b/drivers/phy/phy-brcm-sata.c | |||
@@ -367,7 +367,7 @@ static int brcm_sata_phy_init(struct phy *phy) | |||
367 | rc = -ENODEV; | 367 | rc = -ENODEV; |
368 | }; | 368 | }; |
369 | 369 | ||
370 | return 0; | 370 | return rc; |
371 | } | 371 | } |
372 | 372 | ||
373 | static const struct phy_ops phy_ops = { | 373 | static const struct phy_ops phy_ops = { |
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c index 0a45bc6088ae..8c7eb335622e 100644 --- a/drivers/phy/phy-sun4i-usb.c +++ b/drivers/phy/phy-sun4i-usb.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #include <linux/power_supply.h> | 40 | #include <linux/power_supply.h> |
41 | #include <linux/regulator/consumer.h> | 41 | #include <linux/regulator/consumer.h> |
42 | #include <linux/reset.h> | 42 | #include <linux/reset.h> |
43 | #include <linux/usb/of.h> | ||
43 | #include <linux/workqueue.h> | 44 | #include <linux/workqueue.h> |
44 | 45 | ||
45 | #define REG_ISCR 0x00 | 46 | #define REG_ISCR 0x00 |
@@ -110,6 +111,7 @@ struct sun4i_usb_phy_cfg { | |||
110 | struct sun4i_usb_phy_data { | 111 | struct sun4i_usb_phy_data { |
111 | void __iomem *base; | 112 | void __iomem *base; |
112 | const struct sun4i_usb_phy_cfg *cfg; | 113 | const struct sun4i_usb_phy_cfg *cfg; |
114 | enum usb_dr_mode dr_mode; | ||
113 | struct mutex mutex; | 115 | struct mutex mutex; |
114 | struct sun4i_usb_phy { | 116 | struct sun4i_usb_phy { |
115 | struct phy *phy; | 117 | struct phy *phy; |
@@ -120,6 +122,7 @@ struct sun4i_usb_phy_data { | |||
120 | bool regulator_on; | 122 | bool regulator_on; |
121 | int index; | 123 | int index; |
122 | } phys[MAX_PHYS]; | 124 | } phys[MAX_PHYS]; |
125 | int first_phy; | ||
123 | /* phy0 / otg related variables */ | 126 | /* phy0 / otg related variables */ |
124 | struct extcon_dev *extcon; | 127 | struct extcon_dev *extcon; |
125 | bool phy0_init; | 128 | bool phy0_init; |
@@ -285,16 +288,10 @@ static int sun4i_usb_phy_init(struct phy *_phy) | |||
285 | sun4i_usb_phy0_update_iscr(_phy, 0, ISCR_DPDM_PULLUP_EN); | 288 | sun4i_usb_phy0_update_iscr(_phy, 0, ISCR_DPDM_PULLUP_EN); |
286 | sun4i_usb_phy0_update_iscr(_phy, 0, ISCR_ID_PULLUP_EN); | 289 | sun4i_usb_phy0_update_iscr(_phy, 0, ISCR_ID_PULLUP_EN); |
287 | 290 | ||
288 | if (data->id_det_gpio) { | 291 | /* Force ISCR and cable state updates */ |
289 | /* OTG mode, force ISCR and cable state updates */ | 292 | data->id_det = -1; |
290 | data->id_det = -1; | 293 | data->vbus_det = -1; |
291 | data->vbus_det = -1; | 294 | queue_delayed_work(system_wq, &data->detect, 0); |
292 | queue_delayed_work(system_wq, &data->detect, 0); | ||
293 | } else { | ||
294 | /* Host only mode */ | ||
295 | sun4i_usb_phy0_set_id_detect(_phy, 0); | ||
296 | sun4i_usb_phy0_set_vbus_detect(_phy, 1); | ||
297 | } | ||
298 | } | 295 | } |
299 | 296 | ||
300 | return 0; | 297 | return 0; |
@@ -319,6 +316,19 @@ static int sun4i_usb_phy_exit(struct phy *_phy) | |||
319 | return 0; | 316 | return 0; |
320 | } | 317 | } |
321 | 318 | ||
319 | static int sun4i_usb_phy0_get_id_det(struct sun4i_usb_phy_data *data) | ||
320 | { | ||
321 | switch (data->dr_mode) { | ||
322 | case USB_DR_MODE_OTG: | ||
323 | return gpiod_get_value_cansleep(data->id_det_gpio); | ||
324 | case USB_DR_MODE_HOST: | ||
325 | return 0; | ||
326 | case USB_DR_MODE_PERIPHERAL: | ||
327 | default: | ||
328 | return 1; | ||
329 | } | ||
330 | } | ||
331 | |||
322 | static int sun4i_usb_phy0_get_vbus_det(struct sun4i_usb_phy_data *data) | 332 | static int sun4i_usb_phy0_get_vbus_det(struct sun4i_usb_phy_data *data) |
323 | { | 333 | { |
324 | if (data->vbus_det_gpio) | 334 | if (data->vbus_det_gpio) |
@@ -432,7 +442,10 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work) | |||
432 | struct phy *phy0 = data->phys[0].phy; | 442 | struct phy *phy0 = data->phys[0].phy; |
433 | int id_det, vbus_det, id_notify = 0, vbus_notify = 0; | 443 | int id_det, vbus_det, id_notify = 0, vbus_notify = 0; |
434 | 444 | ||
435 | id_det = gpiod_get_value_cansleep(data->id_det_gpio); | 445 | if (phy0 == NULL) |
446 | return; | ||
447 | |||
448 | id_det = sun4i_usb_phy0_get_id_det(data); | ||
436 | vbus_det = sun4i_usb_phy0_get_vbus_det(data); | 449 | vbus_det = sun4i_usb_phy0_get_vbus_det(data); |
437 | 450 | ||
438 | mutex_lock(&phy0->mutex); | 451 | mutex_lock(&phy0->mutex); |
@@ -448,7 +461,8 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work) | |||
448 | * without vbus detection report vbus low for long enough for | 461 | * without vbus detection report vbus low for long enough for |
449 | * the musb-ip to end the current device session. | 462 | * the musb-ip to end the current device session. |
450 | */ | 463 | */ |
451 | if (!sun4i_usb_phy0_have_vbus_det(data) && id_det == 0) { | 464 | if (data->dr_mode == USB_DR_MODE_OTG && |
465 | !sun4i_usb_phy0_have_vbus_det(data) && id_det == 0) { | ||
452 | sun4i_usb_phy0_set_vbus_detect(phy0, 0); | 466 | sun4i_usb_phy0_set_vbus_detect(phy0, 0); |
453 | msleep(200); | 467 | msleep(200); |
454 | sun4i_usb_phy0_set_vbus_detect(phy0, 1); | 468 | sun4i_usb_phy0_set_vbus_detect(phy0, 1); |
@@ -474,7 +488,8 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work) | |||
474 | * without vbus detection report vbus low for long enough to | 488 | * without vbus detection report vbus low for long enough to |
475 | * the musb-ip to end the current host session. | 489 | * the musb-ip to end the current host session. |
476 | */ | 490 | */ |
477 | if (!sun4i_usb_phy0_have_vbus_det(data) && id_det == 1) { | 491 | if (data->dr_mode == USB_DR_MODE_OTG && |
492 | !sun4i_usb_phy0_have_vbus_det(data) && id_det == 1) { | ||
478 | mutex_lock(&phy0->mutex); | 493 | mutex_lock(&phy0->mutex); |
479 | sun4i_usb_phy0_set_vbus_detect(phy0, 0); | 494 | sun4i_usb_phy0_set_vbus_detect(phy0, 0); |
480 | msleep(1000); | 495 | msleep(1000); |
@@ -519,7 +534,8 @@ static struct phy *sun4i_usb_phy_xlate(struct device *dev, | |||
519 | { | 534 | { |
520 | struct sun4i_usb_phy_data *data = dev_get_drvdata(dev); | 535 | struct sun4i_usb_phy_data *data = dev_get_drvdata(dev); |
521 | 536 | ||
522 | if (args->args[0] >= data->cfg->num_phys) | 537 | if (args->args[0] < data->first_phy || |
538 | args->args[0] >= data->cfg->num_phys) | ||
523 | return ERR_PTR(-ENODEV); | 539 | return ERR_PTR(-ENODEV); |
524 | 540 | ||
525 | return data->phys[args->args[0]].phy; | 541 | return data->phys[args->args[0]].phy; |
@@ -593,13 +609,17 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev) | |||
593 | return -EPROBE_DEFER; | 609 | return -EPROBE_DEFER; |
594 | } | 610 | } |
595 | 611 | ||
596 | /* vbus_det without id_det makes no sense, and is not supported */ | 612 | data->dr_mode = of_usb_get_dr_mode_by_phy(np, 0); |
597 | if (sun4i_usb_phy0_have_vbus_det(data) && !data->id_det_gpio) { | 613 | switch (data->dr_mode) { |
598 | dev_err(dev, "usb0_id_det missing or invalid\n"); | 614 | case USB_DR_MODE_OTG: |
599 | return -ENODEV; | 615 | /* otg without id_det makes no sense, and is not supported */ |
600 | } | 616 | if (!data->id_det_gpio) { |
601 | 617 | dev_err(dev, "usb0_id_det missing or invalid\n"); | |
602 | if (data->id_det_gpio) { | 618 | return -ENODEV; |
619 | } | ||
620 | /* fall through */ | ||
621 | case USB_DR_MODE_HOST: | ||
622 | case USB_DR_MODE_PERIPHERAL: | ||
603 | data->extcon = devm_extcon_dev_allocate(dev, | 623 | data->extcon = devm_extcon_dev_allocate(dev, |
604 | sun4i_usb_phy0_cable); | 624 | sun4i_usb_phy0_cable); |
605 | if (IS_ERR(data->extcon)) | 625 | if (IS_ERR(data->extcon)) |
@@ -610,9 +630,13 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev) | |||
610 | dev_err(dev, "failed to register extcon: %d\n", ret); | 630 | dev_err(dev, "failed to register extcon: %d\n", ret); |
611 | return ret; | 631 | return ret; |
612 | } | 632 | } |
633 | break; | ||
634 | default: | ||
635 | dev_info(dev, "dr_mode unknown, not registering usb phy0\n"); | ||
636 | data->first_phy = 1; | ||
613 | } | 637 | } |
614 | 638 | ||
615 | for (i = 0; i < data->cfg->num_phys; i++) { | 639 | for (i = data->first_phy; i < data->cfg->num_phys; i++) { |
616 | struct sun4i_usb_phy *phy = data->phys + i; | 640 | struct sun4i_usb_phy *phy = data->phys + i; |
617 | char name[16]; | 641 | char name[16]; |
618 | 642 | ||
diff --git a/drivers/phy/phy-sun9i-usb.c b/drivers/phy/phy-sun9i-usb.c index ac4f31abefe3..28fce4bce638 100644 --- a/drivers/phy/phy-sun9i-usb.c +++ b/drivers/phy/phy-sun9i-usb.c | |||
@@ -141,9 +141,9 @@ static int sun9i_usb_phy_probe(struct platform_device *pdev) | |||
141 | } | 141 | } |
142 | 142 | ||
143 | phy->hsic_clk = devm_clk_get(dev, "hsic_12M"); | 143 | phy->hsic_clk = devm_clk_get(dev, "hsic_12M"); |
144 | if (IS_ERR(phy->clk)) { | 144 | if (IS_ERR(phy->hsic_clk)) { |
145 | dev_err(dev, "failed to get hsic_12M clock\n"); | 145 | dev_err(dev, "failed to get hsic_12M clock\n"); |
146 | return PTR_ERR(phy->clk); | 146 | return PTR_ERR(phy->hsic_clk); |
147 | } | 147 | } |
148 | 148 | ||
149 | phy->reset = devm_reset_control_get(dev, "hsic"); | 149 | phy->reset = devm_reset_control_get(dev, "hsic"); |
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 5749a4eee746..0fe8fad25e4d 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c | |||
@@ -1539,12 +1539,11 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) | |||
1539 | offset += range->npins; | 1539 | offset += range->npins; |
1540 | } | 1540 | } |
1541 | 1541 | ||
1542 | /* Mask and clear all interrupts */ | 1542 | /* Clear all interrupts */ |
1543 | chv_writel(0, pctrl->regs + CHV_INTMASK); | ||
1544 | chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); | 1543 | chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); |
1545 | 1544 | ||
1546 | ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0, | 1545 | ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0, |
1547 | handle_simple_irq, IRQ_TYPE_NONE); | 1546 | handle_bad_irq, IRQ_TYPE_NONE); |
1548 | if (ret) { | 1547 | if (ret) { |
1549 | dev_err(pctrl->dev, "failed to add IRQ chip\n"); | 1548 | dev_err(pctrl->dev, "failed to add IRQ chip\n"); |
1550 | goto fail; | 1549 | goto fail; |
diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c index eb4990ff26ca..7fb765642ee7 100644 --- a/drivers/pinctrl/intel/pinctrl-merrifield.c +++ b/drivers/pinctrl/intel/pinctrl-merrifield.c | |||
@@ -11,6 +11,7 @@ | |||
11 | 11 | ||
12 | #include <linux/bitops.h> | 12 | #include <linux/bitops.h> |
13 | #include <linux/err.h> | 13 | #include <linux/err.h> |
14 | #include <linux/io.h> | ||
14 | #include <linux/module.h> | 15 | #include <linux/module.h> |
15 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
16 | #include <linux/pinctrl/pinconf.h> | 17 | #include <linux/pinctrl/pinconf.h> |
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c index 11623c6b0cb3..44e69c963f5d 100644 --- a/drivers/pinctrl/meson/pinctrl-meson.c +++ b/drivers/pinctrl/meson/pinctrl-meson.c | |||
@@ -727,13 +727,7 @@ static int meson_pinctrl_probe(struct platform_device *pdev) | |||
727 | return PTR_ERR(pc->pcdev); | 727 | return PTR_ERR(pc->pcdev); |
728 | } | 728 | } |
729 | 729 | ||
730 | ret = meson_gpiolib_register(pc); | 730 | return meson_gpiolib_register(pc); |
731 | if (ret) { | ||
732 | pinctrl_unregister(pc->pcdev); | ||
733 | return ret; | ||
734 | } | ||
735 | |||
736 | return 0; | ||
737 | } | 731 | } |
738 | 732 | ||
739 | static struct platform_driver meson_pinctrl_driver = { | 733 | static struct platform_driver meson_pinctrl_driver = { |
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index 634b4d30eefb..b3e772390ab6 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c | |||
@@ -43,17 +43,6 @@ static int amd_gpio_direction_input(struct gpio_chip *gc, unsigned offset) | |||
43 | 43 | ||
44 | spin_lock_irqsave(&gpio_dev->lock, flags); | 44 | spin_lock_irqsave(&gpio_dev->lock, flags); |
45 | pin_reg = readl(gpio_dev->base + offset * 4); | 45 | pin_reg = readl(gpio_dev->base + offset * 4); |
46 | /* | ||
47 | * Suppose BIOS or Bootloader sets specific debounce for the | ||
48 | * GPIO. if not, set debounce to be 2.75ms and remove glitch. | ||
49 | */ | ||
50 | if ((pin_reg & DB_TMR_OUT_MASK) == 0) { | ||
51 | pin_reg |= 0xf; | ||
52 | pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF); | ||
53 | pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF; | ||
54 | pin_reg &= ~BIT(DB_TMR_LARGE_OFF); | ||
55 | } | ||
56 | |||
57 | pin_reg &= ~BIT(OUTPUT_ENABLE_OFF); | 46 | pin_reg &= ~BIT(OUTPUT_ENABLE_OFF); |
58 | writel(pin_reg, gpio_dev->base + offset * 4); | 47 | writel(pin_reg, gpio_dev->base + offset * 4); |
59 | spin_unlock_irqrestore(&gpio_dev->lock, flags); | 48 | spin_unlock_irqrestore(&gpio_dev->lock, flags); |
@@ -326,15 +315,6 @@ static void amd_gpio_irq_enable(struct irq_data *d) | |||
326 | 315 | ||
327 | spin_lock_irqsave(&gpio_dev->lock, flags); | 316 | spin_lock_irqsave(&gpio_dev->lock, flags); |
328 | pin_reg = readl(gpio_dev->base + (d->hwirq)*4); | 317 | pin_reg = readl(gpio_dev->base + (d->hwirq)*4); |
329 | /* | ||
330 | Suppose BIOS or Bootloader sets specific debounce for the | ||
331 | GPIO. if not, set debounce to be 2.75ms. | ||
332 | */ | ||
333 | if ((pin_reg & DB_TMR_OUT_MASK) == 0) { | ||
334 | pin_reg |= 0xf; | ||
335 | pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF); | ||
336 | pin_reg &= ~BIT(DB_TMR_LARGE_OFF); | ||
337 | } | ||
338 | pin_reg |= BIT(INTERRUPT_ENABLE_OFF); | 318 | pin_reg |= BIT(INTERRUPT_ENABLE_OFF); |
339 | pin_reg |= BIT(INTERRUPT_MASK_OFF); | 319 | pin_reg |= BIT(INTERRUPT_MASK_OFF); |
340 | writel(pin_reg, gpio_dev->base + (d->hwirq)*4); | 320 | writel(pin_reg, gpio_dev->base + (d->hwirq)*4); |
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c index c6d410ef8de0..55375b1b3cc8 100644 --- a/drivers/pinctrl/pinctrl-pistachio.c +++ b/drivers/pinctrl/pinctrl-pistachio.c | |||
@@ -809,17 +809,17 @@ static const struct pistachio_pin_group pistachio_groups[] = { | |||
809 | PADS_FUNCTION_SELECT2, 12, 0x3), | 809 | PADS_FUNCTION_SELECT2, 12, 0x3), |
810 | MFIO_MUX_PIN_GROUP(83, MIPS_PLL_LOCK, MIPS_TRACE_DATA, USB_DEBUG, | 810 | MFIO_MUX_PIN_GROUP(83, MIPS_PLL_LOCK, MIPS_TRACE_DATA, USB_DEBUG, |
811 | PADS_FUNCTION_SELECT2, 14, 0x3), | 811 | PADS_FUNCTION_SELECT2, 14, 0x3), |
812 | MFIO_MUX_PIN_GROUP(84, SYS_PLL_LOCK, MIPS_TRACE_DATA, USB_DEBUG, | 812 | MFIO_MUX_PIN_GROUP(84, AUDIO_PLL_LOCK, MIPS_TRACE_DATA, USB_DEBUG, |
813 | PADS_FUNCTION_SELECT2, 16, 0x3), | 813 | PADS_FUNCTION_SELECT2, 16, 0x3), |
814 | MFIO_MUX_PIN_GROUP(85, WIFI_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG, | 814 | MFIO_MUX_PIN_GROUP(85, RPU_V_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG, |
815 | PADS_FUNCTION_SELECT2, 18, 0x3), | 815 | PADS_FUNCTION_SELECT2, 18, 0x3), |
816 | MFIO_MUX_PIN_GROUP(86, BT_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG, | 816 | MFIO_MUX_PIN_GROUP(86, RPU_L_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG, |
817 | PADS_FUNCTION_SELECT2, 20, 0x3), | 817 | PADS_FUNCTION_SELECT2, 20, 0x3), |
818 | MFIO_MUX_PIN_GROUP(87, RPU_V_PLL_LOCK, DREQ2, SOCIF_DEBUG, | 818 | MFIO_MUX_PIN_GROUP(87, SYS_PLL_LOCK, DREQ2, SOCIF_DEBUG, |
819 | PADS_FUNCTION_SELECT2, 22, 0x3), | 819 | PADS_FUNCTION_SELECT2, 22, 0x3), |
820 | MFIO_MUX_PIN_GROUP(88, RPU_L_PLL_LOCK, DREQ3, SOCIF_DEBUG, | 820 | MFIO_MUX_PIN_GROUP(88, WIFI_PLL_LOCK, DREQ3, SOCIF_DEBUG, |
821 | PADS_FUNCTION_SELECT2, 24, 0x3), | 821 | PADS_FUNCTION_SELECT2, 24, 0x3), |
822 | MFIO_MUX_PIN_GROUP(89, AUDIO_PLL_LOCK, DREQ4, DREQ5, | 822 | MFIO_MUX_PIN_GROUP(89, BT_PLL_LOCK, DREQ4, DREQ5, |
823 | PADS_FUNCTION_SELECT2, 26, 0x3), | 823 | PADS_FUNCTION_SELECT2, 26, 0x3), |
824 | PIN_GROUP(TCK, "tck"), | 824 | PIN_GROUP(TCK, "tck"), |
825 | PIN_GROUP(TRSTN, "trstn"), | 825 | PIN_GROUP(TRSTN, "trstn"), |
@@ -1432,7 +1432,6 @@ static int pistachio_pinctrl_probe(struct platform_device *pdev) | |||
1432 | { | 1432 | { |
1433 | struct pistachio_pinctrl *pctl; | 1433 | struct pistachio_pinctrl *pctl; |
1434 | struct resource *res; | 1434 | struct resource *res; |
1435 | int ret; | ||
1436 | 1435 | ||
1437 | pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL); | 1436 | pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL); |
1438 | if (!pctl) | 1437 | if (!pctl) |
@@ -1464,13 +1463,7 @@ static int pistachio_pinctrl_probe(struct platform_device *pdev) | |||
1464 | return PTR_ERR(pctl->pctldev); | 1463 | return PTR_ERR(pctl->pctldev); |
1465 | } | 1464 | } |
1466 | 1465 | ||
1467 | ret = pistachio_gpio_register(pctl); | 1466 | return pistachio_gpio_register(pctl); |
1468 | if (ret < 0) { | ||
1469 | pinctrl_unregister(pctl->pctldev); | ||
1470 | return ret; | ||
1471 | } | ||
1472 | |||
1473 | return 0; | ||
1474 | } | 1467 | } |
1475 | 1468 | ||
1476 | static struct platform_driver pistachio_pinctrl_driver = { | 1469 | static struct platform_driver pistachio_pinctrl_driver = { |
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c index ce483b03a263..f9d661e5c14a 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c | |||
@@ -485,12 +485,12 @@ static const struct sunxi_desc_pin sun8i_a23_pins[] = { | |||
485 | SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8), | 485 | SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8), |
486 | SUNXI_FUNCTION(0x0, "gpio_in"), | 486 | SUNXI_FUNCTION(0x0, "gpio_in"), |
487 | SUNXI_FUNCTION(0x1, "gpio_out"), | 487 | SUNXI_FUNCTION(0x1, "gpio_out"), |
488 | SUNXI_FUNCTION(0x2, "uart2"), /* RTS */ | 488 | SUNXI_FUNCTION(0x2, "uart1"), /* RTS */ |
489 | SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 8)), /* PG_EINT8 */ | 489 | SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 8)), /* PG_EINT8 */ |
490 | SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9), | 490 | SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9), |
491 | SUNXI_FUNCTION(0x0, "gpio_in"), | 491 | SUNXI_FUNCTION(0x0, "gpio_in"), |
492 | SUNXI_FUNCTION(0x1, "gpio_out"), | 492 | SUNXI_FUNCTION(0x1, "gpio_out"), |
493 | SUNXI_FUNCTION(0x2, "uart2"), /* CTS */ | 493 | SUNXI_FUNCTION(0x2, "uart1"), /* CTS */ |
494 | SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 9)), /* PG_EINT9 */ | 494 | SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 9)), /* PG_EINT9 */ |
495 | SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10), | 495 | SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10), |
496 | SUNXI_FUNCTION(0x0, "gpio_in"), | 496 | SUNXI_FUNCTION(0x0, "gpio_in"), |
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c index 3040abe6f73a..3131cac2b76f 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c | |||
@@ -407,12 +407,12 @@ static const struct sunxi_desc_pin sun8i_a33_pins[] = { | |||
407 | SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8), | 407 | SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8), |
408 | SUNXI_FUNCTION(0x0, "gpio_in"), | 408 | SUNXI_FUNCTION(0x0, "gpio_in"), |
409 | SUNXI_FUNCTION(0x1, "gpio_out"), | 409 | SUNXI_FUNCTION(0x1, "gpio_out"), |
410 | SUNXI_FUNCTION(0x2, "uart2"), /* RTS */ | 410 | SUNXI_FUNCTION(0x2, "uart1"), /* RTS */ |
411 | SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 8)), /* PG_EINT8 */ | 411 | SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 8)), /* PG_EINT8 */ |
412 | SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9), | 412 | SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9), |
413 | SUNXI_FUNCTION(0x0, "gpio_in"), | 413 | SUNXI_FUNCTION(0x0, "gpio_in"), |
414 | SUNXI_FUNCTION(0x1, "gpio_out"), | 414 | SUNXI_FUNCTION(0x1, "gpio_out"), |
415 | SUNXI_FUNCTION(0x2, "uart2"), /* CTS */ | 415 | SUNXI_FUNCTION(0x2, "uart1"), /* CTS */ |
416 | SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 9)), /* PG_EINT9 */ | 416 | SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 9)), /* PG_EINT9 */ |
417 | SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10), | 417 | SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10), |
418 | SUNXI_FUNCTION(0x0, "gpio_in"), | 418 | SUNXI_FUNCTION(0x0, "gpio_in"), |
diff --git a/drivers/platform/olpc/olpc-ec.c b/drivers/platform/olpc/olpc-ec.c index f99b183d5296..374a8028fec7 100644 --- a/drivers/platform/olpc/olpc-ec.c +++ b/drivers/platform/olpc/olpc-ec.c | |||
@@ -1,6 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * Generic driver for the OLPC Embedded Controller. | 2 | * Generic driver for the OLPC Embedded Controller. |
3 | * | 3 | * |
4 | * Author: Andres Salomon <dilinger@queued.net> | ||
5 | * | ||
4 | * Copyright (C) 2011-2012 One Laptop per Child Foundation. | 6 | * Copyright (C) 2011-2012 One Laptop per Child Foundation. |
5 | * | 7 | * |
6 | * Licensed under the GPL v2 or later. | 8 | * Licensed under the GPL v2 or later. |
@@ -12,7 +14,7 @@ | |||
12 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
13 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
14 | #include <linux/workqueue.h> | 16 | #include <linux/workqueue.h> |
15 | #include <linux/module.h> | 17 | #include <linux/init.h> |
16 | #include <linux/list.h> | 18 | #include <linux/list.h> |
17 | #include <linux/olpc-ec.h> | 19 | #include <linux/olpc-ec.h> |
18 | #include <asm/olpc.h> | 20 | #include <asm/olpc.h> |
@@ -326,8 +328,4 @@ static int __init olpc_ec_init_module(void) | |||
326 | { | 328 | { |
327 | return platform_driver_register(&olpc_ec_plat_driver); | 329 | return platform_driver_register(&olpc_ec_plat_driver); |
328 | } | 330 | } |
329 | |||
330 | arch_initcall(olpc_ec_init_module); | 331 | arch_initcall(olpc_ec_init_module); |
331 | |||
332 | MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>"); | ||
333 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c index 63b371d6ee55..91ae58510d92 100644 --- a/drivers/platform/x86/intel_pmic_gpio.c +++ b/drivers/platform/x86/intel_pmic_gpio.c | |||
@@ -1,6 +1,8 @@ | |||
1 | /* Moorestown PMIC GPIO (access through IPC) driver | 1 | /* Moorestown PMIC GPIO (access through IPC) driver |
2 | * Copyright (c) 2008 - 2009, Intel Corporation. | 2 | * Copyright (c) 2008 - 2009, Intel Corporation. |
3 | * | 3 | * |
4 | * Author: Alek Du <alek.du@intel.com> | ||
5 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
6 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
@@ -21,7 +23,6 @@ | |||
21 | 23 | ||
22 | #define pr_fmt(fmt) "%s: " fmt, __func__ | 24 | #define pr_fmt(fmt) "%s: " fmt, __func__ |
23 | 25 | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
26 | #include <linux/interrupt.h> | 27 | #include <linux/interrupt.h> |
27 | #include <linux/delay.h> | 28 | #include <linux/delay.h> |
@@ -322,9 +323,4 @@ static int __init platform_pmic_gpio_init(void) | |||
322 | { | 323 | { |
323 | return platform_driver_register(&platform_pmic_gpio_driver); | 324 | return platform_driver_register(&platform_pmic_gpio_driver); |
324 | } | 325 | } |
325 | |||
326 | subsys_initcall(platform_pmic_gpio_init); | 326 | subsys_initcall(platform_pmic_gpio_init); |
327 | |||
328 | MODULE_AUTHOR("Alek Du <alek.du@intel.com>"); | ||
329 | MODULE_DESCRIPTION("Intel Moorestown PMIC GPIO driver"); | ||
330 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c index 9c65f134d447..da7a75f82489 100644 --- a/drivers/power/max17042_battery.c +++ b/drivers/power/max17042_battery.c | |||
@@ -457,13 +457,16 @@ static inline void max17042_write_model_data(struct max17042_chip *chip, | |||
457 | } | 457 | } |
458 | 458 | ||
459 | static inline void max17042_read_model_data(struct max17042_chip *chip, | 459 | static inline void max17042_read_model_data(struct max17042_chip *chip, |
460 | u8 addr, u32 *data, int size) | 460 | u8 addr, u16 *data, int size) |
461 | { | 461 | { |
462 | struct regmap *map = chip->regmap; | 462 | struct regmap *map = chip->regmap; |
463 | int i; | 463 | int i; |
464 | u32 tmp; | ||
464 | 465 | ||
465 | for (i = 0; i < size; i++) | 466 | for (i = 0; i < size; i++) { |
466 | regmap_read(map, addr + i, &data[i]); | 467 | regmap_read(map, addr + i, &tmp); |
468 | data[i] = (u16)tmp; | ||
469 | } | ||
467 | } | 470 | } |
468 | 471 | ||
469 | static inline int max17042_model_data_compare(struct max17042_chip *chip, | 472 | static inline int max17042_model_data_compare(struct max17042_chip *chip, |
@@ -486,7 +489,7 @@ static int max17042_init_model(struct max17042_chip *chip) | |||
486 | { | 489 | { |
487 | int ret; | 490 | int ret; |
488 | int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl); | 491 | int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl); |
489 | u32 *temp_data; | 492 | u16 *temp_data; |
490 | 493 | ||
491 | temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL); | 494 | temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL); |
492 | if (!temp_data) | 495 | if (!temp_data) |
@@ -501,7 +504,7 @@ static int max17042_init_model(struct max17042_chip *chip) | |||
501 | ret = max17042_model_data_compare( | 504 | ret = max17042_model_data_compare( |
502 | chip, | 505 | chip, |
503 | chip->pdata->config_data->cell_char_tbl, | 506 | chip->pdata->config_data->cell_char_tbl, |
504 | (u16 *)temp_data, | 507 | temp_data, |
505 | table_size); | 508 | table_size); |
506 | 509 | ||
507 | max10742_lock_model(chip); | 510 | max10742_lock_model(chip); |
@@ -514,7 +517,7 @@ static int max17042_verify_model_lock(struct max17042_chip *chip) | |||
514 | { | 517 | { |
515 | int i; | 518 | int i; |
516 | int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl); | 519 | int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl); |
517 | u32 *temp_data; | 520 | u16 *temp_data; |
518 | int ret = 0; | 521 | int ret = 0; |
519 | 522 | ||
520 | temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL); | 523 | temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL); |
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig index 3bfac539334b..c74c3f67b8da 100644 --- a/drivers/power/reset/Kconfig +++ b/drivers/power/reset/Kconfig | |||
@@ -200,8 +200,8 @@ config REBOOT_MODE | |||
200 | config SYSCON_REBOOT_MODE | 200 | config SYSCON_REBOOT_MODE |
201 | tristate "Generic SYSCON regmap reboot mode driver" | 201 | tristate "Generic SYSCON regmap reboot mode driver" |
202 | depends on OF | 202 | depends on OF |
203 | depends on MFD_SYSCON | ||
203 | select REBOOT_MODE | 204 | select REBOOT_MODE |
204 | select MFD_SYSCON | ||
205 | help | 205 | help |
206 | Say y here will enable reboot mode driver. This will | 206 | Say y here will enable reboot mode driver. This will |
207 | get reboot mode arguments and store it in SYSCON mapped | 207 | get reboot mode arguments and store it in SYSCON mapped |
diff --git a/drivers/power/reset/hisi-reboot.c b/drivers/power/reset/hisi-reboot.c index 9ab7f562a83b..f69387e12c1e 100644 --- a/drivers/power/reset/hisi-reboot.c +++ b/drivers/power/reset/hisi-reboot.c | |||
@@ -53,13 +53,16 @@ static int hisi_reboot_probe(struct platform_device *pdev) | |||
53 | 53 | ||
54 | if (of_property_read_u32(np, "reboot-offset", &reboot_offset) < 0) { | 54 | if (of_property_read_u32(np, "reboot-offset", &reboot_offset) < 0) { |
55 | pr_err("failed to find reboot-offset property\n"); | 55 | pr_err("failed to find reboot-offset property\n"); |
56 | iounmap(base); | ||
56 | return -EINVAL; | 57 | return -EINVAL; |
57 | } | 58 | } |
58 | 59 | ||
59 | err = register_restart_handler(&hisi_restart_nb); | 60 | err = register_restart_handler(&hisi_restart_nb); |
60 | if (err) | 61 | if (err) { |
61 | dev_err(&pdev->dev, "cannot register restart handler (err=%d)\n", | 62 | dev_err(&pdev->dev, "cannot register restart handler (err=%d)\n", |
62 | err); | 63 | err); |
64 | iounmap(base); | ||
65 | } | ||
63 | 66 | ||
64 | return err; | 67 | return err; |
65 | } | 68 | } |
diff --git a/drivers/power/tps65217_charger.c b/drivers/power/tps65217_charger.c index 73dfae41def8..4c56e54af6ac 100644 --- a/drivers/power/tps65217_charger.c +++ b/drivers/power/tps65217_charger.c | |||
@@ -206,6 +206,7 @@ static int tps65217_charger_probe(struct platform_device *pdev) | |||
206 | if (!charger) | 206 | if (!charger) |
207 | return -ENOMEM; | 207 | return -ENOMEM; |
208 | 208 | ||
209 | platform_set_drvdata(pdev, charger); | ||
209 | charger->tps = tps; | 210 | charger->tps = tps; |
210 | charger->dev = &pdev->dev; | 211 | charger->dev = &pdev->dev; |
211 | 212 | ||
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c index 32f0f014a067..9d19b9a62011 100644 --- a/drivers/rapidio/devices/tsi721.c +++ b/drivers/rapidio/devices/tsi721.c | |||
@@ -1161,7 +1161,7 @@ static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart, | |||
1161 | } else if (ibw_start < (ib_win->rstart + ib_win->size) && | 1161 | } else if (ibw_start < (ib_win->rstart + ib_win->size) && |
1162 | (ibw_start + ibw_size) > ib_win->rstart) { | 1162 | (ibw_start + ibw_size) > ib_win->rstart) { |
1163 | /* Return error if address translation involved */ | 1163 | /* Return error if address translation involved */ |
1164 | if (direct && ib_win->xlat) { | 1164 | if (!direct || ib_win->xlat) { |
1165 | ret = -EFAULT; | 1165 | ret = -EFAULT; |
1166 | break; | 1166 | break; |
1167 | } | 1167 | } |
diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c index 3fa17ac8df54..cebc296463ad 100644 --- a/drivers/rapidio/rio_cm.c +++ b/drivers/rapidio/rio_cm.c | |||
@@ -2247,17 +2247,30 @@ static int rio_cm_shutdown(struct notifier_block *nb, unsigned long code, | |||
2247 | { | 2247 | { |
2248 | struct rio_channel *ch; | 2248 | struct rio_channel *ch; |
2249 | unsigned int i; | 2249 | unsigned int i; |
2250 | LIST_HEAD(list); | ||
2250 | 2251 | ||
2251 | riocm_debug(EXIT, "."); | 2252 | riocm_debug(EXIT, "."); |
2252 | 2253 | ||
2254 | /* | ||
2255 | * If there are any channels left in connected state send | ||
2256 | * close notification to the connection partner. | ||
2257 | * First build a list of channels that require a closing | ||
2258 | * notification because function riocm_send_close() should | ||
2259 | * be called outside of spinlock protected code. | ||
2260 | */ | ||
2253 | spin_lock_bh(&idr_lock); | 2261 | spin_lock_bh(&idr_lock); |
2254 | idr_for_each_entry(&ch_idr, ch, i) { | 2262 | idr_for_each_entry(&ch_idr, ch, i) { |
2255 | riocm_debug(EXIT, "close ch %d", ch->id); | 2263 | if (ch->state == RIO_CM_CONNECTED) { |
2256 | if (ch->state == RIO_CM_CONNECTED) | 2264 | riocm_debug(EXIT, "close ch %d", ch->id); |
2257 | riocm_send_close(ch); | 2265 | idr_remove(&ch_idr, ch->id); |
2266 | list_add(&ch->ch_node, &list); | ||
2267 | } | ||
2258 | } | 2268 | } |
2259 | spin_unlock_bh(&idr_lock); | 2269 | spin_unlock_bh(&idr_lock); |
2260 | 2270 | ||
2271 | list_for_each_entry(ch, &list, ch_node) | ||
2272 | riocm_send_close(ch); | ||
2273 | |||
2261 | return NOTIFY_DONE; | 2274 | return NOTIFY_DONE; |
2262 | } | 2275 | } |
2263 | 2276 | ||
diff --git a/drivers/regulator/max14577-regulator.c b/drivers/regulator/max14577-regulator.c index b2daa6641417..c9ff26199711 100644 --- a/drivers/regulator/max14577-regulator.c +++ b/drivers/regulator/max14577-regulator.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * max14577.c - Regulator driver for the Maxim 14577/77836 | 2 | * max14577.c - Regulator driver for the Maxim 14577/77836 |
3 | * | 3 | * |
4 | * Copyright (C) 2013,2014 Samsung Electronics | 4 | * Copyright (C) 2013,2014 Samsung Electronics |
5 | * Krzysztof Kozlowski <k.kozlowski@samsung.com> | 5 | * Krzysztof Kozlowski <krzk@kernel.org> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License as published by | 8 | * it under the terms of the GNU General Public License as published by |
@@ -331,7 +331,7 @@ static void __exit max14577_regulator_exit(void) | |||
331 | } | 331 | } |
332 | module_exit(max14577_regulator_exit); | 332 | module_exit(max14577_regulator_exit); |
333 | 333 | ||
334 | MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski@samsung.com>"); | 334 | MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>"); |
335 | MODULE_DESCRIPTION("Maxim 14577/77836 regulator driver"); | 335 | MODULE_DESCRIPTION("Maxim 14577/77836 regulator driver"); |
336 | MODULE_LICENSE("GPL"); | 336 | MODULE_LICENSE("GPL"); |
337 | MODULE_ALIAS("platform:max14577-regulator"); | 337 | MODULE_ALIAS("platform:max14577-regulator"); |
diff --git a/drivers/regulator/max77693-regulator.c b/drivers/regulator/max77693-regulator.c index de730fd3f8a5..cfbb9512e486 100644 --- a/drivers/regulator/max77693-regulator.c +++ b/drivers/regulator/max77693-regulator.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 2013-2015 Samsung Electronics | 4 | * Copyright (C) 2013-2015 Samsung Electronics |
5 | * Jonghwa Lee <jonghwa3.lee@samsung.com> | 5 | * Jonghwa Lee <jonghwa3.lee@samsung.com> |
6 | * Krzysztof Kozlowski <k.kozlowski.k@gmail.com> | 6 | * Krzysztof Kozlowski <krzk@kernel.org> |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License as published by | 9 | * it under the terms of the GNU General Public License as published by |
@@ -314,5 +314,5 @@ module_exit(max77693_pmic_cleanup); | |||
314 | 314 | ||
315 | MODULE_DESCRIPTION("MAXIM 77693/77843 regulator driver"); | 315 | MODULE_DESCRIPTION("MAXIM 77693/77843 regulator driver"); |
316 | MODULE_AUTHOR("Jonghwa Lee <jonghwa3.lee@samsung.com>"); | 316 | MODULE_AUTHOR("Jonghwa Lee <jonghwa3.lee@samsung.com>"); |
317 | MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski.k@gmail.com>"); | 317 | MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>"); |
318 | MODULE_LICENSE("GPL"); | 318 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c index 5022fa8d10c6..8ed46a9a55c8 100644 --- a/drivers/regulator/qcom_smd-regulator.c +++ b/drivers/regulator/qcom_smd-regulator.c | |||
@@ -178,20 +178,21 @@ static const struct regulator_desc pma8084_hfsmps = { | |||
178 | static const struct regulator_desc pma8084_ftsmps = { | 178 | static const struct regulator_desc pma8084_ftsmps = { |
179 | .linear_ranges = (struct regulator_linear_range[]) { | 179 | .linear_ranges = (struct regulator_linear_range[]) { |
180 | REGULATOR_LINEAR_RANGE(350000, 0, 184, 5000), | 180 | REGULATOR_LINEAR_RANGE(350000, 0, 184, 5000), |
181 | REGULATOR_LINEAR_RANGE(700000, 185, 339, 10000), | 181 | REGULATOR_LINEAR_RANGE(1280000, 185, 261, 10000), |
182 | }, | 182 | }, |
183 | .n_linear_ranges = 2, | 183 | .n_linear_ranges = 2, |
184 | .n_voltages = 340, | 184 | .n_voltages = 262, |
185 | .ops = &rpm_smps_ldo_ops, | 185 | .ops = &rpm_smps_ldo_ops, |
186 | }; | 186 | }; |
187 | 187 | ||
188 | static const struct regulator_desc pma8084_pldo = { | 188 | static const struct regulator_desc pma8084_pldo = { |
189 | .linear_ranges = (struct regulator_linear_range[]) { | 189 | .linear_ranges = (struct regulator_linear_range[]) { |
190 | REGULATOR_LINEAR_RANGE(750000, 0, 30, 25000), | 190 | REGULATOR_LINEAR_RANGE( 750000, 0, 63, 12500), |
191 | REGULATOR_LINEAR_RANGE(1500000, 31, 99, 50000), | 191 | REGULATOR_LINEAR_RANGE(1550000, 64, 126, 25000), |
192 | REGULATOR_LINEAR_RANGE(3100000, 127, 163, 50000), | ||
192 | }, | 193 | }, |
193 | .n_linear_ranges = 2, | 194 | .n_linear_ranges = 3, |
194 | .n_voltages = 100, | 195 | .n_voltages = 164, |
195 | .ops = &rpm_smps_ldo_ops, | 196 | .ops = &rpm_smps_ldo_ops, |
196 | }; | 197 | }; |
197 | 198 | ||
@@ -221,29 +222,30 @@ static const struct regulator_desc pm8x41_hfsmps = { | |||
221 | static const struct regulator_desc pm8841_ftsmps = { | 222 | static const struct regulator_desc pm8841_ftsmps = { |
222 | .linear_ranges = (struct regulator_linear_range[]) { | 223 | .linear_ranges = (struct regulator_linear_range[]) { |
223 | REGULATOR_LINEAR_RANGE(350000, 0, 184, 5000), | 224 | REGULATOR_LINEAR_RANGE(350000, 0, 184, 5000), |
224 | REGULATOR_LINEAR_RANGE(700000, 185, 339, 10000), | 225 | REGULATOR_LINEAR_RANGE(1280000, 185, 261, 10000), |
225 | }, | 226 | }, |
226 | .n_linear_ranges = 2, | 227 | .n_linear_ranges = 2, |
227 | .n_voltages = 340, | 228 | .n_voltages = 262, |
228 | .ops = &rpm_smps_ldo_ops, | 229 | .ops = &rpm_smps_ldo_ops, |
229 | }; | 230 | }; |
230 | 231 | ||
231 | static const struct regulator_desc pm8941_boost = { | 232 | static const struct regulator_desc pm8941_boost = { |
232 | .linear_ranges = (struct regulator_linear_range[]) { | 233 | .linear_ranges = (struct regulator_linear_range[]) { |
233 | REGULATOR_LINEAR_RANGE(4000000, 0, 15, 100000), | 234 | REGULATOR_LINEAR_RANGE(4000000, 0, 30, 50000), |
234 | }, | 235 | }, |
235 | .n_linear_ranges = 1, | 236 | .n_linear_ranges = 1, |
236 | .n_voltages = 16, | 237 | .n_voltages = 31, |
237 | .ops = &rpm_smps_ldo_ops, | 238 | .ops = &rpm_smps_ldo_ops, |
238 | }; | 239 | }; |
239 | 240 | ||
240 | static const struct regulator_desc pm8941_pldo = { | 241 | static const struct regulator_desc pm8941_pldo = { |
241 | .linear_ranges = (struct regulator_linear_range[]) { | 242 | .linear_ranges = (struct regulator_linear_range[]) { |
242 | REGULATOR_LINEAR_RANGE( 750000, 0, 30, 25000), | 243 | REGULATOR_LINEAR_RANGE( 750000, 0, 63, 12500), |
243 | REGULATOR_LINEAR_RANGE(1500000, 31, 99, 50000), | 244 | REGULATOR_LINEAR_RANGE(1550000, 64, 126, 25000), |
245 | REGULATOR_LINEAR_RANGE(3100000, 127, 163, 50000), | ||
244 | }, | 246 | }, |
245 | .n_linear_ranges = 2, | 247 | .n_linear_ranges = 3, |
246 | .n_voltages = 100, | 248 | .n_voltages = 164, |
247 | .ops = &rpm_smps_ldo_ops, | 249 | .ops = &rpm_smps_ldo_ops, |
248 | }; | 250 | }; |
249 | 251 | ||
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 8973d34ce5ba..fb1b56a71475 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -1643,9 +1643,18 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
1643 | u8 *sense = NULL; | 1643 | u8 *sense = NULL; |
1644 | int expires; | 1644 | int expires; |
1645 | 1645 | ||
1646 | cqr = (struct dasd_ccw_req *) intparm; | ||
1646 | if (IS_ERR(irb)) { | 1647 | if (IS_ERR(irb)) { |
1647 | switch (PTR_ERR(irb)) { | 1648 | switch (PTR_ERR(irb)) { |
1648 | case -EIO: | 1649 | case -EIO: |
1650 | if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) { | ||
1651 | device = (struct dasd_device *) cqr->startdev; | ||
1652 | cqr->status = DASD_CQR_CLEARED; | ||
1653 | dasd_device_clear_timer(device); | ||
1654 | wake_up(&dasd_flush_wq); | ||
1655 | dasd_schedule_device_bh(device); | ||
1656 | return; | ||
1657 | } | ||
1649 | break; | 1658 | break; |
1650 | case -ETIMEDOUT: | 1659 | case -ETIMEDOUT: |
1651 | DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " | 1660 | DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " |
@@ -1661,7 +1670,6 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
1661 | } | 1670 | } |
1662 | 1671 | ||
1663 | now = get_tod_clock(); | 1672 | now = get_tod_clock(); |
1664 | cqr = (struct dasd_ccw_req *) intparm; | ||
1665 | /* check for conditions that should be handled immediately */ | 1673 | /* check for conditions that should be handled immediately */ |
1666 | if (!cqr || | 1674 | if (!cqr || |
1667 | !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && | 1675 | !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && |
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index fd2eff440098..98bbec44bcd0 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c | |||
@@ -5078,6 +5078,8 @@ static int dasd_eckd_read_message_buffer(struct dasd_device *device, | |||
5078 | return PTR_ERR(cqr); | 5078 | return PTR_ERR(cqr); |
5079 | } | 5079 | } |
5080 | 5080 | ||
5081 | cqr->lpm = lpum; | ||
5082 | retry: | ||
5081 | cqr->startdev = device; | 5083 | cqr->startdev = device; |
5082 | cqr->memdev = device; | 5084 | cqr->memdev = device; |
5083 | cqr->block = NULL; | 5085 | cqr->block = NULL; |
@@ -5122,6 +5124,14 @@ static int dasd_eckd_read_message_buffer(struct dasd_device *device, | |||
5122 | (prssdp + 1); | 5124 | (prssdp + 1); |
5123 | memcpy(messages, message_buf, | 5125 | memcpy(messages, message_buf, |
5124 | sizeof(struct dasd_rssd_messages)); | 5126 | sizeof(struct dasd_rssd_messages)); |
5127 | } else if (cqr->lpm) { | ||
5128 | /* | ||
5129 | * on z/VM we might not be able to do I/O on the requested path | ||
5130 | * but instead we get the required information on any path | ||
5131 | * so retry with open path mask | ||
5132 | */ | ||
5133 | cqr->lpm = 0; | ||
5134 | goto retry; | ||
5125 | } else | 5135 | } else |
5126 | DBF_EVENT_DEVID(DBF_WARNING, device->cdev, | 5136 | DBF_EVENT_DEVID(DBF_WARNING, device->cdev, |
5127 | "Reading messages failed with rc=%d\n" | 5137 | "Reading messages failed with rc=%d\n" |
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 7ada078ffdd0..6a58bc8f46e2 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c | |||
@@ -762,7 +762,6 @@ static int io_subchannel_initialize_dev(struct subchannel *sch, | |||
762 | priv->state = DEV_STATE_NOT_OPER; | 762 | priv->state = DEV_STATE_NOT_OPER; |
763 | priv->dev_id.devno = sch->schib.pmcw.dev; | 763 | priv->dev_id.devno = sch->schib.pmcw.dev; |
764 | priv->dev_id.ssid = sch->schid.ssid; | 764 | priv->dev_id.ssid = sch->schid.ssid; |
765 | priv->schid = sch->schid; | ||
766 | 765 | ||
767 | INIT_WORK(&priv->todo_work, ccw_device_todo); | 766 | INIT_WORK(&priv->todo_work, ccw_device_todo); |
768 | INIT_LIST_HEAD(&priv->cmb_list); | 767 | INIT_LIST_HEAD(&priv->cmb_list); |
@@ -1000,7 +999,6 @@ static int ccw_device_move_to_sch(struct ccw_device *cdev, | |||
1000 | put_device(&old_sch->dev); | 999 | put_device(&old_sch->dev); |
1001 | /* Initialize new subchannel. */ | 1000 | /* Initialize new subchannel. */ |
1002 | spin_lock_irq(sch->lock); | 1001 | spin_lock_irq(sch->lock); |
1003 | cdev->private->schid = sch->schid; | ||
1004 | cdev->ccwlock = sch->lock; | 1002 | cdev->ccwlock = sch->lock; |
1005 | if (!sch_is_pseudo_sch(sch)) | 1003 | if (!sch_is_pseudo_sch(sch)) |
1006 | sch_set_cdev(sch, cdev); | 1004 | sch_set_cdev(sch, cdev); |
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c index 15b56a15db15..9bc3512374c9 100644 --- a/drivers/s390/cio/device_status.c +++ b/drivers/s390/cio/device_status.c | |||
@@ -26,6 +26,7 @@ | |||
26 | static void | 26 | static void |
27 | ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) | 27 | ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) |
28 | { | 28 | { |
29 | struct subchannel *sch = to_subchannel(cdev->dev.parent); | ||
29 | char dbf_text[15]; | 30 | char dbf_text[15]; |
30 | 31 | ||
31 | if (!scsw_is_valid_cstat(&irb->scsw) || | 32 | if (!scsw_is_valid_cstat(&irb->scsw) || |
@@ -36,10 +37,10 @@ ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) | |||
36 | "received" | 37 | "received" |
37 | " ... device %04x on subchannel 0.%x.%04x, dev_stat " | 38 | " ... device %04x on subchannel 0.%x.%04x, dev_stat " |
38 | ": %02X sch_stat : %02X\n", | 39 | ": %02X sch_stat : %02X\n", |
39 | cdev->private->dev_id.devno, cdev->private->schid.ssid, | 40 | cdev->private->dev_id.devno, sch->schid.ssid, |
40 | cdev->private->schid.sch_no, | 41 | sch->schid.sch_no, |
41 | scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw)); | 42 | scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw)); |
42 | sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no); | 43 | sprintf(dbf_text, "chk%x", sch->schid.sch_no); |
43 | CIO_TRACE_EVENT(0, dbf_text); | 44 | CIO_TRACE_EVENT(0, dbf_text); |
44 | CIO_HEX_EVENT(0, irb, sizeof(struct irb)); | 45 | CIO_HEX_EVENT(0, irb, sizeof(struct irb)); |
45 | } | 46 | } |
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h index 8975060af96c..220f49145b2f 100644 --- a/drivers/s390/cio/io_sch.h +++ b/drivers/s390/cio/io_sch.h | |||
@@ -120,7 +120,6 @@ struct ccw_device_private { | |||
120 | int state; /* device state */ | 120 | int state; /* device state */ |
121 | atomic_t onoff; | 121 | atomic_t onoff; |
122 | struct ccw_dev_id dev_id; /* device id */ | 122 | struct ccw_dev_id dev_id; /* device id */ |
123 | struct subchannel_id schid; /* subchannel number */ | ||
124 | struct ccw_request req; /* internal I/O request */ | 123 | struct ccw_request req; /* internal I/O request */ |
125 | int iretry; | 124 | int iretry; |
126 | u8 pgid_valid_mask; /* mask of valid PGIDs */ | 125 | u8 pgid_valid_mask; /* mask of valid PGIDs */ |
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 4bb5262f7aee..71bf9bded485 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c | |||
@@ -686,6 +686,15 @@ static void qdio_kick_handler(struct qdio_q *q) | |||
686 | q->qdio_error = 0; | 686 | q->qdio_error = 0; |
687 | } | 687 | } |
688 | 688 | ||
689 | static inline int qdio_tasklet_schedule(struct qdio_q *q) | ||
690 | { | ||
691 | if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) { | ||
692 | tasklet_schedule(&q->tasklet); | ||
693 | return 0; | ||
694 | } | ||
695 | return -EPERM; | ||
696 | } | ||
697 | |||
689 | static void __qdio_inbound_processing(struct qdio_q *q) | 698 | static void __qdio_inbound_processing(struct qdio_q *q) |
690 | { | 699 | { |
691 | qperf_inc(q, tasklet_inbound); | 700 | qperf_inc(q, tasklet_inbound); |
@@ -698,10 +707,8 @@ static void __qdio_inbound_processing(struct qdio_q *q) | |||
698 | if (!qdio_inbound_q_done(q)) { | 707 | if (!qdio_inbound_q_done(q)) { |
699 | /* means poll time is not yet over */ | 708 | /* means poll time is not yet over */ |
700 | qperf_inc(q, tasklet_inbound_resched); | 709 | qperf_inc(q, tasklet_inbound_resched); |
701 | if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) { | 710 | if (!qdio_tasklet_schedule(q)) |
702 | tasklet_schedule(&q->tasklet); | ||
703 | return; | 711 | return; |
704 | } | ||
705 | } | 712 | } |
706 | 713 | ||
707 | qdio_stop_polling(q); | 714 | qdio_stop_polling(q); |
@@ -711,8 +718,7 @@ static void __qdio_inbound_processing(struct qdio_q *q) | |||
711 | */ | 718 | */ |
712 | if (!qdio_inbound_q_done(q)) { | 719 | if (!qdio_inbound_q_done(q)) { |
713 | qperf_inc(q, tasklet_inbound_resched2); | 720 | qperf_inc(q, tasklet_inbound_resched2); |
714 | if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) | 721 | qdio_tasklet_schedule(q); |
715 | tasklet_schedule(&q->tasklet); | ||
716 | } | 722 | } |
717 | } | 723 | } |
718 | 724 | ||
@@ -869,16 +875,15 @@ static void __qdio_outbound_processing(struct qdio_q *q) | |||
869 | * is noticed and outbound_handler is called after some time. | 875 | * is noticed and outbound_handler is called after some time. |
870 | */ | 876 | */ |
871 | if (qdio_outbound_q_done(q)) | 877 | if (qdio_outbound_q_done(q)) |
872 | del_timer(&q->u.out.timer); | 878 | del_timer_sync(&q->u.out.timer); |
873 | else | 879 | else |
874 | if (!timer_pending(&q->u.out.timer)) | 880 | if (!timer_pending(&q->u.out.timer) && |
881 | likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) | ||
875 | mod_timer(&q->u.out.timer, jiffies + 10 * HZ); | 882 | mod_timer(&q->u.out.timer, jiffies + 10 * HZ); |
876 | return; | 883 | return; |
877 | 884 | ||
878 | sched: | 885 | sched: |
879 | if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) | 886 | qdio_tasklet_schedule(q); |
880 | return; | ||
881 | tasklet_schedule(&q->tasklet); | ||
882 | } | 887 | } |
883 | 888 | ||
884 | /* outbound tasklet */ | 889 | /* outbound tasklet */ |
@@ -892,9 +897,7 @@ void qdio_outbound_timer(unsigned long data) | |||
892 | { | 897 | { |
893 | struct qdio_q *q = (struct qdio_q *)data; | 898 | struct qdio_q *q = (struct qdio_q *)data; |
894 | 899 | ||
895 | if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) | 900 | qdio_tasklet_schedule(q); |
896 | return; | ||
897 | tasklet_schedule(&q->tasklet); | ||
898 | } | 901 | } |
899 | 902 | ||
900 | static inline void qdio_check_outbound_after_thinint(struct qdio_q *q) | 903 | static inline void qdio_check_outbound_after_thinint(struct qdio_q *q) |
@@ -907,7 +910,7 @@ static inline void qdio_check_outbound_after_thinint(struct qdio_q *q) | |||
907 | 910 | ||
908 | for_each_output_queue(q->irq_ptr, out, i) | 911 | for_each_output_queue(q->irq_ptr, out, i) |
909 | if (!qdio_outbound_q_done(out)) | 912 | if (!qdio_outbound_q_done(out)) |
910 | tasklet_schedule(&out->tasklet); | 913 | qdio_tasklet_schedule(out); |
911 | } | 914 | } |
912 | 915 | ||
913 | static void __tiqdio_inbound_processing(struct qdio_q *q) | 916 | static void __tiqdio_inbound_processing(struct qdio_q *q) |
@@ -929,10 +932,8 @@ static void __tiqdio_inbound_processing(struct qdio_q *q) | |||
929 | 932 | ||
930 | if (!qdio_inbound_q_done(q)) { | 933 | if (!qdio_inbound_q_done(q)) { |
931 | qperf_inc(q, tasklet_inbound_resched); | 934 | qperf_inc(q, tasklet_inbound_resched); |
932 | if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) { | 935 | if (!qdio_tasklet_schedule(q)) |
933 | tasklet_schedule(&q->tasklet); | ||
934 | return; | 936 | return; |
935 | } | ||
936 | } | 937 | } |
937 | 938 | ||
938 | qdio_stop_polling(q); | 939 | qdio_stop_polling(q); |
@@ -942,8 +943,7 @@ static void __tiqdio_inbound_processing(struct qdio_q *q) | |||
942 | */ | 943 | */ |
943 | if (!qdio_inbound_q_done(q)) { | 944 | if (!qdio_inbound_q_done(q)) { |
944 | qperf_inc(q, tasklet_inbound_resched2); | 945 | qperf_inc(q, tasklet_inbound_resched2); |
945 | if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) | 946 | qdio_tasklet_schedule(q); |
946 | tasklet_schedule(&q->tasklet); | ||
947 | } | 947 | } |
948 | } | 948 | } |
949 | 949 | ||
@@ -977,7 +977,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) | |||
977 | int i; | 977 | int i; |
978 | struct qdio_q *q; | 978 | struct qdio_q *q; |
979 | 979 | ||
980 | if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) | 980 | if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) |
981 | return; | 981 | return; |
982 | 982 | ||
983 | for_each_input_queue(irq_ptr, q, i) { | 983 | for_each_input_queue(irq_ptr, q, i) { |
@@ -1003,7 +1003,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) | |||
1003 | continue; | 1003 | continue; |
1004 | if (need_siga_sync(q) && need_siga_sync_out_after_pci(q)) | 1004 | if (need_siga_sync(q) && need_siga_sync_out_after_pci(q)) |
1005 | qdio_siga_sync_q(q); | 1005 | qdio_siga_sync_q(q); |
1006 | tasklet_schedule(&q->tasklet); | 1006 | qdio_tasklet_schedule(q); |
1007 | } | 1007 | } |
1008 | } | 1008 | } |
1009 | 1009 | ||
@@ -1066,10 +1066,12 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
1066 | struct irb *irb) | 1066 | struct irb *irb) |
1067 | { | 1067 | { |
1068 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; | 1068 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; |
1069 | struct subchannel_id schid; | ||
1069 | int cstat, dstat; | 1070 | int cstat, dstat; |
1070 | 1071 | ||
1071 | if (!intparm || !irq_ptr) { | 1072 | if (!intparm || !irq_ptr) { |
1072 | DBF_ERROR("qint:%4x", cdev->private->schid.sch_no); | 1073 | ccw_device_get_schid(cdev, &schid); |
1074 | DBF_ERROR("qint:%4x", schid.sch_no); | ||
1073 | return; | 1075 | return; |
1074 | } | 1076 | } |
1075 | 1077 | ||
@@ -1122,12 +1124,14 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
1122 | int qdio_get_ssqd_desc(struct ccw_device *cdev, | 1124 | int qdio_get_ssqd_desc(struct ccw_device *cdev, |
1123 | struct qdio_ssqd_desc *data) | 1125 | struct qdio_ssqd_desc *data) |
1124 | { | 1126 | { |
1127 | struct subchannel_id schid; | ||
1125 | 1128 | ||
1126 | if (!cdev || !cdev->private) | 1129 | if (!cdev || !cdev->private) |
1127 | return -EINVAL; | 1130 | return -EINVAL; |
1128 | 1131 | ||
1129 | DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no); | 1132 | ccw_device_get_schid(cdev, &schid); |
1130 | return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data); | 1133 | DBF_EVENT("get ssqd:%4x", schid.sch_no); |
1134 | return qdio_setup_get_ssqd(NULL, &schid, data); | ||
1131 | } | 1135 | } |
1132 | EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); | 1136 | EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); |
1133 | 1137 | ||
@@ -1141,7 +1145,7 @@ static void qdio_shutdown_queues(struct ccw_device *cdev) | |||
1141 | tasklet_kill(&q->tasklet); | 1145 | tasklet_kill(&q->tasklet); |
1142 | 1146 | ||
1143 | for_each_output_queue(irq_ptr, q, i) { | 1147 | for_each_output_queue(irq_ptr, q, i) { |
1144 | del_timer(&q->u.out.timer); | 1148 | del_timer_sync(&q->u.out.timer); |
1145 | tasklet_kill(&q->tasklet); | 1149 | tasklet_kill(&q->tasklet); |
1146 | } | 1150 | } |
1147 | } | 1151 | } |
@@ -1154,14 +1158,15 @@ static void qdio_shutdown_queues(struct ccw_device *cdev) | |||
1154 | int qdio_shutdown(struct ccw_device *cdev, int how) | 1158 | int qdio_shutdown(struct ccw_device *cdev, int how) |
1155 | { | 1159 | { |
1156 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; | 1160 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; |
1161 | struct subchannel_id schid; | ||
1157 | int rc; | 1162 | int rc; |
1158 | unsigned long flags; | ||
1159 | 1163 | ||
1160 | if (!irq_ptr) | 1164 | if (!irq_ptr) |
1161 | return -ENODEV; | 1165 | return -ENODEV; |
1162 | 1166 | ||
1163 | WARN_ON_ONCE(irqs_disabled()); | 1167 | WARN_ON_ONCE(irqs_disabled()); |
1164 | DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no); | 1168 | ccw_device_get_schid(cdev, &schid); |
1169 | DBF_EVENT("qshutdown:%4x", schid.sch_no); | ||
1165 | 1170 | ||
1166 | mutex_lock(&irq_ptr->setup_mutex); | 1171 | mutex_lock(&irq_ptr->setup_mutex); |
1167 | /* | 1172 | /* |
@@ -1184,7 +1189,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how) | |||
1184 | qdio_shutdown_debug_entries(irq_ptr); | 1189 | qdio_shutdown_debug_entries(irq_ptr); |
1185 | 1190 | ||
1186 | /* cleanup subchannel */ | 1191 | /* cleanup subchannel */ |
1187 | spin_lock_irqsave(get_ccwdev_lock(cdev), flags); | 1192 | spin_lock_irq(get_ccwdev_lock(cdev)); |
1188 | 1193 | ||
1189 | if (how & QDIO_FLAG_CLEANUP_USING_CLEAR) | 1194 | if (how & QDIO_FLAG_CLEANUP_USING_CLEAR) |
1190 | rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP); | 1195 | rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP); |
@@ -1198,12 +1203,12 @@ int qdio_shutdown(struct ccw_device *cdev, int how) | |||
1198 | } | 1203 | } |
1199 | 1204 | ||
1200 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); | 1205 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); |
1201 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); | 1206 | spin_unlock_irq(get_ccwdev_lock(cdev)); |
1202 | wait_event_interruptible_timeout(cdev->private->wait_q, | 1207 | wait_event_interruptible_timeout(cdev->private->wait_q, |
1203 | irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || | 1208 | irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || |
1204 | irq_ptr->state == QDIO_IRQ_STATE_ERR, | 1209 | irq_ptr->state == QDIO_IRQ_STATE_ERR, |
1205 | 10 * HZ); | 1210 | 10 * HZ); |
1206 | spin_lock_irqsave(get_ccwdev_lock(cdev), flags); | 1211 | spin_lock_irq(get_ccwdev_lock(cdev)); |
1207 | 1212 | ||
1208 | no_cleanup: | 1213 | no_cleanup: |
1209 | qdio_shutdown_thinint(irq_ptr); | 1214 | qdio_shutdown_thinint(irq_ptr); |
@@ -1211,7 +1216,7 @@ no_cleanup: | |||
1211 | /* restore interrupt handler */ | 1216 | /* restore interrupt handler */ |
1212 | if ((void *)cdev->handler == (void *)qdio_int_handler) | 1217 | if ((void *)cdev->handler == (void *)qdio_int_handler) |
1213 | cdev->handler = irq_ptr->orig_handler; | 1218 | cdev->handler = irq_ptr->orig_handler; |
1214 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); | 1219 | spin_unlock_irq(get_ccwdev_lock(cdev)); |
1215 | 1220 | ||
1216 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); | 1221 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); |
1217 | mutex_unlock(&irq_ptr->setup_mutex); | 1222 | mutex_unlock(&irq_ptr->setup_mutex); |
@@ -1228,11 +1233,13 @@ EXPORT_SYMBOL_GPL(qdio_shutdown); | |||
1228 | int qdio_free(struct ccw_device *cdev) | 1233 | int qdio_free(struct ccw_device *cdev) |
1229 | { | 1234 | { |
1230 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; | 1235 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; |
1236 | struct subchannel_id schid; | ||
1231 | 1237 | ||
1232 | if (!irq_ptr) | 1238 | if (!irq_ptr) |
1233 | return -ENODEV; | 1239 | return -ENODEV; |
1234 | 1240 | ||
1235 | DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no); | 1241 | ccw_device_get_schid(cdev, &schid); |
1242 | DBF_EVENT("qfree:%4x", schid.sch_no); | ||
1236 | DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned"); | 1243 | DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned"); |
1237 | mutex_lock(&irq_ptr->setup_mutex); | 1244 | mutex_lock(&irq_ptr->setup_mutex); |
1238 | 1245 | ||
@@ -1251,9 +1258,11 @@ EXPORT_SYMBOL_GPL(qdio_free); | |||
1251 | */ | 1258 | */ |
1252 | int qdio_allocate(struct qdio_initialize *init_data) | 1259 | int qdio_allocate(struct qdio_initialize *init_data) |
1253 | { | 1260 | { |
1261 | struct subchannel_id schid; | ||
1254 | struct qdio_irq *irq_ptr; | 1262 | struct qdio_irq *irq_ptr; |
1255 | 1263 | ||
1256 | DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no); | 1264 | ccw_device_get_schid(init_data->cdev, &schid); |
1265 | DBF_EVENT("qallocate:%4x", schid.sch_no); | ||
1257 | 1266 | ||
1258 | if ((init_data->no_input_qs && !init_data->input_handler) || | 1267 | if ((init_data->no_input_qs && !init_data->input_handler) || |
1259 | (init_data->no_output_qs && !init_data->output_handler)) | 1268 | (init_data->no_output_qs && !init_data->output_handler)) |
@@ -1331,20 +1340,18 @@ static void qdio_detect_hsicq(struct qdio_irq *irq_ptr) | |||
1331 | */ | 1340 | */ |
1332 | int qdio_establish(struct qdio_initialize *init_data) | 1341 | int qdio_establish(struct qdio_initialize *init_data) |
1333 | { | 1342 | { |
1334 | struct qdio_irq *irq_ptr; | ||
1335 | struct ccw_device *cdev = init_data->cdev; | 1343 | struct ccw_device *cdev = init_data->cdev; |
1336 | unsigned long saveflags; | 1344 | struct subchannel_id schid; |
1345 | struct qdio_irq *irq_ptr; | ||
1337 | int rc; | 1346 | int rc; |
1338 | 1347 | ||
1339 | DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no); | 1348 | ccw_device_get_schid(cdev, &schid); |
1349 | DBF_EVENT("qestablish:%4x", schid.sch_no); | ||
1340 | 1350 | ||
1341 | irq_ptr = cdev->private->qdio_data; | 1351 | irq_ptr = cdev->private->qdio_data; |
1342 | if (!irq_ptr) | 1352 | if (!irq_ptr) |
1343 | return -ENODEV; | 1353 | return -ENODEV; |
1344 | 1354 | ||
1345 | if (cdev->private->state != DEV_STATE_ONLINE) | ||
1346 | return -EINVAL; | ||
1347 | |||
1348 | mutex_lock(&irq_ptr->setup_mutex); | 1355 | mutex_lock(&irq_ptr->setup_mutex); |
1349 | qdio_setup_irq(init_data); | 1356 | qdio_setup_irq(init_data); |
1350 | 1357 | ||
@@ -1361,17 +1368,14 @@ int qdio_establish(struct qdio_initialize *init_data) | |||
1361 | irq_ptr->ccw.count = irq_ptr->equeue.count; | 1368 | irq_ptr->ccw.count = irq_ptr->equeue.count; |
1362 | irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr); | 1369 | irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr); |
1363 | 1370 | ||
1364 | spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags); | 1371 | spin_lock_irq(get_ccwdev_lock(cdev)); |
1365 | ccw_device_set_options_mask(cdev, 0); | 1372 | ccw_device_set_options_mask(cdev, 0); |
1366 | 1373 | ||
1367 | rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0); | 1374 | rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0); |
1375 | spin_unlock_irq(get_ccwdev_lock(cdev)); | ||
1368 | if (rc) { | 1376 | if (rc) { |
1369 | DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no); | 1377 | DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no); |
1370 | DBF_ERROR("rc:%4x", rc); | 1378 | DBF_ERROR("rc:%4x", rc); |
1371 | } | ||
1372 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); | ||
1373 | |||
1374 | if (rc) { | ||
1375 | mutex_unlock(&irq_ptr->setup_mutex); | 1379 | mutex_unlock(&irq_ptr->setup_mutex); |
1376 | qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); | 1380 | qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); |
1377 | return rc; | 1381 | return rc; |
@@ -1407,19 +1411,17 @@ EXPORT_SYMBOL_GPL(qdio_establish); | |||
1407 | */ | 1411 | */ |
1408 | int qdio_activate(struct ccw_device *cdev) | 1412 | int qdio_activate(struct ccw_device *cdev) |
1409 | { | 1413 | { |
1414 | struct subchannel_id schid; | ||
1410 | struct qdio_irq *irq_ptr; | 1415 | struct qdio_irq *irq_ptr; |
1411 | int rc; | 1416 | int rc; |
1412 | unsigned long saveflags; | ||
1413 | 1417 | ||
1414 | DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no); | 1418 | ccw_device_get_schid(cdev, &schid); |
1419 | DBF_EVENT("qactivate:%4x", schid.sch_no); | ||
1415 | 1420 | ||
1416 | irq_ptr = cdev->private->qdio_data; | 1421 | irq_ptr = cdev->private->qdio_data; |
1417 | if (!irq_ptr) | 1422 | if (!irq_ptr) |
1418 | return -ENODEV; | 1423 | return -ENODEV; |
1419 | 1424 | ||
1420 | if (cdev->private->state != DEV_STATE_ONLINE) | ||
1421 | return -EINVAL; | ||
1422 | |||
1423 | mutex_lock(&irq_ptr->setup_mutex); | 1425 | mutex_lock(&irq_ptr->setup_mutex); |
1424 | if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) { | 1426 | if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) { |
1425 | rc = -EBUSY; | 1427 | rc = -EBUSY; |
@@ -1431,19 +1433,17 @@ int qdio_activate(struct ccw_device *cdev) | |||
1431 | irq_ptr->ccw.count = irq_ptr->aqueue.count; | 1433 | irq_ptr->ccw.count = irq_ptr->aqueue.count; |
1432 | irq_ptr->ccw.cda = 0; | 1434 | irq_ptr->ccw.cda = 0; |
1433 | 1435 | ||
1434 | spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags); | 1436 | spin_lock_irq(get_ccwdev_lock(cdev)); |
1435 | ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); | 1437 | ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); |
1436 | 1438 | ||
1437 | rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE, | 1439 | rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE, |
1438 | 0, DOIO_DENY_PREFETCH); | 1440 | 0, DOIO_DENY_PREFETCH); |
1441 | spin_unlock_irq(get_ccwdev_lock(cdev)); | ||
1439 | if (rc) { | 1442 | if (rc) { |
1440 | DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no); | 1443 | DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no); |
1441 | DBF_ERROR("rc:%4x", rc); | 1444 | DBF_ERROR("rc:%4x", rc); |
1442 | } | ||
1443 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); | ||
1444 | |||
1445 | if (rc) | ||
1446 | goto out; | 1445 | goto out; |
1446 | } | ||
1447 | 1447 | ||
1448 | if (is_thinint_irq(irq_ptr)) | 1448 | if (is_thinint_irq(irq_ptr)) |
1449 | tiqdio_add_input_queues(irq_ptr); | 1449 | tiqdio_add_input_queues(irq_ptr); |
@@ -1585,10 +1585,11 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags, | |||
1585 | 1585 | ||
1586 | /* in case of SIGA errors we must process the error immediately */ | 1586 | /* in case of SIGA errors we must process the error immediately */ |
1587 | if (used >= q->u.out.scan_threshold || rc) | 1587 | if (used >= q->u.out.scan_threshold || rc) |
1588 | tasklet_schedule(&q->tasklet); | 1588 | qdio_tasklet_schedule(q); |
1589 | else | 1589 | else |
1590 | /* free the SBALs in case of no further traffic */ | 1590 | /* free the SBALs in case of no further traffic */ |
1591 | if (!timer_pending(&q->u.out.timer)) | 1591 | if (!timer_pending(&q->u.out.timer) && |
1592 | likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) | ||
1592 | mod_timer(&q->u.out.timer, jiffies + HZ); | 1593 | mod_timer(&q->u.out.timer, jiffies + HZ); |
1593 | return rc; | 1594 | return rc; |
1594 | } | 1595 | } |
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index bf40063de202..6d4b68c483f3 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h | |||
@@ -999,6 +999,7 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *, | |||
999 | __u16, __u16, | 999 | __u16, __u16, |
1000 | enum qeth_prot_versions); | 1000 | enum qeth_prot_versions); |
1001 | int qeth_set_features(struct net_device *, netdev_features_t); | 1001 | int qeth_set_features(struct net_device *, netdev_features_t); |
1002 | int qeth_recover_features(struct net_device *); | ||
1002 | netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t); | 1003 | netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t); |
1003 | 1004 | ||
1004 | /* exports for OSN */ | 1005 | /* exports for OSN */ |
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 7dba6c8537a1..20cf29613043 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
@@ -3619,7 +3619,8 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, | |||
3619 | int e; | 3619 | int e; |
3620 | 3620 | ||
3621 | e = 0; | 3621 | e = 0; |
3622 | while (buffer->element[e].addr) { | 3622 | while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) && |
3623 | buffer->element[e].addr) { | ||
3623 | unsigned long phys_aob_addr; | 3624 | unsigned long phys_aob_addr; |
3624 | 3625 | ||
3625 | phys_aob_addr = (unsigned long) buffer->element[e].addr; | 3626 | phys_aob_addr = (unsigned long) buffer->element[e].addr; |
@@ -6131,6 +6132,35 @@ static int qeth_set_ipa_tso(struct qeth_card *card, int on) | |||
6131 | return rc; | 6132 | return rc; |
6132 | } | 6133 | } |
6133 | 6134 | ||
6135 | /* try to restore device features on a device after recovery */ | ||
6136 | int qeth_recover_features(struct net_device *dev) | ||
6137 | { | ||
6138 | struct qeth_card *card = dev->ml_priv; | ||
6139 | netdev_features_t recover = dev->features; | ||
6140 | |||
6141 | if (recover & NETIF_F_IP_CSUM) { | ||
6142 | if (qeth_set_ipa_csum(card, 1, IPA_OUTBOUND_CHECKSUM)) | ||
6143 | recover ^= NETIF_F_IP_CSUM; | ||
6144 | } | ||
6145 | if (recover & NETIF_F_RXCSUM) { | ||
6146 | if (qeth_set_ipa_csum(card, 1, IPA_INBOUND_CHECKSUM)) | ||
6147 | recover ^= NETIF_F_RXCSUM; | ||
6148 | } | ||
6149 | if (recover & NETIF_F_TSO) { | ||
6150 | if (qeth_set_ipa_tso(card, 1)) | ||
6151 | recover ^= NETIF_F_TSO; | ||
6152 | } | ||
6153 | |||
6154 | if (recover == dev->features) | ||
6155 | return 0; | ||
6156 | |||
6157 | dev_warn(&card->gdev->dev, | ||
6158 | "Device recovery failed to restore all offload features\n"); | ||
6159 | dev->features = recover; | ||
6160 | return -EIO; | ||
6161 | } | ||
6162 | EXPORT_SYMBOL_GPL(qeth_recover_features); | ||
6163 | |||
6134 | int qeth_set_features(struct net_device *dev, netdev_features_t features) | 6164 | int qeth_set_features(struct net_device *dev, netdev_features_t features) |
6135 | { | 6165 | { |
6136 | struct qeth_card *card = dev->ml_priv; | 6166 | struct qeth_card *card = dev->ml_priv; |
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 7bc20c5188bc..bb27058fa9f0 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
@@ -1124,14 +1124,11 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) | |||
1124 | card->dev->hw_features |= NETIF_F_RXCSUM; | 1124 | card->dev->hw_features |= NETIF_F_RXCSUM; |
1125 | card->dev->vlan_features |= NETIF_F_RXCSUM; | 1125 | card->dev->vlan_features |= NETIF_F_RXCSUM; |
1126 | } | 1126 | } |
1127 | /* Turn on SG per default */ | ||
1128 | card->dev->features |= NETIF_F_SG; | ||
1129 | } | 1127 | } |
1130 | card->info.broadcast_capable = 1; | 1128 | card->info.broadcast_capable = 1; |
1131 | qeth_l2_request_initial_mac(card); | 1129 | qeth_l2_request_initial_mac(card); |
1132 | card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) * | 1130 | card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) * |
1133 | PAGE_SIZE; | 1131 | PAGE_SIZE; |
1134 | card->dev->gso_max_segs = (QETH_MAX_BUFFER_ELEMENTS(card) - 1); | ||
1135 | SET_NETDEV_DEV(card->dev, &card->gdev->dev); | 1132 | SET_NETDEV_DEV(card->dev, &card->gdev->dev); |
1136 | netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT); | 1133 | netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT); |
1137 | netif_carrier_off(card->dev); | 1134 | netif_carrier_off(card->dev); |
@@ -1246,6 +1243,9 @@ contin: | |||
1246 | } | 1243 | } |
1247 | /* this also sets saved unicast addresses */ | 1244 | /* this also sets saved unicast addresses */ |
1248 | qeth_l2_set_rx_mode(card->dev); | 1245 | qeth_l2_set_rx_mode(card->dev); |
1246 | rtnl_lock(); | ||
1247 | qeth_recover_features(card->dev); | ||
1248 | rtnl_unlock(); | ||
1249 | } | 1249 | } |
1250 | /* let user_space know that device is online */ | 1250 | /* let user_space know that device is online */ |
1251 | kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); | 1251 | kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); |
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 72934666fedf..272d9e7419be 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
@@ -257,6 +257,11 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) | |||
257 | if (addr->in_progress) | 257 | if (addr->in_progress) |
258 | return -EINPROGRESS; | 258 | return -EINPROGRESS; |
259 | 259 | ||
260 | if (!qeth_card_hw_is_reachable(card)) { | ||
261 | addr->disp_flag = QETH_DISP_ADDR_DELETE; | ||
262 | return 0; | ||
263 | } | ||
264 | |||
260 | rc = qeth_l3_deregister_addr_entry(card, addr); | 265 | rc = qeth_l3_deregister_addr_entry(card, addr); |
261 | 266 | ||
262 | hash_del(&addr->hnode); | 267 | hash_del(&addr->hnode); |
@@ -296,6 +301,11 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) | |||
296 | hash_add(card->ip_htable, &addr->hnode, | 301 | hash_add(card->ip_htable, &addr->hnode, |
297 | qeth_l3_ipaddr_hash(addr)); | 302 | qeth_l3_ipaddr_hash(addr)); |
298 | 303 | ||
304 | if (!qeth_card_hw_is_reachable(card)) { | ||
305 | addr->disp_flag = QETH_DISP_ADDR_ADD; | ||
306 | return 0; | ||
307 | } | ||
308 | |||
299 | /* qeth_l3_register_addr_entry can go to sleep | 309 | /* qeth_l3_register_addr_entry can go to sleep |
300 | * if we add a IPV4 addr. It is caused by the reason | 310 | * if we add a IPV4 addr. It is caused by the reason |
301 | * that SETIP ipa cmd starts ARP staff for IPV4 addr. | 311 | * that SETIP ipa cmd starts ARP staff for IPV4 addr. |
@@ -390,12 +400,16 @@ static void qeth_l3_recover_ip(struct qeth_card *card) | |||
390 | int i; | 400 | int i; |
391 | int rc; | 401 | int rc; |
392 | 402 | ||
393 | QETH_CARD_TEXT(card, 4, "recoverip"); | 403 | QETH_CARD_TEXT(card, 4, "recovrip"); |
394 | 404 | ||
395 | spin_lock_bh(&card->ip_lock); | 405 | spin_lock_bh(&card->ip_lock); |
396 | 406 | ||
397 | hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) { | 407 | hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) { |
398 | if (addr->disp_flag == QETH_DISP_ADDR_ADD) { | 408 | if (addr->disp_flag == QETH_DISP_ADDR_DELETE) { |
409 | qeth_l3_deregister_addr_entry(card, addr); | ||
410 | hash_del(&addr->hnode); | ||
411 | kfree(addr); | ||
412 | } else if (addr->disp_flag == QETH_DISP_ADDR_ADD) { | ||
399 | if (addr->proto == QETH_PROT_IPV4) { | 413 | if (addr->proto == QETH_PROT_IPV4) { |
400 | addr->in_progress = 1; | 414 | addr->in_progress = 1; |
401 | spin_unlock_bh(&card->ip_lock); | 415 | spin_unlock_bh(&card->ip_lock); |
@@ -407,10 +421,8 @@ static void qeth_l3_recover_ip(struct qeth_card *card) | |||
407 | 421 | ||
408 | if (!rc) { | 422 | if (!rc) { |
409 | addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING; | 423 | addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING; |
410 | if (addr->ref_counter < 1) { | 424 | if (addr->ref_counter < 1) |
411 | qeth_l3_delete_ip(card, addr); | 425 | qeth_l3_delete_ip(card, addr); |
412 | kfree(addr); | ||
413 | } | ||
414 | } else { | 426 | } else { |
415 | hash_del(&addr->hnode); | 427 | hash_del(&addr->hnode); |
416 | kfree(addr); | 428 | kfree(addr); |
@@ -689,7 +701,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto, | |||
689 | 701 | ||
690 | spin_lock_bh(&card->ip_lock); | 702 | spin_lock_bh(&card->ip_lock); |
691 | 703 | ||
692 | if (!qeth_l3_ip_from_hash(card, ipaddr)) | 704 | if (qeth_l3_ip_from_hash(card, ipaddr)) |
693 | rc = -EEXIST; | 705 | rc = -EEXIST; |
694 | else | 706 | else |
695 | qeth_l3_add_ip(card, ipaddr); | 707 | qeth_l3_add_ip(card, ipaddr); |
@@ -757,7 +769,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto, | |||
757 | 769 | ||
758 | spin_lock_bh(&card->ip_lock); | 770 | spin_lock_bh(&card->ip_lock); |
759 | 771 | ||
760 | if (!qeth_l3_ip_from_hash(card, ipaddr)) | 772 | if (qeth_l3_ip_from_hash(card, ipaddr)) |
761 | rc = -EEXIST; | 773 | rc = -EEXIST; |
762 | else | 774 | else |
763 | qeth_l3_add_ip(card, ipaddr); | 775 | qeth_l3_add_ip(card, ipaddr); |
@@ -3108,7 +3120,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) | |||
3108 | card->dev->vlan_features = NETIF_F_SG | | 3120 | card->dev->vlan_features = NETIF_F_SG | |
3109 | NETIF_F_RXCSUM | NETIF_F_IP_CSUM | | 3121 | NETIF_F_RXCSUM | NETIF_F_IP_CSUM | |
3110 | NETIF_F_TSO; | 3122 | NETIF_F_TSO; |
3111 | card->dev->features = NETIF_F_SG; | ||
3112 | } | 3123 | } |
3113 | } | 3124 | } |
3114 | } else if (card->info.type == QETH_CARD_TYPE_IQD) { | 3125 | } else if (card->info.type == QETH_CARD_TYPE_IQD) { |
@@ -3136,7 +3147,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) | |||
3136 | netif_keep_dst(card->dev); | 3147 | netif_keep_dst(card->dev); |
3137 | card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) * | 3148 | card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) * |
3138 | PAGE_SIZE; | 3149 | PAGE_SIZE; |
3139 | card->dev->gso_max_segs = (QETH_MAX_BUFFER_ELEMENTS(card) - 1); | ||
3140 | 3150 | ||
3141 | SET_NETDEV_DEV(card->dev, &card->gdev->dev); | 3151 | SET_NETDEV_DEV(card->dev, &card->gdev->dev); |
3142 | netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT); | 3152 | netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT); |
@@ -3269,6 +3279,7 @@ contin: | |||
3269 | else | 3279 | else |
3270 | dev_open(card->dev); | 3280 | dev_open(card->dev); |
3271 | qeth_l3_set_multicast_list(card->dev); | 3281 | qeth_l3_set_multicast_list(card->dev); |
3282 | qeth_recover_features(card->dev); | ||
3272 | rtnl_unlock(); | 3283 | rtnl_unlock(); |
3273 | } | 3284 | } |
3274 | qeth_trace_features(card); | 3285 | qeth_trace_features(card); |
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index 65645b11fc19..0e00a5ce0f00 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c | |||
@@ -297,7 +297,9 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, | |||
297 | addr->u.a6.pfxlen = 0; | 297 | addr->u.a6.pfxlen = 0; |
298 | addr->type = QETH_IP_TYPE_NORMAL; | 298 | addr->type = QETH_IP_TYPE_NORMAL; |
299 | 299 | ||
300 | spin_lock_bh(&card->ip_lock); | ||
300 | qeth_l3_delete_ip(card, addr); | 301 | qeth_l3_delete_ip(card, addr); |
302 | spin_unlock_bh(&card->ip_lock); | ||
301 | kfree(addr); | 303 | kfree(addr); |
302 | } | 304 | } |
303 | 305 | ||
@@ -329,7 +331,10 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, | |||
329 | addr->type = QETH_IP_TYPE_NORMAL; | 331 | addr->type = QETH_IP_TYPE_NORMAL; |
330 | } else | 332 | } else |
331 | return -ENOMEM; | 333 | return -ENOMEM; |
334 | |||
335 | spin_lock_bh(&card->ip_lock); | ||
332 | qeth_l3_add_ip(card, addr); | 336 | qeth_l3_add_ip(card, addr); |
337 | spin_unlock_bh(&card->ip_lock); | ||
333 | kfree(addr); | 338 | kfree(addr); |
334 | 339 | ||
335 | return count; | 340 | return count; |
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index b381b3718a98..5648b715fed9 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c | |||
@@ -63,7 +63,7 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg) | |||
63 | struct fib *fibptr; | 63 | struct fib *fibptr; |
64 | struct hw_fib * hw_fib = (struct hw_fib *)0; | 64 | struct hw_fib * hw_fib = (struct hw_fib *)0; |
65 | dma_addr_t hw_fib_pa = (dma_addr_t)0LL; | 65 | dma_addr_t hw_fib_pa = (dma_addr_t)0LL; |
66 | unsigned size; | 66 | unsigned int size, osize; |
67 | int retval; | 67 | int retval; |
68 | 68 | ||
69 | if (dev->in_reset) { | 69 | if (dev->in_reset) { |
@@ -87,7 +87,8 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg) | |||
87 | * will not overrun the buffer when we copy the memory. Return | 87 | * will not overrun the buffer when we copy the memory. Return |
88 | * an error if we would. | 88 | * an error if we would. |
89 | */ | 89 | */ |
90 | size = le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr); | 90 | osize = size = le16_to_cpu(kfib->header.Size) + |
91 | sizeof(struct aac_fibhdr); | ||
91 | if (size < le16_to_cpu(kfib->header.SenderSize)) | 92 | if (size < le16_to_cpu(kfib->header.SenderSize)) |
92 | size = le16_to_cpu(kfib->header.SenderSize); | 93 | size = le16_to_cpu(kfib->header.SenderSize); |
93 | if (size > dev->max_fib_size) { | 94 | if (size > dev->max_fib_size) { |
@@ -118,6 +119,14 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg) | |||
118 | goto cleanup; | 119 | goto cleanup; |
119 | } | 120 | } |
120 | 121 | ||
122 | /* Sanity check the second copy */ | ||
123 | if ((osize != le16_to_cpu(kfib->header.Size) + | ||
124 | sizeof(struct aac_fibhdr)) | ||
125 | || (size < le16_to_cpu(kfib->header.SenderSize))) { | ||
126 | retval = -EINVAL; | ||
127 | goto cleanup; | ||
128 | } | ||
129 | |||
121 | if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) { | 130 | if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) { |
122 | aac_adapter_interrupt(dev); | 131 | aac_adapter_interrupt(dev); |
123 | /* | 132 | /* |
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c index 83458f7a2824..6dc96c8dfe75 100644 --- a/drivers/scsi/constants.c +++ b/drivers/scsi/constants.c | |||
@@ -361,8 +361,9 @@ static const char * const snstext[] = { | |||
361 | 361 | ||
362 | /* Get sense key string or NULL if not available */ | 362 | /* Get sense key string or NULL if not available */ |
363 | const char * | 363 | const char * |
364 | scsi_sense_key_string(unsigned char key) { | 364 | scsi_sense_key_string(unsigned char key) |
365 | if (key <= 0xE) | 365 | { |
366 | if (key < ARRAY_SIZE(snstext)) | ||
366 | return snstext[key]; | 367 | return snstext[key]; |
367 | return NULL; | 368 | return NULL; |
368 | } | 369 | } |
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index a569c65f22b1..dcf36537a767 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c | |||
@@ -2923,7 +2923,7 @@ static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
2923 | mutex_unlock(&fip->ctlr_mutex); | 2923 | mutex_unlock(&fip->ctlr_mutex); |
2924 | 2924 | ||
2925 | drop: | 2925 | drop: |
2926 | kfree(skb); | 2926 | kfree_skb(skb); |
2927 | return rc; | 2927 | return rc; |
2928 | } | 2928 | } |
2929 | 2929 | ||
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 2dab3dc2aa69..c1ed25adb17e 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c | |||
@@ -5037,7 +5037,7 @@ static int megasas_init_fw(struct megasas_instance *instance) | |||
5037 | /* Find first memory bar */ | 5037 | /* Find first memory bar */ |
5038 | bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM); | 5038 | bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM); |
5039 | instance->bar = find_first_bit(&bar_list, sizeof(unsigned long)); | 5039 | instance->bar = find_first_bit(&bar_list, sizeof(unsigned long)); |
5040 | if (pci_request_selected_regions(instance->pdev, instance->bar, | 5040 | if (pci_request_selected_regions(instance->pdev, 1<<instance->bar, |
5041 | "megasas: LSI")) { | 5041 | "megasas: LSI")) { |
5042 | dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n"); | 5042 | dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n"); |
5043 | return -EBUSY; | 5043 | return -EBUSY; |
@@ -5339,7 +5339,7 @@ fail_ready_state: | |||
5339 | iounmap(instance->reg_set); | 5339 | iounmap(instance->reg_set); |
5340 | 5340 | ||
5341 | fail_ioremap: | 5341 | fail_ioremap: |
5342 | pci_release_selected_regions(instance->pdev, instance->bar); | 5342 | pci_release_selected_regions(instance->pdev, 1<<instance->bar); |
5343 | 5343 | ||
5344 | return -EINVAL; | 5344 | return -EINVAL; |
5345 | } | 5345 | } |
@@ -5360,7 +5360,7 @@ static void megasas_release_mfi(struct megasas_instance *instance) | |||
5360 | 5360 | ||
5361 | iounmap(instance->reg_set); | 5361 | iounmap(instance->reg_set); |
5362 | 5362 | ||
5363 | pci_release_selected_regions(instance->pdev, instance->bar); | 5363 | pci_release_selected_regions(instance->pdev, 1<<instance->bar); |
5364 | } | 5364 | } |
5365 | 5365 | ||
5366 | /** | 5366 | /** |
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index ec837544f784..52d8bbf7feb5 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c | |||
@@ -2603,7 +2603,7 @@ megasas_release_fusion(struct megasas_instance *instance) | |||
2603 | 2603 | ||
2604 | iounmap(instance->reg_set); | 2604 | iounmap(instance->reg_set); |
2605 | 2605 | ||
2606 | pci_release_selected_regions(instance->pdev, instance->bar); | 2606 | pci_release_selected_regions(instance->pdev, 1<<instance->bar); |
2607 | } | 2607 | } |
2608 | 2608 | ||
2609 | /** | 2609 | /** |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 751f13edece0..750f82c339d4 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c | |||
@@ -2188,6 +2188,17 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) | |||
2188 | } else | 2188 | } else |
2189 | ioc->msix96_vector = 0; | 2189 | ioc->msix96_vector = 0; |
2190 | 2190 | ||
2191 | if (ioc->is_warpdrive) { | ||
2192 | ioc->reply_post_host_index[0] = (resource_size_t __iomem *) | ||
2193 | &ioc->chip->ReplyPostHostIndex; | ||
2194 | |||
2195 | for (i = 1; i < ioc->cpu_msix_table_sz; i++) | ||
2196 | ioc->reply_post_host_index[i] = | ||
2197 | (resource_size_t __iomem *) | ||
2198 | ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1) | ||
2199 | * 4))); | ||
2200 | } | ||
2201 | |||
2191 | list_for_each_entry(reply_q, &ioc->reply_queue_list, list) | 2202 | list_for_each_entry(reply_q, &ioc->reply_queue_list, list) |
2192 | pr_info(MPT3SAS_FMT "%s: IRQ %d\n", | 2203 | pr_info(MPT3SAS_FMT "%s: IRQ %d\n", |
2193 | reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" : | 2204 | reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" : |
@@ -5280,17 +5291,6 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) | |||
5280 | if (r) | 5291 | if (r) |
5281 | goto out_free_resources; | 5292 | goto out_free_resources; |
5282 | 5293 | ||
5283 | if (ioc->is_warpdrive) { | ||
5284 | ioc->reply_post_host_index[0] = (resource_size_t __iomem *) | ||
5285 | &ioc->chip->ReplyPostHostIndex; | ||
5286 | |||
5287 | for (i = 1; i < ioc->cpu_msix_table_sz; i++) | ||
5288 | ioc->reply_post_host_index[i] = | ||
5289 | (resource_size_t __iomem *) | ||
5290 | ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1) | ||
5291 | * 4))); | ||
5292 | } | ||
5293 | |||
5294 | pci_set_drvdata(ioc->pdev, ioc->shost); | 5294 | pci_set_drvdata(ioc->pdev, ioc->shost); |
5295 | r = _base_get_ioc_facts(ioc, CAN_SLEEP); | 5295 | r = _base_get_ioc_facts(ioc, CAN_SLEEP); |
5296 | if (r) | 5296 | if (r) |
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index eaccd651ccda..246456925335 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c | |||
@@ -246,6 +246,10 @@ static struct { | |||
246 | {"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, | 246 | {"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, |
247 | {"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, | 247 | {"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, |
248 | {"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, | 248 | {"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, |
249 | {"STK", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, | ||
250 | {"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, | ||
251 | {"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, | ||
252 | {"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, | ||
249 | {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36}, | 253 | {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36}, |
250 | {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN}, | 254 | {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN}, |
251 | {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */ | 255 | {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */ |
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index 3f0ff072184b..60b651bfaa01 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c | |||
@@ -341,22 +341,6 @@ static int do_sas_phy_delete(struct device *dev, void *data) | |||
341 | } | 341 | } |
342 | 342 | ||
343 | /** | 343 | /** |
344 | * is_sas_attached - check if device is SAS attached | ||
345 | * @sdev: scsi device to check | ||
346 | * | ||
347 | * returns true if the device is SAS attached | ||
348 | */ | ||
349 | int is_sas_attached(struct scsi_device *sdev) | ||
350 | { | ||
351 | struct Scsi_Host *shost = sdev->host; | ||
352 | |||
353 | return shost->transportt->host_attrs.ac.class == | ||
354 | &sas_host_class.class; | ||
355 | } | ||
356 | EXPORT_SYMBOL(is_sas_attached); | ||
357 | |||
358 | |||
359 | /** | ||
360 | * sas_remove_children - tear down a devices SAS data structures | 344 | * sas_remove_children - tear down a devices SAS data structures |
361 | * @dev: device belonging to the sas object | 345 | * @dev: device belonging to the sas object |
362 | * | 346 | * |
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 53ef1cb6418e..8c9a35c91705 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c | |||
@@ -587,7 +587,7 @@ static void ses_match_to_enclosure(struct enclosure_device *edev, | |||
587 | 587 | ||
588 | ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0); | 588 | ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0); |
589 | 589 | ||
590 | if (is_sas_attached(sdev)) | 590 | if (scsi_is_sas_rphy(&sdev->sdev_gendev)) |
591 | efd.addr = sas_get_address(sdev); | 591 | efd.addr = sas_get_address(sdev); |
592 | 592 | ||
593 | if (efd.addr) { | 593 | if (efd.addr) { |
@@ -778,6 +778,8 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev) | |||
778 | if (!edev) | 778 | if (!edev) |
779 | return; | 779 | return; |
780 | 780 | ||
781 | enclosure_unregister(edev); | ||
782 | |||
781 | ses_dev = edev->scratch; | 783 | ses_dev = edev->scratch; |
782 | edev->scratch = NULL; | 784 | edev->scratch = NULL; |
783 | 785 | ||
@@ -789,7 +791,6 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev) | |||
789 | kfree(edev->component[0].scratch); | 791 | kfree(edev->component[0].scratch); |
790 | 792 | ||
791 | put_device(&edev->edev); | 793 | put_device(&edev->edev); |
792 | enclosure_unregister(edev); | ||
793 | } | 794 | } |
794 | 795 | ||
795 | static void ses_intf_remove(struct device *cdev, | 796 | static void ses_intf_remove(struct device *cdev, |
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c index e3da1a2fdb66..2a9da2e0ea6b 100644 --- a/drivers/scsi/wd719x.c +++ b/drivers/scsi/wd719x.c | |||
@@ -962,7 +962,7 @@ static void wd719x_pci_remove(struct pci_dev *pdev) | |||
962 | scsi_host_put(sh); | 962 | scsi_host_put(sh); |
963 | } | 963 | } |
964 | 964 | ||
965 | static DEFINE_PCI_DEVICE_TABLE(wd719x_pci_table) = { | 965 | static const struct pci_device_id wd719x_pci_table[] = { |
966 | { PCI_DEVICE(PCI_VENDOR_ID_WD, 0x3296) }, | 966 | { PCI_DEVICE(PCI_VENDOR_ID_WD, 0x3296) }, |
967 | {} | 967 | {} |
968 | }; | 968 | }; |
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c index 823cbc92d1e7..7a37090dabbe 100644 --- a/drivers/spi/spi-img-spfi.c +++ b/drivers/spi/spi-img-spfi.c | |||
@@ -720,8 +720,6 @@ static int img_spfi_remove(struct platform_device *pdev) | |||
720 | clk_disable_unprepare(spfi->sys_clk); | 720 | clk_disable_unprepare(spfi->sys_clk); |
721 | } | 721 | } |
722 | 722 | ||
723 | spi_master_put(master); | ||
724 | |||
725 | return 0; | 723 | return 0; |
726 | } | 724 | } |
727 | 725 | ||
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c index 0be89e052428..899d7a8f0889 100644 --- a/drivers/spi/spi-mt65xx.c +++ b/drivers/spi/spi-mt65xx.c | |||
@@ -685,7 +685,6 @@ static int mtk_spi_remove(struct platform_device *pdev) | |||
685 | pm_runtime_disable(&pdev->dev); | 685 | pm_runtime_disable(&pdev->dev); |
686 | 686 | ||
687 | mtk_spi_reset(mdata); | 687 | mtk_spi_reset(mdata); |
688 | spi_master_put(master); | ||
689 | 688 | ||
690 | return 0; | 689 | return 0; |
691 | } | 690 | } |
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c index f3df522db93b..58d2d48e16a5 100644 --- a/drivers/spi/spi-pxa2xx-pci.c +++ b/drivers/spi/spi-pxa2xx-pci.c | |||
@@ -214,6 +214,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev, | |||
214 | return PTR_ERR(ssp->clk); | 214 | return PTR_ERR(ssp->clk); |
215 | 215 | ||
216 | memset(&pi, 0, sizeof(pi)); | 216 | memset(&pi, 0, sizeof(pi)); |
217 | pi.fwnode = dev->dev.fwnode; | ||
217 | pi.parent = &dev->dev; | 218 | pi.parent = &dev->dev; |
218 | pi.name = "pxa2xx-spi"; | 219 | pi.name = "pxa2xx-spi"; |
219 | pi.id = ssp->port_id; | 220 | pi.id = ssp->port_id; |
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c index c338ef1136f6..7f1555621f8e 100644 --- a/drivers/spi/spi-qup.c +++ b/drivers/spi/spi-qup.c | |||
@@ -1030,7 +1030,6 @@ static int spi_qup_remove(struct platform_device *pdev) | |||
1030 | 1030 | ||
1031 | pm_runtime_put_noidle(&pdev->dev); | 1031 | pm_runtime_put_noidle(&pdev->dev); |
1032 | pm_runtime_disable(&pdev->dev); | 1032 | pm_runtime_disable(&pdev->dev); |
1033 | spi_master_put(master); | ||
1034 | 1033 | ||
1035 | return 0; | 1034 | return 0; |
1036 | } | 1035 | } |
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 0f83ad1d5a58..1de3a772eb7d 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c | |||
@@ -262,6 +262,9 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, | |||
262 | 262 | ||
263 | for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_div_table); k++) { | 263 | for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_div_table); k++) { |
264 | brps = DIV_ROUND_UP(div, sh_msiof_spi_div_table[k].div); | 264 | brps = DIV_ROUND_UP(div, sh_msiof_spi_div_table[k].div); |
265 | /* SCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */ | ||
266 | if (sh_msiof_spi_div_table[k].div == 1 && brps > 2) | ||
267 | continue; | ||
265 | if (brps <= 32) /* max of brdv is 32 */ | 268 | if (brps <= 32) /* max of brdv is 32 */ |
266 | break; | 269 | break; |
267 | } | 270 | } |
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 51ad42fad567..200ca228d885 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
@@ -960,7 +960,7 @@ static int spi_transfer_one_message(struct spi_master *master, | |||
960 | struct spi_transfer *xfer; | 960 | struct spi_transfer *xfer; |
961 | bool keep_cs = false; | 961 | bool keep_cs = false; |
962 | int ret = 0; | 962 | int ret = 0; |
963 | unsigned long ms = 1; | 963 | unsigned long long ms = 1; |
964 | struct spi_statistics *statm = &master->statistics; | 964 | struct spi_statistics *statm = &master->statistics; |
965 | struct spi_statistics *stats = &msg->spi->statistics; | 965 | struct spi_statistics *stats = &msg->spi->statistics; |
966 | 966 | ||
@@ -991,9 +991,13 @@ static int spi_transfer_one_message(struct spi_master *master, | |||
991 | 991 | ||
992 | if (ret > 0) { | 992 | if (ret > 0) { |
993 | ret = 0; | 993 | ret = 0; |
994 | ms = xfer->len * 8 * 1000 / xfer->speed_hz; | 994 | ms = 8LL * 1000LL * xfer->len; |
995 | do_div(ms, xfer->speed_hz); | ||
995 | ms += ms + 100; /* some tolerance */ | 996 | ms += ms + 100; /* some tolerance */ |
996 | 997 | ||
998 | if (ms > UINT_MAX) | ||
999 | ms = UINT_MAX; | ||
1000 | |||
997 | ms = wait_for_completion_timeout(&master->xfer_completion, | 1001 | ms = wait_for_completion_timeout(&master->xfer_completion, |
998 | msecs_to_jiffies(ms)); | 1002 | msecs_to_jiffies(ms)); |
999 | } | 1003 | } |
@@ -1159,6 +1163,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread) | |||
1159 | if (ret < 0) { | 1163 | if (ret < 0) { |
1160 | dev_err(&master->dev, "Failed to power device: %d\n", | 1164 | dev_err(&master->dev, "Failed to power device: %d\n", |
1161 | ret); | 1165 | ret); |
1166 | mutex_unlock(&master->io_mutex); | ||
1162 | return; | 1167 | return; |
1163 | } | 1168 | } |
1164 | } | 1169 | } |
@@ -1174,6 +1179,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread) | |||
1174 | 1179 | ||
1175 | if (master->auto_runtime_pm) | 1180 | if (master->auto_runtime_pm) |
1176 | pm_runtime_put(master->dev.parent); | 1181 | pm_runtime_put(master->dev.parent); |
1182 | mutex_unlock(&master->io_mutex); | ||
1177 | return; | 1183 | return; |
1178 | } | 1184 | } |
1179 | } | 1185 | } |
diff --git a/drivers/staging/comedi/drivers/adv_pci1760.c b/drivers/staging/comedi/drivers/adv_pci1760.c index d7dd1e55e347..9f525ff7290c 100644 --- a/drivers/staging/comedi/drivers/adv_pci1760.c +++ b/drivers/staging/comedi/drivers/adv_pci1760.c | |||
@@ -196,6 +196,7 @@ static int pci1760_pwm_ns_to_div(unsigned int flags, unsigned int ns) | |||
196 | break; | 196 | break; |
197 | case CMDF_ROUND_DOWN: | 197 | case CMDF_ROUND_DOWN: |
198 | divisor = ns / PCI1760_PWM_TIMEBASE; | 198 | divisor = ns / PCI1760_PWM_TIMEBASE; |
199 | break; | ||
199 | default: | 200 | default: |
200 | return -EINVAL; | 201 | return -EINVAL; |
201 | } | 202 | } |
diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c index 4ab186669f0c..ec5b9a23494d 100644 --- a/drivers/staging/comedi/drivers/comedi_test.c +++ b/drivers/staging/comedi/drivers/comedi_test.c | |||
@@ -56,11 +56,6 @@ | |||
56 | 56 | ||
57 | #define N_CHANS 8 | 57 | #define N_CHANS 8 |
58 | 58 | ||
59 | enum waveform_state_bits { | ||
60 | WAVEFORM_AI_RUNNING, | ||
61 | WAVEFORM_AO_RUNNING | ||
62 | }; | ||
63 | |||
64 | /* Data unique to this driver */ | 59 | /* Data unique to this driver */ |
65 | struct waveform_private { | 60 | struct waveform_private { |
66 | struct timer_list ai_timer; /* timer for AI commands */ | 61 | struct timer_list ai_timer; /* timer for AI commands */ |
@@ -68,7 +63,6 @@ struct waveform_private { | |||
68 | unsigned int wf_amplitude; /* waveform amplitude in microvolts */ | 63 | unsigned int wf_amplitude; /* waveform amplitude in microvolts */ |
69 | unsigned int wf_period; /* waveform period in microseconds */ | 64 | unsigned int wf_period; /* waveform period in microseconds */ |
70 | unsigned int wf_current; /* current time in waveform period */ | 65 | unsigned int wf_current; /* current time in waveform period */ |
71 | unsigned long state_bits; | ||
72 | unsigned int ai_scan_period; /* AI scan period in usec */ | 66 | unsigned int ai_scan_period; /* AI scan period in usec */ |
73 | unsigned int ai_convert_period; /* AI conversion period in usec */ | 67 | unsigned int ai_convert_period; /* AI conversion period in usec */ |
74 | struct timer_list ao_timer; /* timer for AO commands */ | 68 | struct timer_list ao_timer; /* timer for AO commands */ |
@@ -191,10 +185,6 @@ static void waveform_ai_timer(unsigned long arg) | |||
191 | unsigned int nsamples; | 185 | unsigned int nsamples; |
192 | unsigned int time_increment; | 186 | unsigned int time_increment; |
193 | 187 | ||
194 | /* check command is still active */ | ||
195 | if (!test_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits)) | ||
196 | return; | ||
197 | |||
198 | now = ktime_to_us(ktime_get()); | 188 | now = ktime_to_us(ktime_get()); |
199 | nsamples = comedi_nsamples_left(s, UINT_MAX); | 189 | nsamples = comedi_nsamples_left(s, UINT_MAX); |
200 | 190 | ||
@@ -386,11 +376,6 @@ static int waveform_ai_cmd(struct comedi_device *dev, | |||
386 | */ | 376 | */ |
387 | devpriv->ai_timer.expires = | 377 | devpriv->ai_timer.expires = |
388 | jiffies + usecs_to_jiffies(devpriv->ai_convert_period) + 1; | 378 | jiffies + usecs_to_jiffies(devpriv->ai_convert_period) + 1; |
389 | |||
390 | /* mark command as active */ | ||
391 | smp_mb__before_atomic(); | ||
392 | set_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits); | ||
393 | smp_mb__after_atomic(); | ||
394 | add_timer(&devpriv->ai_timer); | 379 | add_timer(&devpriv->ai_timer); |
395 | return 0; | 380 | return 0; |
396 | } | 381 | } |
@@ -400,11 +385,12 @@ static int waveform_ai_cancel(struct comedi_device *dev, | |||
400 | { | 385 | { |
401 | struct waveform_private *devpriv = dev->private; | 386 | struct waveform_private *devpriv = dev->private; |
402 | 387 | ||
403 | /* mark command as no longer active */ | 388 | if (in_softirq()) { |
404 | clear_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits); | 389 | /* Assume we were called from the timer routine itself. */ |
405 | smp_mb__after_atomic(); | 390 | del_timer(&devpriv->ai_timer); |
406 | /* cannot call del_timer_sync() as may be called from timer routine */ | 391 | } else { |
407 | del_timer(&devpriv->ai_timer); | 392 | del_timer_sync(&devpriv->ai_timer); |
393 | } | ||
408 | return 0; | 394 | return 0; |
409 | } | 395 | } |
410 | 396 | ||
@@ -436,10 +422,6 @@ static void waveform_ao_timer(unsigned long arg) | |||
436 | u64 scans_since; | 422 | u64 scans_since; |
437 | unsigned int scans_avail = 0; | 423 | unsigned int scans_avail = 0; |
438 | 424 | ||
439 | /* check command is still active */ | ||
440 | if (!test_bit(WAVEFORM_AO_RUNNING, &devpriv->state_bits)) | ||
441 | return; | ||
442 | |||
443 | /* determine number of scan periods since last time */ | 425 | /* determine number of scan periods since last time */ |
444 | now = ktime_to_us(ktime_get()); | 426 | now = ktime_to_us(ktime_get()); |
445 | scans_since = now - devpriv->ao_last_scan_time; | 427 | scans_since = now - devpriv->ao_last_scan_time; |
@@ -518,11 +500,6 @@ static int waveform_ao_inttrig_start(struct comedi_device *dev, | |||
518 | devpriv->ao_last_scan_time = ktime_to_us(ktime_get()); | 500 | devpriv->ao_last_scan_time = ktime_to_us(ktime_get()); |
519 | devpriv->ao_timer.expires = | 501 | devpriv->ao_timer.expires = |
520 | jiffies + usecs_to_jiffies(devpriv->ao_scan_period); | 502 | jiffies + usecs_to_jiffies(devpriv->ao_scan_period); |
521 | |||
522 | /* mark command as active */ | ||
523 | smp_mb__before_atomic(); | ||
524 | set_bit(WAVEFORM_AO_RUNNING, &devpriv->state_bits); | ||
525 | smp_mb__after_atomic(); | ||
526 | add_timer(&devpriv->ao_timer); | 503 | add_timer(&devpriv->ao_timer); |
527 | 504 | ||
528 | return 1; | 505 | return 1; |
@@ -608,11 +585,12 @@ static int waveform_ao_cancel(struct comedi_device *dev, | |||
608 | struct waveform_private *devpriv = dev->private; | 585 | struct waveform_private *devpriv = dev->private; |
609 | 586 | ||
610 | s->async->inttrig = NULL; | 587 | s->async->inttrig = NULL; |
611 | /* mark command as no longer active */ | 588 | if (in_softirq()) { |
612 | clear_bit(WAVEFORM_AO_RUNNING, &devpriv->state_bits); | 589 | /* Assume we were called from the timer routine itself. */ |
613 | smp_mb__after_atomic(); | 590 | del_timer(&devpriv->ao_timer); |
614 | /* cannot call del_timer_sync() as may be called from timer routine */ | 591 | } else { |
615 | del_timer(&devpriv->ao_timer); | 592 | del_timer_sync(&devpriv->ao_timer); |
593 | } | ||
616 | return 0; | 594 | return 0; |
617 | } | 595 | } |
618 | 596 | ||
diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c index 65daef0c00d5..0f4eb954aa80 100644 --- a/drivers/staging/comedi/drivers/daqboard2000.c +++ b/drivers/staging/comedi/drivers/daqboard2000.c | |||
@@ -634,7 +634,7 @@ static const void *daqboard2000_find_boardinfo(struct comedi_device *dev, | |||
634 | const struct daq200_boardtype *board; | 634 | const struct daq200_boardtype *board; |
635 | int i; | 635 | int i; |
636 | 636 | ||
637 | if (pcidev->subsystem_device != PCI_VENDOR_ID_IOTECH) | 637 | if (pcidev->subsystem_vendor != PCI_VENDOR_ID_IOTECH) |
638 | return NULL; | 638 | return NULL; |
639 | 639 | ||
640 | for (i = 0; i < ARRAY_SIZE(boardtypes); i++) { | 640 | for (i = 0; i < ARRAY_SIZE(boardtypes); i++) { |
diff --git a/drivers/staging/comedi/drivers/dt2811.c b/drivers/staging/comedi/drivers/dt2811.c index 904f637797b6..8bbd93814340 100644 --- a/drivers/staging/comedi/drivers/dt2811.c +++ b/drivers/staging/comedi/drivers/dt2811.c | |||
@@ -588,8 +588,8 @@ static int dt2811_attach(struct comedi_device *dev, struct comedi_devconfig *it) | |||
588 | s = &dev->subdevices[0]; | 588 | s = &dev->subdevices[0]; |
589 | s->type = COMEDI_SUBD_AI; | 589 | s->type = COMEDI_SUBD_AI; |
590 | s->subdev_flags = SDF_READABLE | | 590 | s->subdev_flags = SDF_READABLE | |
591 | (it->options[2] == 1) ? SDF_DIFF : | 591 | ((it->options[2] == 1) ? SDF_DIFF : |
592 | (it->options[2] == 2) ? SDF_COMMON : SDF_GROUND; | 592 | (it->options[2] == 2) ? SDF_COMMON : SDF_GROUND); |
593 | s->n_chan = (it->options[2] == 1) ? 8 : 16; | 593 | s->n_chan = (it->options[2] == 1) ? 8 : 16; |
594 | s->maxdata = 0x0fff; | 594 | s->maxdata = 0x0fff; |
595 | s->range_table = board->is_pgh ? &dt2811_pgh_ai_ranges | 595 | s->range_table = board->is_pgh ? &dt2811_pgh_ai_ranges |
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c index 8dabb19519a5..0f97d7b611d7 100644 --- a/drivers/staging/comedi/drivers/ni_mio_common.c +++ b/drivers/staging/comedi/drivers/ni_mio_common.c | |||
@@ -2772,7 +2772,15 @@ static int ni_ao_inttrig(struct comedi_device *dev, | |||
2772 | int i; | 2772 | int i; |
2773 | static const int timeout = 1000; | 2773 | static const int timeout = 1000; |
2774 | 2774 | ||
2775 | if (trig_num != cmd->start_arg) | 2775 | /* |
2776 | * Require trig_num == cmd->start_arg when cmd->start_src == TRIG_INT. | ||
2777 | * For backwards compatibility, also allow trig_num == 0 when | ||
2778 | * cmd->start_src != TRIG_INT (i.e. when cmd->start_src == TRIG_EXT); | ||
2779 | * in that case, the internal trigger is being used as a pre-trigger | ||
2780 | * before the external trigger. | ||
2781 | */ | ||
2782 | if (!(trig_num == cmd->start_arg || | ||
2783 | (trig_num == 0 && cmd->start_src != TRIG_INT))) | ||
2776 | return -EINVAL; | 2784 | return -EINVAL; |
2777 | 2785 | ||
2778 | /* | 2786 | /* |
@@ -5480,7 +5488,7 @@ static int ni_E_init(struct comedi_device *dev, | |||
5480 | s->maxdata = (devpriv->is_m_series) ? 0xffffffff | 5488 | s->maxdata = (devpriv->is_m_series) ? 0xffffffff |
5481 | : 0x00ffffff; | 5489 | : 0x00ffffff; |
5482 | s->insn_read = ni_tio_insn_read; | 5490 | s->insn_read = ni_tio_insn_read; |
5483 | s->insn_write = ni_tio_insn_read; | 5491 | s->insn_write = ni_tio_insn_write; |
5484 | s->insn_config = ni_tio_insn_config; | 5492 | s->insn_config = ni_tio_insn_config; |
5485 | #ifdef PCIDMA | 5493 | #ifdef PCIDMA |
5486 | if (dev->irq && devpriv->mite) { | 5494 | if (dev->irq && devpriv->mite) { |
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c index 170ac980abcb..24c348d2f5bb 100644 --- a/drivers/staging/iio/impedance-analyzer/ad5933.c +++ b/drivers/staging/iio/impedance-analyzer/ad5933.c | |||
@@ -419,6 +419,7 @@ static ssize_t ad5933_store(struct device *dev, | |||
419 | mutex_lock(&indio_dev->mlock); | 419 | mutex_lock(&indio_dev->mlock); |
420 | switch ((u32)this_attr->address) { | 420 | switch ((u32)this_attr->address) { |
421 | case AD5933_OUT_RANGE: | 421 | case AD5933_OUT_RANGE: |
422 | ret = -EINVAL; | ||
422 | for (i = 0; i < 4; i++) | 423 | for (i = 0; i < 4; i++) |
423 | if (val == st->range_avail[i]) { | 424 | if (val == st->range_avail[i]) { |
424 | st->ctrl_hb &= ~AD5933_CTRL_RANGE(0x3); | 425 | st->ctrl_hb &= ~AD5933_CTRL_RANGE(0x3); |
@@ -426,7 +427,6 @@ static ssize_t ad5933_store(struct device *dev, | |||
426 | ret = ad5933_cmd(st, 0); | 427 | ret = ad5933_cmd(st, 0); |
427 | break; | 428 | break; |
428 | } | 429 | } |
429 | ret = -EINVAL; | ||
430 | break; | 430 | break; |
431 | case AD5933_IN_PGA_GAIN: | 431 | case AD5933_IN_PGA_GAIN: |
432 | if (sysfs_streq(buf, "1")) { | 432 | if (sysfs_streq(buf, "1")) { |
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c index 3664bfd0178b..2c4dc69731e8 100644 --- a/drivers/staging/lustre/lustre/llite/namei.c +++ b/drivers/staging/lustre/lustre/llite/namei.c | |||
@@ -388,6 +388,7 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request, | |||
388 | struct inode *inode = NULL; | 388 | struct inode *inode = NULL; |
389 | __u64 bits = 0; | 389 | __u64 bits = 0; |
390 | int rc = 0; | 390 | int rc = 0; |
391 | struct dentry *alias; | ||
391 | 392 | ||
392 | /* NB 1 request reference will be taken away by ll_intent_lock() | 393 | /* NB 1 request reference will be taken away by ll_intent_lock() |
393 | * when I return | 394 | * when I return |
@@ -412,26 +413,12 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request, | |||
412 | */ | 413 | */ |
413 | } | 414 | } |
414 | 415 | ||
415 | /* Only hash *de if it is unhashed (new dentry). | 416 | alias = ll_splice_alias(inode, *de); |
416 | * Atoimc_open may passing hashed dentries for open. | 417 | if (IS_ERR(alias)) { |
417 | */ | 418 | rc = PTR_ERR(alias); |
418 | if (d_unhashed(*de)) { | 419 | goto out; |
419 | struct dentry *alias; | ||
420 | |||
421 | alias = ll_splice_alias(inode, *de); | ||
422 | if (IS_ERR(alias)) { | ||
423 | rc = PTR_ERR(alias); | ||
424 | goto out; | ||
425 | } | ||
426 | *de = alias; | ||
427 | } else if (!it_disposition(it, DISP_LOOKUP_NEG) && | ||
428 | !it_disposition(it, DISP_OPEN_CREATE)) { | ||
429 | /* With DISP_OPEN_CREATE dentry will be | ||
430 | * instantiated in ll_create_it. | ||
431 | */ | ||
432 | LASSERT(!d_inode(*de)); | ||
433 | d_instantiate(*de, inode); | ||
434 | } | 420 | } |
421 | *de = alias; | ||
435 | 422 | ||
436 | if (!it_disposition(it, DISP_LOOKUP_NEG)) { | 423 | if (!it_disposition(it, DISP_LOOKUP_NEG)) { |
437 | /* we have lookup look - unhide dentry */ | 424 | /* we have lookup look - unhide dentry */ |
@@ -587,6 +574,24 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry, | |||
587 | dentry, PFID(ll_inode2fid(dir)), dir, file, open_flags, mode, | 574 | dentry, PFID(ll_inode2fid(dir)), dir, file, open_flags, mode, |
588 | *opened); | 575 | *opened); |
589 | 576 | ||
577 | /* Only negative dentries enter here */ | ||
578 | LASSERT(!d_inode(dentry)); | ||
579 | |||
580 | if (!d_in_lookup(dentry)) { | ||
581 | /* A valid negative dentry that just passed revalidation, | ||
582 | * there's little point to try and open it server-side, | ||
583 | * even though there's a minuscle chance it might succeed. | ||
584 | * Either way it's a valid race to just return -ENOENT here. | ||
585 | */ | ||
586 | if (!(open_flags & O_CREAT)) | ||
587 | return -ENOENT; | ||
588 | |||
589 | /* Otherwise we just unhash it to be rehashed afresh via | ||
590 | * lookup if necessary | ||
591 | */ | ||
592 | d_drop(dentry); | ||
593 | } | ||
594 | |||
590 | it = kzalloc(sizeof(*it), GFP_NOFS); | 595 | it = kzalloc(sizeof(*it), GFP_NOFS); |
591 | if (!it) | 596 | if (!it) |
592 | return -ENOMEM; | 597 | return -ENOMEM; |
diff --git a/drivers/staging/media/cec/TODO b/drivers/staging/media/cec/TODO index a10d4f82b954..13224694a8ae 100644 --- a/drivers/staging/media/cec/TODO +++ b/drivers/staging/media/cec/TODO | |||
@@ -12,6 +12,7 @@ Hopefully this will happen later in 2016. | |||
12 | 12 | ||
13 | Other TODOs: | 13 | Other TODOs: |
14 | 14 | ||
15 | - There are two possible replies to CEC_MSG_INITIATE_ARC. How to handle that? | ||
15 | - Add a flag to inhibit passing CEC RC messages to the rc subsystem. | 16 | - Add a flag to inhibit passing CEC RC messages to the rc subsystem. |
16 | Applications should be able to choose this when calling S_LOG_ADDRS. | 17 | Applications should be able to choose this when calling S_LOG_ADDRS. |
17 | - If the reply field of cec_msg is set then when the reply arrives it | 18 | - If the reply field of cec_msg is set then when the reply arrives it |
diff --git a/drivers/staging/media/cec/cec-adap.c b/drivers/staging/media/cec/cec-adap.c index b2393bbacb26..946986f3ac0d 100644 --- a/drivers/staging/media/cec/cec-adap.c +++ b/drivers/staging/media/cec/cec-adap.c | |||
@@ -124,10 +124,10 @@ static void cec_queue_event(struct cec_adapter *adap, | |||
124 | u64 ts = ktime_get_ns(); | 124 | u64 ts = ktime_get_ns(); |
125 | struct cec_fh *fh; | 125 | struct cec_fh *fh; |
126 | 126 | ||
127 | mutex_lock(&adap->devnode.fhs_lock); | 127 | mutex_lock(&adap->devnode.lock); |
128 | list_for_each_entry(fh, &adap->devnode.fhs, list) | 128 | list_for_each_entry(fh, &adap->devnode.fhs, list) |
129 | cec_queue_event_fh(fh, ev, ts); | 129 | cec_queue_event_fh(fh, ev, ts); |
130 | mutex_unlock(&adap->devnode.fhs_lock); | 130 | mutex_unlock(&adap->devnode.lock); |
131 | } | 131 | } |
132 | 132 | ||
133 | /* | 133 | /* |
@@ -191,12 +191,12 @@ static void cec_queue_msg_monitor(struct cec_adapter *adap, | |||
191 | u32 monitor_mode = valid_la ? CEC_MODE_MONITOR : | 191 | u32 monitor_mode = valid_la ? CEC_MODE_MONITOR : |
192 | CEC_MODE_MONITOR_ALL; | 192 | CEC_MODE_MONITOR_ALL; |
193 | 193 | ||
194 | mutex_lock(&adap->devnode.fhs_lock); | 194 | mutex_lock(&adap->devnode.lock); |
195 | list_for_each_entry(fh, &adap->devnode.fhs, list) { | 195 | list_for_each_entry(fh, &adap->devnode.fhs, list) { |
196 | if (fh->mode_follower >= monitor_mode) | 196 | if (fh->mode_follower >= monitor_mode) |
197 | cec_queue_msg_fh(fh, msg); | 197 | cec_queue_msg_fh(fh, msg); |
198 | } | 198 | } |
199 | mutex_unlock(&adap->devnode.fhs_lock); | 199 | mutex_unlock(&adap->devnode.lock); |
200 | } | 200 | } |
201 | 201 | ||
202 | /* | 202 | /* |
@@ -207,12 +207,12 @@ static void cec_queue_msg_followers(struct cec_adapter *adap, | |||
207 | { | 207 | { |
208 | struct cec_fh *fh; | 208 | struct cec_fh *fh; |
209 | 209 | ||
210 | mutex_lock(&adap->devnode.fhs_lock); | 210 | mutex_lock(&adap->devnode.lock); |
211 | list_for_each_entry(fh, &adap->devnode.fhs, list) { | 211 | list_for_each_entry(fh, &adap->devnode.fhs, list) { |
212 | if (fh->mode_follower == CEC_MODE_FOLLOWER) | 212 | if (fh->mode_follower == CEC_MODE_FOLLOWER) |
213 | cec_queue_msg_fh(fh, msg); | 213 | cec_queue_msg_fh(fh, msg); |
214 | } | 214 | } |
215 | mutex_unlock(&adap->devnode.fhs_lock); | 215 | mutex_unlock(&adap->devnode.lock); |
216 | } | 216 | } |
217 | 217 | ||
218 | /* Notify userspace of an adapter state change. */ | 218 | /* Notify userspace of an adapter state change. */ |
@@ -851,6 +851,9 @@ void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg) | |||
851 | if (!valid_la || msg->len <= 1) | 851 | if (!valid_la || msg->len <= 1) |
852 | return; | 852 | return; |
853 | 853 | ||
854 | if (adap->log_addrs.log_addr_mask == 0) | ||
855 | return; | ||
856 | |||
854 | /* | 857 | /* |
855 | * Process the message on the protocol level. If is_reply is true, | 858 | * Process the message on the protocol level. If is_reply is true, |
856 | * then cec_receive_notify() won't pass on the reply to the listener(s) | 859 | * then cec_receive_notify() won't pass on the reply to the listener(s) |
@@ -1047,11 +1050,17 @@ static int cec_config_thread_func(void *arg) | |||
1047 | dprintk(1, "could not claim LA %d\n", i); | 1050 | dprintk(1, "could not claim LA %d\n", i); |
1048 | } | 1051 | } |
1049 | 1052 | ||
1053 | if (adap->log_addrs.log_addr_mask == 0 && | ||
1054 | !(las->flags & CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK)) | ||
1055 | goto unconfigure; | ||
1056 | |||
1050 | configured: | 1057 | configured: |
1051 | if (adap->log_addrs.log_addr_mask == 0) { | 1058 | if (adap->log_addrs.log_addr_mask == 0) { |
1052 | /* Fall back to unregistered */ | 1059 | /* Fall back to unregistered */ |
1053 | las->log_addr[0] = CEC_LOG_ADDR_UNREGISTERED; | 1060 | las->log_addr[0] = CEC_LOG_ADDR_UNREGISTERED; |
1054 | las->log_addr_mask = 1 << las->log_addr[0]; | 1061 | las->log_addr_mask = 1 << las->log_addr[0]; |
1062 | for (i = 1; i < las->num_log_addrs; i++) | ||
1063 | las->log_addr[i] = CEC_LOG_ADDR_INVALID; | ||
1055 | } | 1064 | } |
1056 | adap->is_configured = true; | 1065 | adap->is_configured = true; |
1057 | adap->is_configuring = false; | 1066 | adap->is_configuring = false; |
@@ -1070,6 +1079,8 @@ configured: | |||
1070 | cec_report_features(adap, i); | 1079 | cec_report_features(adap, i); |
1071 | cec_report_phys_addr(adap, i); | 1080 | cec_report_phys_addr(adap, i); |
1072 | } | 1081 | } |
1082 | for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++) | ||
1083 | las->log_addr[i] = CEC_LOG_ADDR_INVALID; | ||
1073 | mutex_lock(&adap->lock); | 1084 | mutex_lock(&adap->lock); |
1074 | adap->kthread_config = NULL; | 1085 | adap->kthread_config = NULL; |
1075 | mutex_unlock(&adap->lock); | 1086 | mutex_unlock(&adap->lock); |
@@ -1398,7 +1409,6 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg, | |||
1398 | u8 init_laddr = cec_msg_initiator(msg); | 1409 | u8 init_laddr = cec_msg_initiator(msg); |
1399 | u8 devtype = cec_log_addr2dev(adap, dest_laddr); | 1410 | u8 devtype = cec_log_addr2dev(adap, dest_laddr); |
1400 | int la_idx = cec_log_addr2idx(adap, dest_laddr); | 1411 | int la_idx = cec_log_addr2idx(adap, dest_laddr); |
1401 | bool is_directed = la_idx >= 0; | ||
1402 | bool from_unregistered = init_laddr == 0xf; | 1412 | bool from_unregistered = init_laddr == 0xf; |
1403 | struct cec_msg tx_cec_msg = { }; | 1413 | struct cec_msg tx_cec_msg = { }; |
1404 | 1414 | ||
@@ -1560,7 +1570,7 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg, | |||
1560 | * Unprocessed messages are aborted if userspace isn't doing | 1570 | * Unprocessed messages are aborted if userspace isn't doing |
1561 | * any processing either. | 1571 | * any processing either. |
1562 | */ | 1572 | */ |
1563 | if (is_directed && !is_reply && !adap->follower_cnt && | 1573 | if (!is_broadcast && !is_reply && !adap->follower_cnt && |
1564 | !adap->cec_follower && msg->msg[1] != CEC_MSG_FEATURE_ABORT) | 1574 | !adap->cec_follower && msg->msg[1] != CEC_MSG_FEATURE_ABORT) |
1565 | return cec_feature_abort(adap, msg); | 1575 | return cec_feature_abort(adap, msg); |
1566 | break; | 1576 | break; |
diff --git a/drivers/staging/media/cec/cec-api.c b/drivers/staging/media/cec/cec-api.c index 7be7615a0fdf..e274e2f22398 100644 --- a/drivers/staging/media/cec/cec-api.c +++ b/drivers/staging/media/cec/cec-api.c | |||
@@ -162,7 +162,7 @@ static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh, | |||
162 | return -ENOTTY; | 162 | return -ENOTTY; |
163 | if (copy_from_user(&log_addrs, parg, sizeof(log_addrs))) | 163 | if (copy_from_user(&log_addrs, parg, sizeof(log_addrs))) |
164 | return -EFAULT; | 164 | return -EFAULT; |
165 | log_addrs.flags = 0; | 165 | log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK; |
166 | mutex_lock(&adap->lock); | 166 | mutex_lock(&adap->lock); |
167 | if (!adap->is_configuring && | 167 | if (!adap->is_configuring && |
168 | (!log_addrs.num_log_addrs || !adap->is_configured) && | 168 | (!log_addrs.num_log_addrs || !adap->is_configured) && |
@@ -435,7 +435,7 @@ static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
435 | void __user *parg = (void __user *)arg; | 435 | void __user *parg = (void __user *)arg; |
436 | 436 | ||
437 | if (!devnode->registered) | 437 | if (!devnode->registered) |
438 | return -EIO; | 438 | return -ENODEV; |
439 | 439 | ||
440 | switch (cmd) { | 440 | switch (cmd) { |
441 | case CEC_ADAP_G_CAPS: | 441 | case CEC_ADAP_G_CAPS: |
@@ -508,14 +508,14 @@ static int cec_open(struct inode *inode, struct file *filp) | |||
508 | 508 | ||
509 | filp->private_data = fh; | 509 | filp->private_data = fh; |
510 | 510 | ||
511 | mutex_lock(&devnode->fhs_lock); | 511 | mutex_lock(&devnode->lock); |
512 | /* Queue up initial state events */ | 512 | /* Queue up initial state events */ |
513 | ev_state.state_change.phys_addr = adap->phys_addr; | 513 | ev_state.state_change.phys_addr = adap->phys_addr; |
514 | ev_state.state_change.log_addr_mask = adap->log_addrs.log_addr_mask; | 514 | ev_state.state_change.log_addr_mask = adap->log_addrs.log_addr_mask; |
515 | cec_queue_event_fh(fh, &ev_state, 0); | 515 | cec_queue_event_fh(fh, &ev_state, 0); |
516 | 516 | ||
517 | list_add(&fh->list, &devnode->fhs); | 517 | list_add(&fh->list, &devnode->fhs); |
518 | mutex_unlock(&devnode->fhs_lock); | 518 | mutex_unlock(&devnode->lock); |
519 | 519 | ||
520 | return 0; | 520 | return 0; |
521 | } | 521 | } |
@@ -540,9 +540,9 @@ static int cec_release(struct inode *inode, struct file *filp) | |||
540 | cec_monitor_all_cnt_dec(adap); | 540 | cec_monitor_all_cnt_dec(adap); |
541 | mutex_unlock(&adap->lock); | 541 | mutex_unlock(&adap->lock); |
542 | 542 | ||
543 | mutex_lock(&devnode->fhs_lock); | 543 | mutex_lock(&devnode->lock); |
544 | list_del(&fh->list); | 544 | list_del(&fh->list); |
545 | mutex_unlock(&devnode->fhs_lock); | 545 | mutex_unlock(&devnode->lock); |
546 | 546 | ||
547 | /* Unhook pending transmits from this filehandle. */ | 547 | /* Unhook pending transmits from this filehandle. */ |
548 | mutex_lock(&adap->lock); | 548 | mutex_lock(&adap->lock); |
diff --git a/drivers/staging/media/cec/cec-core.c b/drivers/staging/media/cec/cec-core.c index 112a5fae12f5..3b1e4d2b190d 100644 --- a/drivers/staging/media/cec/cec-core.c +++ b/drivers/staging/media/cec/cec-core.c | |||
@@ -51,31 +51,29 @@ int cec_get_device(struct cec_devnode *devnode) | |||
51 | { | 51 | { |
52 | /* | 52 | /* |
53 | * Check if the cec device is available. This needs to be done with | 53 | * Check if the cec device is available. This needs to be done with |
54 | * the cec_devnode_lock held to prevent an open/unregister race: | 54 | * the devnode->lock held to prevent an open/unregister race: |
55 | * without the lock, the device could be unregistered and freed between | 55 | * without the lock, the device could be unregistered and freed between |
56 | * the devnode->registered check and get_device() calls, leading to | 56 | * the devnode->registered check and get_device() calls, leading to |
57 | * a crash. | 57 | * a crash. |
58 | */ | 58 | */ |
59 | mutex_lock(&cec_devnode_lock); | 59 | mutex_lock(&devnode->lock); |
60 | /* | 60 | /* |
61 | * return ENXIO if the cec device has been removed | 61 | * return ENXIO if the cec device has been removed |
62 | * already or if it is not registered anymore. | 62 | * already or if it is not registered anymore. |
63 | */ | 63 | */ |
64 | if (!devnode->registered) { | 64 | if (!devnode->registered) { |
65 | mutex_unlock(&cec_devnode_lock); | 65 | mutex_unlock(&devnode->lock); |
66 | return -ENXIO; | 66 | return -ENXIO; |
67 | } | 67 | } |
68 | /* and increase the device refcount */ | 68 | /* and increase the device refcount */ |
69 | get_device(&devnode->dev); | 69 | get_device(&devnode->dev); |
70 | mutex_unlock(&cec_devnode_lock); | 70 | mutex_unlock(&devnode->lock); |
71 | return 0; | 71 | return 0; |
72 | } | 72 | } |
73 | 73 | ||
74 | void cec_put_device(struct cec_devnode *devnode) | 74 | void cec_put_device(struct cec_devnode *devnode) |
75 | { | 75 | { |
76 | mutex_lock(&cec_devnode_lock); | ||
77 | put_device(&devnode->dev); | 76 | put_device(&devnode->dev); |
78 | mutex_unlock(&cec_devnode_lock); | ||
79 | } | 77 | } |
80 | 78 | ||
81 | /* Called when the last user of the cec device exits. */ | 79 | /* Called when the last user of the cec device exits. */ |
@@ -84,11 +82,10 @@ static void cec_devnode_release(struct device *cd) | |||
84 | struct cec_devnode *devnode = to_cec_devnode(cd); | 82 | struct cec_devnode *devnode = to_cec_devnode(cd); |
85 | 83 | ||
86 | mutex_lock(&cec_devnode_lock); | 84 | mutex_lock(&cec_devnode_lock); |
87 | |||
88 | /* Mark device node number as free */ | 85 | /* Mark device node number as free */ |
89 | clear_bit(devnode->minor, cec_devnode_nums); | 86 | clear_bit(devnode->minor, cec_devnode_nums); |
90 | |||
91 | mutex_unlock(&cec_devnode_lock); | 87 | mutex_unlock(&cec_devnode_lock); |
88 | |||
92 | cec_delete_adapter(to_cec_adapter(devnode)); | 89 | cec_delete_adapter(to_cec_adapter(devnode)); |
93 | } | 90 | } |
94 | 91 | ||
@@ -117,7 +114,7 @@ static int __must_check cec_devnode_register(struct cec_devnode *devnode, | |||
117 | 114 | ||
118 | /* Initialization */ | 115 | /* Initialization */ |
119 | INIT_LIST_HEAD(&devnode->fhs); | 116 | INIT_LIST_HEAD(&devnode->fhs); |
120 | mutex_init(&devnode->fhs_lock); | 117 | mutex_init(&devnode->lock); |
121 | 118 | ||
122 | /* Part 1: Find a free minor number */ | 119 | /* Part 1: Find a free minor number */ |
123 | mutex_lock(&cec_devnode_lock); | 120 | mutex_lock(&cec_devnode_lock); |
@@ -160,7 +157,9 @@ static int __must_check cec_devnode_register(struct cec_devnode *devnode, | |||
160 | cdev_del: | 157 | cdev_del: |
161 | cdev_del(&devnode->cdev); | 158 | cdev_del(&devnode->cdev); |
162 | clr_bit: | 159 | clr_bit: |
160 | mutex_lock(&cec_devnode_lock); | ||
163 | clear_bit(devnode->minor, cec_devnode_nums); | 161 | clear_bit(devnode->minor, cec_devnode_nums); |
162 | mutex_unlock(&cec_devnode_lock); | ||
164 | return ret; | 163 | return ret; |
165 | } | 164 | } |
166 | 165 | ||
@@ -177,17 +176,21 @@ static void cec_devnode_unregister(struct cec_devnode *devnode) | |||
177 | { | 176 | { |
178 | struct cec_fh *fh; | 177 | struct cec_fh *fh; |
179 | 178 | ||
179 | mutex_lock(&devnode->lock); | ||
180 | |||
180 | /* Check if devnode was never registered or already unregistered */ | 181 | /* Check if devnode was never registered or already unregistered */ |
181 | if (!devnode->registered || devnode->unregistered) | 182 | if (!devnode->registered || devnode->unregistered) { |
183 | mutex_unlock(&devnode->lock); | ||
182 | return; | 184 | return; |
185 | } | ||
183 | 186 | ||
184 | mutex_lock(&devnode->fhs_lock); | ||
185 | list_for_each_entry(fh, &devnode->fhs, list) | 187 | list_for_each_entry(fh, &devnode->fhs, list) |
186 | wake_up_interruptible(&fh->wait); | 188 | wake_up_interruptible(&fh->wait); |
187 | mutex_unlock(&devnode->fhs_lock); | ||
188 | 189 | ||
189 | devnode->registered = false; | 190 | devnode->registered = false; |
190 | devnode->unregistered = true; | 191 | devnode->unregistered = true; |
192 | mutex_unlock(&devnode->lock); | ||
193 | |||
191 | device_del(&devnode->dev); | 194 | device_del(&devnode->dev); |
192 | cdev_del(&devnode->cdev); | 195 | cdev_del(&devnode->cdev); |
193 | put_device(&devnode->dev); | 196 | put_device(&devnode->dev); |
diff --git a/drivers/staging/media/pulse8-cec/pulse8-cec.c b/drivers/staging/media/pulse8-cec/pulse8-cec.c index 94f8590492dc..ed8bd95ad6d0 100644 --- a/drivers/staging/media/pulse8-cec/pulse8-cec.c +++ b/drivers/staging/media/pulse8-cec/pulse8-cec.c | |||
@@ -114,14 +114,11 @@ static void pulse8_irq_work_handler(struct work_struct *work) | |||
114 | cec_transmit_done(pulse8->adap, CEC_TX_STATUS_OK, | 114 | cec_transmit_done(pulse8->adap, CEC_TX_STATUS_OK, |
115 | 0, 0, 0, 0); | 115 | 0, 0, 0, 0); |
116 | break; | 116 | break; |
117 | case MSGCODE_TRANSMIT_FAILED_LINE: | ||
118 | cec_transmit_done(pulse8->adap, CEC_TX_STATUS_ARB_LOST, | ||
119 | 1, 0, 0, 0); | ||
120 | break; | ||
121 | case MSGCODE_TRANSMIT_FAILED_ACK: | 117 | case MSGCODE_TRANSMIT_FAILED_ACK: |
122 | cec_transmit_done(pulse8->adap, CEC_TX_STATUS_NACK, | 118 | cec_transmit_done(pulse8->adap, CEC_TX_STATUS_NACK, |
123 | 0, 1, 0, 0); | 119 | 0, 1, 0, 0); |
124 | break; | 120 | break; |
121 | case MSGCODE_TRANSMIT_FAILED_LINE: | ||
125 | case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA: | 122 | case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA: |
126 | case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE: | 123 | case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE: |
127 | cec_transmit_done(pulse8->adap, CEC_TX_STATUS_ERROR, | 124 | cec_transmit_done(pulse8->adap, CEC_TX_STATUS_ERROR, |
@@ -170,6 +167,9 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data, | |||
170 | case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE: | 167 | case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE: |
171 | schedule_work(&pulse8->work); | 168 | schedule_work(&pulse8->work); |
172 | break; | 169 | break; |
170 | case MSGCODE_HIGH_ERROR: | ||
171 | case MSGCODE_LOW_ERROR: | ||
172 | case MSGCODE_RECEIVE_FAILED: | ||
173 | case MSGCODE_TIMEOUT_ERROR: | 173 | case MSGCODE_TIMEOUT_ERROR: |
174 | break; | 174 | break; |
175 | case MSGCODE_COMMAND_ACCEPTED: | 175 | case MSGCODE_COMMAND_ACCEPTED: |
@@ -388,7 +388,7 @@ static int pulse8_cec_adap_transmit(struct cec_adapter *adap, u8 attempts, | |||
388 | int err; | 388 | int err; |
389 | 389 | ||
390 | cmd[0] = MSGCODE_TRANSMIT_IDLETIME; | 390 | cmd[0] = MSGCODE_TRANSMIT_IDLETIME; |
391 | cmd[1] = 3; | 391 | cmd[1] = signal_free_time; |
392 | err = pulse8_send_and_wait(pulse8, cmd, 2, | 392 | err = pulse8_send_and_wait(pulse8, cmd, 2, |
393 | MSGCODE_COMMAND_ACCEPTED, 1); | 393 | MSGCODE_COMMAND_ACCEPTED, 1); |
394 | cmd[0] = MSGCODE_TRANSMIT_ACK_POLARITY; | 394 | cmd[0] = MSGCODE_TRANSMIT_ACK_POLARITY; |
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c index 0b1760cba6e3..78f524fcd214 100644 --- a/drivers/staging/wilc1000/host_interface.c +++ b/drivers/staging/wilc1000/host_interface.c | |||
@@ -3363,7 +3363,7 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler) | |||
3363 | if (!hif_workqueue) { | 3363 | if (!hif_workqueue) { |
3364 | netdev_err(vif->ndev, "Failed to create workqueue\n"); | 3364 | netdev_err(vif->ndev, "Failed to create workqueue\n"); |
3365 | result = -ENOMEM; | 3365 | result = -ENOMEM; |
3366 | goto _fail_mq_; | 3366 | goto _fail_; |
3367 | } | 3367 | } |
3368 | 3368 | ||
3369 | setup_timer(&periodic_rssi, GetPeriodicRSSI, | 3369 | setup_timer(&periodic_rssi, GetPeriodicRSSI, |
@@ -3391,7 +3391,6 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler) | |||
3391 | 3391 | ||
3392 | clients_count++; | 3392 | clients_count++; |
3393 | 3393 | ||
3394 | _fail_mq_: | ||
3395 | destroy_workqueue(hif_workqueue); | 3394 | destroy_workqueue(hif_workqueue); |
3396 | _fail_: | 3395 | _fail_: |
3397 | return result; | 3396 | return result; |
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c index 3a66255f14fc..32215110d597 100644 --- a/drivers/staging/wilc1000/linux_wlan.c +++ b/drivers/staging/wilc1000/linux_wlan.c | |||
@@ -648,7 +648,7 @@ void wilc1000_wlan_deinit(struct net_device *dev) | |||
648 | mutex_unlock(&wl->hif_cs); | 648 | mutex_unlock(&wl->hif_cs); |
649 | } | 649 | } |
650 | if (&wl->txq_event) | 650 | if (&wl->txq_event) |
651 | wait_for_completion(&wl->txq_event); | 651 | complete(&wl->txq_event); |
652 | 652 | ||
653 | wlan_deinitialize_threads(dev); | 653 | wlan_deinitialize_threads(dev); |
654 | deinit_irq(dev); | 654 | deinit_irq(dev); |
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c index 9092600a1794..2c2e8aca8305 100644 --- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c +++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c | |||
@@ -1191,7 +1191,7 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev, | |||
1191 | struct wilc_priv *priv; | 1191 | struct wilc_priv *priv; |
1192 | struct wilc_vif *vif; | 1192 | struct wilc_vif *vif; |
1193 | u32 i = 0; | 1193 | u32 i = 0; |
1194 | u32 associatedsta = 0; | 1194 | u32 associatedsta = ~0; |
1195 | u32 inactive_time = 0; | 1195 | u32 inactive_time = 0; |
1196 | priv = wiphy_priv(wiphy); | 1196 | priv = wiphy_priv(wiphy); |
1197 | vif = netdev_priv(dev); | 1197 | vif = netdev_priv(dev); |
@@ -1204,7 +1204,7 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev, | |||
1204 | } | 1204 | } |
1205 | } | 1205 | } |
1206 | 1206 | ||
1207 | if (associatedsta == -1) { | 1207 | if (associatedsta == ~0) { |
1208 | netdev_err(dev, "sta required is not associated\n"); | 1208 | netdev_err(dev, "sta required is not associated\n"); |
1209 | return -ENOENT; | 1209 | return -ENOENT; |
1210 | } | 1210 | } |
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index 3788ed74c9ab..a32b41783b77 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c | |||
@@ -740,12 +740,22 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev, | |||
740 | } | 740 | } |
741 | 741 | ||
742 | /* Bind cpufreq callbacks to thermal cooling device ops */ | 742 | /* Bind cpufreq callbacks to thermal cooling device ops */ |
743 | |||
743 | static struct thermal_cooling_device_ops cpufreq_cooling_ops = { | 744 | static struct thermal_cooling_device_ops cpufreq_cooling_ops = { |
744 | .get_max_state = cpufreq_get_max_state, | 745 | .get_max_state = cpufreq_get_max_state, |
745 | .get_cur_state = cpufreq_get_cur_state, | 746 | .get_cur_state = cpufreq_get_cur_state, |
746 | .set_cur_state = cpufreq_set_cur_state, | 747 | .set_cur_state = cpufreq_set_cur_state, |
747 | }; | 748 | }; |
748 | 749 | ||
750 | static struct thermal_cooling_device_ops cpufreq_power_cooling_ops = { | ||
751 | .get_max_state = cpufreq_get_max_state, | ||
752 | .get_cur_state = cpufreq_get_cur_state, | ||
753 | .set_cur_state = cpufreq_set_cur_state, | ||
754 | .get_requested_power = cpufreq_get_requested_power, | ||
755 | .state2power = cpufreq_state2power, | ||
756 | .power2state = cpufreq_power2state, | ||
757 | }; | ||
758 | |||
749 | /* Notifier for cpufreq policy change */ | 759 | /* Notifier for cpufreq policy change */ |
750 | static struct notifier_block thermal_cpufreq_notifier_block = { | 760 | static struct notifier_block thermal_cpufreq_notifier_block = { |
751 | .notifier_call = cpufreq_thermal_notifier, | 761 | .notifier_call = cpufreq_thermal_notifier, |
@@ -795,6 +805,7 @@ __cpufreq_cooling_register(struct device_node *np, | |||
795 | struct cpumask temp_mask; | 805 | struct cpumask temp_mask; |
796 | unsigned int freq, i, num_cpus; | 806 | unsigned int freq, i, num_cpus; |
797 | int ret; | 807 | int ret; |
808 | struct thermal_cooling_device_ops *cooling_ops; | ||
798 | 809 | ||
799 | cpumask_and(&temp_mask, clip_cpus, cpu_online_mask); | 810 | cpumask_and(&temp_mask, clip_cpus, cpu_online_mask); |
800 | policy = cpufreq_cpu_get(cpumask_first(&temp_mask)); | 811 | policy = cpufreq_cpu_get(cpumask_first(&temp_mask)); |
@@ -850,10 +861,6 @@ __cpufreq_cooling_register(struct device_node *np, | |||
850 | cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus); | 861 | cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus); |
851 | 862 | ||
852 | if (capacitance) { | 863 | if (capacitance) { |
853 | cpufreq_cooling_ops.get_requested_power = | ||
854 | cpufreq_get_requested_power; | ||
855 | cpufreq_cooling_ops.state2power = cpufreq_state2power; | ||
856 | cpufreq_cooling_ops.power2state = cpufreq_power2state; | ||
857 | cpufreq_dev->plat_get_static_power = plat_static_func; | 864 | cpufreq_dev->plat_get_static_power = plat_static_func; |
858 | 865 | ||
859 | ret = build_dyn_power_table(cpufreq_dev, capacitance); | 866 | ret = build_dyn_power_table(cpufreq_dev, capacitance); |
@@ -861,6 +868,10 @@ __cpufreq_cooling_register(struct device_node *np, | |||
861 | cool_dev = ERR_PTR(ret); | 868 | cool_dev = ERR_PTR(ret); |
862 | goto free_table; | 869 | goto free_table; |
863 | } | 870 | } |
871 | |||
872 | cooling_ops = &cpufreq_power_cooling_ops; | ||
873 | } else { | ||
874 | cooling_ops = &cpufreq_cooling_ops; | ||
864 | } | 875 | } |
865 | 876 | ||
866 | ret = get_idr(&cpufreq_idr, &cpufreq_dev->id); | 877 | ret = get_idr(&cpufreq_idr, &cpufreq_dev->id); |
@@ -885,7 +896,7 @@ __cpufreq_cooling_register(struct device_node *np, | |||
885 | cpufreq_dev->id); | 896 | cpufreq_dev->id); |
886 | 897 | ||
887 | cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev, | 898 | cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev, |
888 | &cpufreq_cooling_ops); | 899 | cooling_ops); |
889 | if (IS_ERR(cool_dev)) | 900 | if (IS_ERR(cool_dev)) |
890 | goto remove_idr; | 901 | goto remove_idr; |
891 | 902 | ||
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c index c5547bd711db..e473548b5d28 100644 --- a/drivers/thermal/imx_thermal.c +++ b/drivers/thermal/imx_thermal.c | |||
@@ -471,8 +471,6 @@ MODULE_DEVICE_TABLE(of, of_imx_thermal_match); | |||
471 | 471 | ||
472 | static int imx_thermal_probe(struct platform_device *pdev) | 472 | static int imx_thermal_probe(struct platform_device *pdev) |
473 | { | 473 | { |
474 | const struct of_device_id *of_id = | ||
475 | of_match_device(of_imx_thermal_match, &pdev->dev); | ||
476 | struct imx_thermal_data *data; | 474 | struct imx_thermal_data *data; |
477 | struct regmap *map; | 475 | struct regmap *map; |
478 | int measure_freq; | 476 | int measure_freq; |
@@ -490,7 +488,7 @@ static int imx_thermal_probe(struct platform_device *pdev) | |||
490 | } | 488 | } |
491 | data->tempmon = map; | 489 | data->tempmon = map; |
492 | 490 | ||
493 | data->socdata = of_id->data; | 491 | data->socdata = of_device_get_match_data(&pdev->dev); |
494 | 492 | ||
495 | /* make sure the IRQ flag is clear before enabling irq on i.MX6SX */ | 493 | /* make sure the IRQ flag is clear before enabling irq on i.MX6SX */ |
496 | if (data->socdata->version == TEMPMON_IMX6SX) { | 494 | if (data->socdata->version == TEMPMON_IMX6SX) { |
diff --git a/drivers/thermal/int340x_thermal/int3406_thermal.c b/drivers/thermal/int340x_thermal/int3406_thermal.c index a578cd257db4..1891f34ab7fc 100644 --- a/drivers/thermal/int340x_thermal/int3406_thermal.c +++ b/drivers/thermal/int340x_thermal/int3406_thermal.c | |||
@@ -225,7 +225,6 @@ static struct platform_driver int3406_thermal_driver = { | |||
225 | .remove = int3406_thermal_remove, | 225 | .remove = int3406_thermal_remove, |
226 | .driver = { | 226 | .driver = { |
227 | .name = "int3406 thermal", | 227 | .name = "int3406 thermal", |
228 | .owner = THIS_MODULE, | ||
229 | .acpi_match_table = int3406_thermal_match, | 228 | .acpi_match_table = int3406_thermal_match, |
230 | }, | 229 | }, |
231 | }; | 230 | }; |
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index 71a339271fa5..5f817923f374 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c | |||
@@ -504,6 +504,7 @@ static int rcar_thermal_probe(struct platform_device *pdev) | |||
504 | if (IS_ERR(priv->zone)) { | 504 | if (IS_ERR(priv->zone)) { |
505 | dev_err(dev, "can't register thermal zone\n"); | 505 | dev_err(dev, "can't register thermal zone\n"); |
506 | ret = PTR_ERR(priv->zone); | 506 | ret = PTR_ERR(priv->zone); |
507 | priv->zone = NULL; | ||
507 | goto error_unregister; | 508 | goto error_unregister; |
508 | } | 509 | } |
509 | 510 | ||
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c index 9c15344b657a..a8c20413dbda 100644 --- a/drivers/thunderbolt/nhi.c +++ b/drivers/thunderbolt/nhi.c | |||
@@ -651,6 +651,12 @@ static struct pci_device_id nhi_ids[] = { | |||
651 | { | 651 | { |
652 | .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, | 652 | .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, |
653 | .vendor = PCI_VENDOR_ID_INTEL, | 653 | .vendor = PCI_VENDOR_ID_INTEL, |
654 | .device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI, | ||
655 | .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, | ||
656 | }, | ||
657 | { | ||
658 | .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, | ||
659 | .vendor = PCI_VENDOR_ID_INTEL, | ||
654 | .device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI, | 660 | .device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI, |
655 | .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, | 661 | .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, |
656 | }, | 662 | }, |
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index 1e116f53d6dd..9840fdecb73b 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c | |||
@@ -372,7 +372,9 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route) | |||
372 | 372 | ||
373 | if (sw->config.device_id != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE && | 373 | if (sw->config.device_id != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE && |
374 | sw->config.device_id != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C && | 374 | sw->config.device_id != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C && |
375 | sw->config.device_id != PCI_DEVICE_ID_INTEL_PORT_RIDGE) | 375 | sw->config.device_id != PCI_DEVICE_ID_INTEL_PORT_RIDGE && |
376 | sw->config.device_id != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE && | ||
377 | sw->config.device_id != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE) | ||
376 | tb_sw_warn(sw, "unsupported switch device id %#x\n", | 378 | tb_sw_warn(sw, "unsupported switch device id %#x\n", |
377 | sw->config.device_id); | 379 | sw->config.device_id); |
378 | 380 | ||
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h index 122e0e4029fe..1a16feac9a36 100644 --- a/drivers/tty/serial/8250/8250.h +++ b/drivers/tty/serial/8250/8250.h | |||
@@ -15,8 +15,6 @@ | |||
15 | #include <linux/serial_reg.h> | 15 | #include <linux/serial_reg.h> |
16 | #include <linux/dmaengine.h> | 16 | #include <linux/dmaengine.h> |
17 | 17 | ||
18 | #include "../serial_mctrl_gpio.h" | ||
19 | |||
20 | struct uart_8250_dma { | 18 | struct uart_8250_dma { |
21 | int (*tx_dma)(struct uart_8250_port *p); | 19 | int (*tx_dma)(struct uart_8250_port *p); |
22 | int (*rx_dma)(struct uart_8250_port *p); | 20 | int (*rx_dma)(struct uart_8250_port *p); |
@@ -133,43 +131,12 @@ void serial8250_em485_destroy(struct uart_8250_port *p); | |||
133 | 131 | ||
134 | static inline void serial8250_out_MCR(struct uart_8250_port *up, int value) | 132 | static inline void serial8250_out_MCR(struct uart_8250_port *up, int value) |
135 | { | 133 | { |
136 | int mctrl_gpio = 0; | ||
137 | |||
138 | serial_out(up, UART_MCR, value); | 134 | serial_out(up, UART_MCR, value); |
139 | |||
140 | if (value & UART_MCR_RTS) | ||
141 | mctrl_gpio |= TIOCM_RTS; | ||
142 | if (value & UART_MCR_DTR) | ||
143 | mctrl_gpio |= TIOCM_DTR; | ||
144 | |||
145 | mctrl_gpio_set(up->gpios, mctrl_gpio); | ||
146 | } | 135 | } |
147 | 136 | ||
148 | static inline int serial8250_in_MCR(struct uart_8250_port *up) | 137 | static inline int serial8250_in_MCR(struct uart_8250_port *up) |
149 | { | 138 | { |
150 | int mctrl, mctrl_gpio = 0; | 139 | return serial_in(up, UART_MCR); |
151 | |||
152 | mctrl = serial_in(up, UART_MCR); | ||
153 | |||
154 | /* save current MCR values */ | ||
155 | if (mctrl & UART_MCR_RTS) | ||
156 | mctrl_gpio |= TIOCM_RTS; | ||
157 | if (mctrl & UART_MCR_DTR) | ||
158 | mctrl_gpio |= TIOCM_DTR; | ||
159 | |||
160 | mctrl_gpio = mctrl_gpio_get_outputs(up->gpios, &mctrl_gpio); | ||
161 | |||
162 | if (mctrl_gpio & TIOCM_RTS) | ||
163 | mctrl |= UART_MCR_RTS; | ||
164 | else | ||
165 | mctrl &= ~UART_MCR_RTS; | ||
166 | |||
167 | if (mctrl_gpio & TIOCM_DTR) | ||
168 | mctrl |= UART_MCR_DTR; | ||
169 | else | ||
170 | mctrl &= ~UART_MCR_DTR; | ||
171 | |||
172 | return mctrl; | ||
173 | } | 140 | } |
174 | 141 | ||
175 | #if defined(__alpha__) && !defined(CONFIG_PCI) | 142 | #if defined(__alpha__) && !defined(CONFIG_PCI) |
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index 13ad5c3d2e68..dcf43f66404f 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c | |||
@@ -974,8 +974,6 @@ int serial8250_register_8250_port(struct uart_8250_port *up) | |||
974 | 974 | ||
975 | uart = serial8250_find_match_or_unused(&up->port); | 975 | uart = serial8250_find_match_or_unused(&up->port); |
976 | if (uart && uart->port.type != PORT_8250_CIR) { | 976 | if (uart && uart->port.type != PORT_8250_CIR) { |
977 | struct mctrl_gpios *gpios; | ||
978 | |||
979 | if (uart->port.dev) | 977 | if (uart->port.dev) |
980 | uart_remove_one_port(&serial8250_reg, &uart->port); | 978 | uart_remove_one_port(&serial8250_reg, &uart->port); |
981 | 979 | ||
@@ -1013,13 +1011,6 @@ int serial8250_register_8250_port(struct uart_8250_port *up) | |||
1013 | if (up->port.flags & UPF_FIXED_TYPE) | 1011 | if (up->port.flags & UPF_FIXED_TYPE) |
1014 | uart->port.type = up->port.type; | 1012 | uart->port.type = up->port.type; |
1015 | 1013 | ||
1016 | gpios = mctrl_gpio_init(&uart->port, 0); | ||
1017 | if (IS_ERR(gpios)) { | ||
1018 | if (PTR_ERR(gpios) != -ENOSYS) | ||
1019 | return PTR_ERR(gpios); | ||
1020 | } else | ||
1021 | uart->gpios = gpios; | ||
1022 | |||
1023 | serial8250_set_defaults(uart); | 1014 | serial8250_set_defaults(uart); |
1024 | 1015 | ||
1025 | /* Possibly override default I/O functions. */ | 1016 | /* Possibly override default I/O functions. */ |
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c index 737b4b3957b0..0facc789fe7d 100644 --- a/drivers/tty/serial/8250/8250_fintek.c +++ b/drivers/tty/serial/8250/8250_fintek.c | |||
@@ -31,7 +31,7 @@ | |||
31 | #define IO_ADDR2 0x60 | 31 | #define IO_ADDR2 0x60 |
32 | #define LDN 0x7 | 32 | #define LDN 0x7 |
33 | 33 | ||
34 | #define IRQ_MODE 0x70 | 34 | #define FINTEK_IRQ_MODE 0x70 |
35 | #define IRQ_SHARE BIT(4) | 35 | #define IRQ_SHARE BIT(4) |
36 | #define IRQ_MODE_MASK (BIT(6) | BIT(5)) | 36 | #define IRQ_MODE_MASK (BIT(6) | BIT(5)) |
37 | #define IRQ_LEVEL_LOW 0 | 37 | #define IRQ_LEVEL_LOW 0 |
@@ -195,7 +195,7 @@ static int fintek_8250_set_irq_mode(struct fintek_8250 *pdata, bool level_mode) | |||
195 | outb(LDN, pdata->base_port + ADDR_PORT); | 195 | outb(LDN, pdata->base_port + ADDR_PORT); |
196 | outb(pdata->index, pdata->base_port + DATA_PORT); | 196 | outb(pdata->index, pdata->base_port + DATA_PORT); |
197 | 197 | ||
198 | outb(IRQ_MODE, pdata->base_port + ADDR_PORT); | 198 | outb(FINTEK_IRQ_MODE, pdata->base_port + ADDR_PORT); |
199 | tmp = inb(pdata->base_port + DATA_PORT); | 199 | tmp = inb(pdata->base_port + DATA_PORT); |
200 | 200 | ||
201 | tmp &= ~IRQ_MODE_MASK; | 201 | tmp &= ~IRQ_MODE_MASK; |
diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c index 339de9cd0866..20c5db2f4264 100644 --- a/drivers/tty/serial/8250/8250_mid.c +++ b/drivers/tty/serial/8250/8250_mid.c | |||
@@ -168,6 +168,9 @@ static void mid8250_set_termios(struct uart_port *p, | |||
168 | unsigned long w = BIT(24) - 1; | 168 | unsigned long w = BIT(24) - 1; |
169 | unsigned long mul, div; | 169 | unsigned long mul, div; |
170 | 170 | ||
171 | /* Gracefully handle the B0 case: fall back to B9600 */ | ||
172 | fuart = fuart ? fuart : 9600 * 16; | ||
173 | |||
171 | if (mid->board->freq < fuart) { | 174 | if (mid->board->freq < fuart) { |
172 | /* Find prescaler value that satisfies Fuart < Fref */ | 175 | /* Find prescaler value that satisfies Fuart < Fref */ |
173 | if (mid->board->freq > baud) | 176 | if (mid->board->freq > baud) |
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index e14982f36a04..61ad6c3b20a0 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c | |||
@@ -134,21 +134,18 @@ static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl) | |||
134 | 134 | ||
135 | serial8250_do_set_mctrl(port, mctrl); | 135 | serial8250_do_set_mctrl(port, mctrl); |
136 | 136 | ||
137 | if (IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(up->gpios, | 137 | /* |
138 | UART_GPIO_RTS))) { | 138 | * Turn off autoRTS if RTS is lowered and restore autoRTS setting |
139 | /* | 139 | * if RTS is raised |
140 | * Turn off autoRTS if RTS is lowered and restore autoRTS | 140 | */ |
141 | * setting if RTS is raised | 141 | lcr = serial_in(up, UART_LCR); |
142 | */ | 142 | serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); |
143 | lcr = serial_in(up, UART_LCR); | 143 | if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS)) |
144 | serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); | 144 | priv->efr |= UART_EFR_RTS; |
145 | if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS)) | 145 | else |
146 | priv->efr |= UART_EFR_RTS; | 146 | priv->efr &= ~UART_EFR_RTS; |
147 | else | 147 | serial_out(up, UART_EFR, priv->efr); |
148 | priv->efr &= ~UART_EFR_RTS; | 148 | serial_out(up, UART_LCR, lcr); |
149 | serial_out(up, UART_EFR, priv->efr); | ||
150 | serial_out(up, UART_LCR, lcr); | ||
151 | } | ||
152 | } | 149 | } |
153 | 150 | ||
154 | /* | 151 | /* |
@@ -449,9 +446,7 @@ static void omap_8250_set_termios(struct uart_port *port, | |||
449 | priv->efr = 0; | 446 | priv->efr = 0; |
450 | up->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS | UPSTAT_AUTOXOFF); | 447 | up->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS | UPSTAT_AUTOXOFF); |
451 | 448 | ||
452 | if (termios->c_cflag & CRTSCTS && up->port.flags & UPF_HARD_FLOW | 449 | if (termios->c_cflag & CRTSCTS && up->port.flags & UPF_HARD_FLOW) { |
453 | && IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(up->gpios, | ||
454 | UART_GPIO_RTS))) { | ||
455 | /* Enable AUTOCTS (autoRTS is enabled when RTS is raised) */ | 450 | /* Enable AUTOCTS (autoRTS is enabled when RTS is raised) */ |
456 | up->port.status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS; | 451 | up->port.status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS; |
457 | priv->efr |= UART_EFR_CTS; | 452 | priv->efr |= UART_EFR_CTS; |
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 20ebaea5c414..bc51b32b2774 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c | |||
@@ -1950,6 +1950,43 @@ pci_wch_ch38x_setup(struct serial_private *priv, | |||
1950 | #define PCI_DEVICE_ID_PERICOM_PI7C9X7954 0x7954 | 1950 | #define PCI_DEVICE_ID_PERICOM_PI7C9X7954 0x7954 |
1951 | #define PCI_DEVICE_ID_PERICOM_PI7C9X7958 0x7958 | 1951 | #define PCI_DEVICE_ID_PERICOM_PI7C9X7958 0x7958 |
1952 | 1952 | ||
1953 | #define PCI_VENDOR_ID_ACCESIO 0x494f | ||
1954 | #define PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB 0x1051 | ||
1955 | #define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S 0x1053 | ||
1956 | #define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB 0x105C | ||
1957 | #define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S 0x105E | ||
1958 | #define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB 0x1091 | ||
1959 | #define PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2 0x1093 | ||
1960 | #define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB 0x1099 | ||
1961 | #define PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4 0x109B | ||
1962 | #define PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB 0x10D1 | ||
1963 | #define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM 0x10D3 | ||
1964 | #define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB 0x10DA | ||
1965 | #define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM 0x10DC | ||
1966 | #define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1 0x1108 | ||
1967 | #define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2 0x1110 | ||
1968 | #define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2 0x1111 | ||
1969 | #define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4 0x1118 | ||
1970 | #define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4 0x1119 | ||
1971 | #define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S 0x1152 | ||
1972 | #define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S 0x115A | ||
1973 | #define PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2 0x1190 | ||
1974 | #define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2 0x1191 | ||
1975 | #define PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4 0x1198 | ||
1976 | #define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4 0x1199 | ||
1977 | #define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM 0x11D0 | ||
1978 | #define PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4 0x105A | ||
1979 | #define PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4 0x105B | ||
1980 | #define PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8 0x106A | ||
1981 | #define PCI_DEVICE_ID_ACCESIO_PCIE_COM485_8 0x106B | ||
1982 | #define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4 0x1098 | ||
1983 | #define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8 0x10A9 | ||
1984 | #define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM 0x10D9 | ||
1985 | #define PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM 0x10E9 | ||
1986 | #define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM 0x11D8 | ||
1987 | |||
1988 | |||
1989 | |||
1953 | /* Unknown vendors/cards - this should not be in linux/pci_ids.h */ | 1990 | /* Unknown vendors/cards - this should not be in linux/pci_ids.h */ |
1954 | #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584 | 1991 | #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584 |
1955 | #define PCI_SUBDEVICE_ID_UNKNOWN_0x1588 0x1588 | 1992 | #define PCI_SUBDEVICE_ID_UNKNOWN_0x1588 0x1588 |
@@ -5113,6 +5150,108 @@ static struct pci_device_id serial_pci_tbl[] = { | |||
5113 | 0, | 5150 | 0, |
5114 | 0, pbn_pericom_PI7C9X7958 }, | 5151 | 0, pbn_pericom_PI7C9X7958 }, |
5115 | /* | 5152 | /* |
5153 | * ACCES I/O Products quad | ||
5154 | */ | ||
5155 | { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB, | ||
5156 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
5157 | pbn_pericom_PI7C9X7954 }, | ||
5158 | { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S, | ||
5159 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
5160 | pbn_pericom_PI7C9X7954 }, | ||
5161 | { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB, | ||
5162 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
5163 | pbn_pericom_PI7C9X7954 }, | ||
5164 | { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S, | ||
5165 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
5166 | pbn_pericom_PI7C9X7954 }, | ||
5167 | { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB, | ||
5168 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
5169 | pbn_pericom_PI7C9X7954 }, | ||
5170 | { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2, | ||
5171 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
5172 | pbn_pericom_PI7C9X7954 }, | ||
5173 | { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB, | ||
5174 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
5175 | pbn_pericom_PI7C9X7954 }, | ||
5176 | { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4, | ||
5177 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
5178 | pbn_pericom_PI7C9X7954 }, | ||
5179 | { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB, | ||
5180 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
5181 | pbn_pericom_PI7C9X7954 }, | ||
5182 | { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM, | ||
5183 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
5184 | pbn_pericom_PI7C9X7954 }, | ||
5185 | { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB, | ||
5186 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
5187 | pbn_pericom_PI7C9X7954 }, | ||
5188 | { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM, | ||
5189 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
5190 | pbn_pericom_PI7C9X7954 }, | ||
5191 | { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1, | ||
5192 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
5193 | pbn_pericom_PI7C9X7954 }, | ||
5194 | { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2, | ||
5195 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
5196 | pbn_pericom_PI7C9X7954 }, | ||
5197 | { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2, | ||
5198 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
5199 | pbn_pericom_PI7C9X7954 }, | ||
5200 | { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4, | ||
5201 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
5202 | pbn_pericom_PI7C9X7954 }, | ||
5203 | { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4, | ||
5204 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
5205 | pbn_pericom_PI7C9X7954 }, | ||
5206 | { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S, | ||
5207 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
5208 | pbn_pericom_PI7C9X7954 }, | ||
5209 | { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S, | ||
5210 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
5211 | pbn_pericom_PI7C9X7954 }, | ||
5212 | { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2, | ||
5213 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
5214 | pbn_pericom_PI7C9X7954 }, | ||
5215 | { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2, | ||
5216 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
5217 | pbn_pericom_PI7C9X7954 }, | ||
5218 | { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4, | ||
5219 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
5220 | pbn_pericom_PI7C9X7954 }, | ||
5221 | { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4, | ||
5222 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
5223 | pbn_pericom_PI7C9X7954 }, | ||
5224 | { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM, | ||
5225 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
5226 | pbn_pericom_PI7C9X7954 }, | ||
5227 | { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4, | ||
5228 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
5229 | pbn_pericom_PI7C9X7958 }, | ||
5230 | { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4, | ||
5231 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
5232 | pbn_pericom_PI7C9X7958 }, | ||
5233 | { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8, | ||
5234 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
5235 | pbn_pericom_PI7C9X7958 }, | ||
5236 | { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_8, | ||
5237 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
5238 | pbn_pericom_PI7C9X7958 }, | ||
5239 | { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4, | ||
5240 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
5241 | pbn_pericom_PI7C9X7958 }, | ||
5242 | { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8, | ||
5243 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
5244 | pbn_pericom_PI7C9X7958 }, | ||
5245 | { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM, | ||
5246 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
5247 | pbn_pericom_PI7C9X7958 }, | ||
5248 | { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM, | ||
5249 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
5250 | pbn_pericom_PI7C9X7958 }, | ||
5251 | { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM, | ||
5252 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
5253 | pbn_pericom_PI7C9X7958 }, | ||
5254 | /* | ||
5116 | * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke) | 5255 | * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke) |
5117 | */ | 5256 | */ |
5118 | { PCI_VENDOR_ID_TOPIC, PCI_DEVICE_ID_TOPIC_TP560, | 5257 | { PCI_VENDOR_ID_TOPIC, PCI_DEVICE_ID_TOPIC_TP560, |
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 7481b95c6d84..bdfa659b9606 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c | |||
@@ -1618,8 +1618,6 @@ static void serial8250_disable_ms(struct uart_port *port) | |||
1618 | if (up->bugs & UART_BUG_NOMSR) | 1618 | if (up->bugs & UART_BUG_NOMSR) |
1619 | return; | 1619 | return; |
1620 | 1620 | ||
1621 | mctrl_gpio_disable_ms(up->gpios); | ||
1622 | |||
1623 | up->ier &= ~UART_IER_MSI; | 1621 | up->ier &= ~UART_IER_MSI; |
1624 | serial_port_out(port, UART_IER, up->ier); | 1622 | serial_port_out(port, UART_IER, up->ier); |
1625 | } | 1623 | } |
@@ -1632,8 +1630,6 @@ static void serial8250_enable_ms(struct uart_port *port) | |||
1632 | if (up->bugs & UART_BUG_NOMSR) | 1630 | if (up->bugs & UART_BUG_NOMSR) |
1633 | return; | 1631 | return; |
1634 | 1632 | ||
1635 | mctrl_gpio_enable_ms(up->gpios); | ||
1636 | |||
1637 | up->ier |= UART_IER_MSI; | 1633 | up->ier |= UART_IER_MSI; |
1638 | 1634 | ||
1639 | serial8250_rpm_get(up); | 1635 | serial8250_rpm_get(up); |
@@ -1917,8 +1913,7 @@ unsigned int serial8250_do_get_mctrl(struct uart_port *port) | |||
1917 | ret |= TIOCM_DSR; | 1913 | ret |= TIOCM_DSR; |
1918 | if (status & UART_MSR_CTS) | 1914 | if (status & UART_MSR_CTS) |
1919 | ret |= TIOCM_CTS; | 1915 | ret |= TIOCM_CTS; |
1920 | 1916 | return ret; | |
1921 | return mctrl_gpio_get(up->gpios, &ret); | ||
1922 | } | 1917 | } |
1923 | EXPORT_SYMBOL_GPL(serial8250_do_get_mctrl); | 1918 | EXPORT_SYMBOL_GPL(serial8250_do_get_mctrl); |
1924 | 1919 | ||
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig index c9ec839a5ddf..7c6f7afca5dd 100644 --- a/drivers/tty/serial/8250/Kconfig +++ b/drivers/tty/serial/8250/Kconfig | |||
@@ -6,7 +6,6 @@ | |||
6 | config SERIAL_8250 | 6 | config SERIAL_8250 |
7 | tristate "8250/16550 and compatible serial support" | 7 | tristate "8250/16550 and compatible serial support" |
8 | select SERIAL_CORE | 8 | select SERIAL_CORE |
9 | select SERIAL_MCTRL_GPIO if GPIOLIB | ||
10 | ---help--- | 9 | ---help--- |
11 | This selects whether you want to include the driver for the standard | 10 | This selects whether you want to include the driver for the standard |
12 | serial ports. The standard answer is Y. People who might say N | 11 | serial ports. The standard answer is Y. People who might say N |
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index 065f5d97aa67..b93356834bb5 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c | |||
@@ -949,6 +949,15 @@ static int isr_setup_status_phase(struct ci_hdrc *ci) | |||
949 | int retval; | 949 | int retval; |
950 | struct ci_hw_ep *hwep; | 950 | struct ci_hw_ep *hwep; |
951 | 951 | ||
952 | /* | ||
953 | * Unexpected USB controller behavior, caused by bad signal integrity | ||
954 | * or ground reference problems, can lead to isr_setup_status_phase | ||
955 | * being called with ci->status equal to NULL. | ||
956 | * If this situation occurs, you should review your USB hardware design. | ||
957 | */ | ||
958 | if (WARN_ON_ONCE(!ci->status)) | ||
959 | return -EPIPE; | ||
960 | |||
952 | hwep = (ci->ep0_dir == TX) ? ci->ep0out : ci->ep0in; | 961 | hwep = (ci->ep0_dir == TX) ? ci->ep0out : ci->ep0in; |
953 | ci->status->context = ci; | 962 | ci->status->context = ci; |
954 | ci->status->complete = isr_setup_status_complete; | 963 | ci->status->complete = isr_setup_status_complete; |
@@ -1596,8 +1605,11 @@ static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on) | |||
1596 | { | 1605 | { |
1597 | struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget); | 1606 | struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget); |
1598 | 1607 | ||
1599 | /* Data+ pullup controlled by OTG state machine in OTG fsm mode */ | 1608 | /* |
1600 | if (ci_otg_is_fsm_mode(ci)) | 1609 | * Data+ pullup controlled by OTG state machine in OTG fsm mode; |
1610 | * and don't touch Data+ in host mode for dual role config. | ||
1611 | */ | ||
1612 | if (ci_otg_is_fsm_mode(ci) || ci->role == CI_ROLE_HOST) | ||
1601 | return 0; | 1613 | return 0; |
1602 | 1614 | ||
1603 | pm_runtime_get_sync(&ci->gadget.dev); | 1615 | pm_runtime_get_sync(&ci->gadget.dev); |
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 71912301ef7f..0f3f62e81e5b 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
@@ -1354,7 +1354,6 @@ made_compressed_probe: | |||
1354 | spin_lock_init(&acm->write_lock); | 1354 | spin_lock_init(&acm->write_lock); |
1355 | spin_lock_init(&acm->read_lock); | 1355 | spin_lock_init(&acm->read_lock); |
1356 | mutex_init(&acm->mutex); | 1356 | mutex_init(&acm->mutex); |
1357 | acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress); | ||
1358 | acm->is_int_ep = usb_endpoint_xfer_int(epread); | 1357 | acm->is_int_ep = usb_endpoint_xfer_int(epread); |
1359 | if (acm->is_int_ep) | 1358 | if (acm->is_int_ep) |
1360 | acm->bInterval = epread->bInterval; | 1359 | acm->bInterval = epread->bInterval; |
@@ -1394,14 +1393,14 @@ made_compressed_probe: | |||
1394 | urb->transfer_dma = rb->dma; | 1393 | urb->transfer_dma = rb->dma; |
1395 | if (acm->is_int_ep) { | 1394 | if (acm->is_int_ep) { |
1396 | usb_fill_int_urb(urb, acm->dev, | 1395 | usb_fill_int_urb(urb, acm->dev, |
1397 | acm->rx_endpoint, | 1396 | usb_rcvintpipe(usb_dev, epread->bEndpointAddress), |
1398 | rb->base, | 1397 | rb->base, |
1399 | acm->readsize, | 1398 | acm->readsize, |
1400 | acm_read_bulk_callback, rb, | 1399 | acm_read_bulk_callback, rb, |
1401 | acm->bInterval); | 1400 | acm->bInterval); |
1402 | } else { | 1401 | } else { |
1403 | usb_fill_bulk_urb(urb, acm->dev, | 1402 | usb_fill_bulk_urb(urb, acm->dev, |
1404 | acm->rx_endpoint, | 1403 | usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress), |
1405 | rb->base, | 1404 | rb->base, |
1406 | acm->readsize, | 1405 | acm->readsize, |
1407 | acm_read_bulk_callback, rb); | 1406 | acm_read_bulk_callback, rb); |
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h index 05ce308d5d2a..1f1eabfd8462 100644 --- a/drivers/usb/class/cdc-acm.h +++ b/drivers/usb/class/cdc-acm.h | |||
@@ -96,7 +96,6 @@ struct acm { | |||
96 | struct acm_rb read_buffers[ACM_NR]; | 96 | struct acm_rb read_buffers[ACM_NR]; |
97 | struct acm_wb *putbuffer; /* for acm_tty_put_char() */ | 97 | struct acm_wb *putbuffer; /* for acm_tty_put_char() */ |
98 | int rx_buflimit; | 98 | int rx_buflimit; |
99 | int rx_endpoint; | ||
100 | spinlock_t read_lock; | 99 | spinlock_t read_lock; |
101 | int write_used; /* number of non-empty write buffers */ | 100 | int write_used; /* number of non-empty write buffers */ |
102 | int transmitting; | 101 | int transmitting; |
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 31ccdccd7a04..a2d90aca779f 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c | |||
@@ -171,6 +171,31 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, | |||
171 | ep, buffer, size); | 171 | ep, buffer, size); |
172 | } | 172 | } |
173 | 173 | ||
174 | static const unsigned short low_speed_maxpacket_maxes[4] = { | ||
175 | [USB_ENDPOINT_XFER_CONTROL] = 8, | ||
176 | [USB_ENDPOINT_XFER_ISOC] = 0, | ||
177 | [USB_ENDPOINT_XFER_BULK] = 0, | ||
178 | [USB_ENDPOINT_XFER_INT] = 8, | ||
179 | }; | ||
180 | static const unsigned short full_speed_maxpacket_maxes[4] = { | ||
181 | [USB_ENDPOINT_XFER_CONTROL] = 64, | ||
182 | [USB_ENDPOINT_XFER_ISOC] = 1023, | ||
183 | [USB_ENDPOINT_XFER_BULK] = 64, | ||
184 | [USB_ENDPOINT_XFER_INT] = 64, | ||
185 | }; | ||
186 | static const unsigned short high_speed_maxpacket_maxes[4] = { | ||
187 | [USB_ENDPOINT_XFER_CONTROL] = 64, | ||
188 | [USB_ENDPOINT_XFER_ISOC] = 1024, | ||
189 | [USB_ENDPOINT_XFER_BULK] = 512, | ||
190 | [USB_ENDPOINT_XFER_INT] = 1024, | ||
191 | }; | ||
192 | static const unsigned short super_speed_maxpacket_maxes[4] = { | ||
193 | [USB_ENDPOINT_XFER_CONTROL] = 512, | ||
194 | [USB_ENDPOINT_XFER_ISOC] = 1024, | ||
195 | [USB_ENDPOINT_XFER_BULK] = 1024, | ||
196 | [USB_ENDPOINT_XFER_INT] = 1024, | ||
197 | }; | ||
198 | |||
174 | static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, | 199 | static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, |
175 | int asnum, struct usb_host_interface *ifp, int num_ep, | 200 | int asnum, struct usb_host_interface *ifp, int num_ep, |
176 | unsigned char *buffer, int size) | 201 | unsigned char *buffer, int size) |
@@ -179,6 +204,8 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, | |||
179 | struct usb_endpoint_descriptor *d; | 204 | struct usb_endpoint_descriptor *d; |
180 | struct usb_host_endpoint *endpoint; | 205 | struct usb_host_endpoint *endpoint; |
181 | int n, i, j, retval; | 206 | int n, i, j, retval; |
207 | unsigned int maxp; | ||
208 | const unsigned short *maxpacket_maxes; | ||
182 | 209 | ||
183 | d = (struct usb_endpoint_descriptor *) buffer; | 210 | d = (struct usb_endpoint_descriptor *) buffer; |
184 | buffer += d->bLength; | 211 | buffer += d->bLength; |
@@ -213,8 +240,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, | |||
213 | memcpy(&endpoint->desc, d, n); | 240 | memcpy(&endpoint->desc, d, n); |
214 | INIT_LIST_HEAD(&endpoint->urb_list); | 241 | INIT_LIST_HEAD(&endpoint->urb_list); |
215 | 242 | ||
216 | /* Fix up bInterval values outside the legal range. Use 32 ms if no | 243 | /* |
217 | * proper value can be guessed. */ | 244 | * Fix up bInterval values outside the legal range. |
245 | * Use 10 or 8 ms if no proper value can be guessed. | ||
246 | */ | ||
218 | i = 0; /* i = min, j = max, n = default */ | 247 | i = 0; /* i = min, j = max, n = default */ |
219 | j = 255; | 248 | j = 255; |
220 | if (usb_endpoint_xfer_int(d)) { | 249 | if (usb_endpoint_xfer_int(d)) { |
@@ -223,13 +252,15 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, | |||
223 | case USB_SPEED_SUPER_PLUS: | 252 | case USB_SPEED_SUPER_PLUS: |
224 | case USB_SPEED_SUPER: | 253 | case USB_SPEED_SUPER: |
225 | case USB_SPEED_HIGH: | 254 | case USB_SPEED_HIGH: |
226 | /* Many device manufacturers are using full-speed | 255 | /* |
256 | * Many device manufacturers are using full-speed | ||
227 | * bInterval values in high-speed interrupt endpoint | 257 | * bInterval values in high-speed interrupt endpoint |
228 | * descriptors. Try to fix those and fall back to a | 258 | * descriptors. Try to fix those and fall back to an |
229 | * 32 ms default value otherwise. */ | 259 | * 8-ms default value otherwise. |
260 | */ | ||
230 | n = fls(d->bInterval*8); | 261 | n = fls(d->bInterval*8); |
231 | if (n == 0) | 262 | if (n == 0) |
232 | n = 9; /* 32 ms = 2^(9-1) uframes */ | 263 | n = 7; /* 8 ms = 2^(7-1) uframes */ |
233 | j = 16; | 264 | j = 16; |
234 | 265 | ||
235 | /* | 266 | /* |
@@ -244,10 +275,12 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, | |||
244 | } | 275 | } |
245 | break; | 276 | break; |
246 | default: /* USB_SPEED_FULL or _LOW */ | 277 | default: /* USB_SPEED_FULL or _LOW */ |
247 | /* For low-speed, 10 ms is the official minimum. | 278 | /* |
279 | * For low-speed, 10 ms is the official minimum. | ||
248 | * But some "overclocked" devices might want faster | 280 | * But some "overclocked" devices might want faster |
249 | * polling so we'll allow it. */ | 281 | * polling so we'll allow it. |
250 | n = 32; | 282 | */ |
283 | n = 10; | ||
251 | break; | 284 | break; |
252 | } | 285 | } |
253 | } else if (usb_endpoint_xfer_isoc(d)) { | 286 | } else if (usb_endpoint_xfer_isoc(d)) { |
@@ -255,10 +288,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, | |||
255 | j = 16; | 288 | j = 16; |
256 | switch (to_usb_device(ddev)->speed) { | 289 | switch (to_usb_device(ddev)->speed) { |
257 | case USB_SPEED_HIGH: | 290 | case USB_SPEED_HIGH: |
258 | n = 9; /* 32 ms = 2^(9-1) uframes */ | 291 | n = 7; /* 8 ms = 2^(7-1) uframes */ |
259 | break; | 292 | break; |
260 | default: /* USB_SPEED_FULL */ | 293 | default: /* USB_SPEED_FULL */ |
261 | n = 6; /* 32 ms = 2^(6-1) frames */ | 294 | n = 4; /* 8 ms = 2^(4-1) frames */ |
262 | break; | 295 | break; |
263 | } | 296 | } |
264 | } | 297 | } |
@@ -286,6 +319,42 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, | |||
286 | endpoint->desc.wMaxPacketSize = cpu_to_le16(8); | 319 | endpoint->desc.wMaxPacketSize = cpu_to_le16(8); |
287 | } | 320 | } |
288 | 321 | ||
322 | /* Validate the wMaxPacketSize field */ | ||
323 | maxp = usb_endpoint_maxp(&endpoint->desc); | ||
324 | |||
325 | /* Find the highest legal maxpacket size for this endpoint */ | ||
326 | i = 0; /* additional transactions per microframe */ | ||
327 | switch (to_usb_device(ddev)->speed) { | ||
328 | case USB_SPEED_LOW: | ||
329 | maxpacket_maxes = low_speed_maxpacket_maxes; | ||
330 | break; | ||
331 | case USB_SPEED_FULL: | ||
332 | maxpacket_maxes = full_speed_maxpacket_maxes; | ||
333 | break; | ||
334 | case USB_SPEED_HIGH: | ||
335 | /* Bits 12..11 are allowed only for HS periodic endpoints */ | ||
336 | if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) { | ||
337 | i = maxp & (BIT(12) | BIT(11)); | ||
338 | maxp &= ~i; | ||
339 | } | ||
340 | /* fallthrough */ | ||
341 | default: | ||
342 | maxpacket_maxes = high_speed_maxpacket_maxes; | ||
343 | break; | ||
344 | case USB_SPEED_SUPER: | ||
345 | case USB_SPEED_SUPER_PLUS: | ||
346 | maxpacket_maxes = super_speed_maxpacket_maxes; | ||
347 | break; | ||
348 | } | ||
349 | j = maxpacket_maxes[usb_endpoint_type(&endpoint->desc)]; | ||
350 | |||
351 | if (maxp > j) { | ||
352 | dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid maxpacket %d, setting to %d\n", | ||
353 | cfgno, inum, asnum, d->bEndpointAddress, maxp, j); | ||
354 | maxp = j; | ||
355 | endpoint->desc.wMaxPacketSize = cpu_to_le16(i | maxp); | ||
356 | } | ||
357 | |||
289 | /* | 358 | /* |
290 | * Some buggy high speed devices have bulk endpoints using | 359 | * Some buggy high speed devices have bulk endpoints using |
291 | * maxpacket sizes other than 512. High speed HCDs may not | 360 | * maxpacket sizes other than 512. High speed HCDs may not |
@@ -293,9 +362,6 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, | |||
293 | */ | 362 | */ |
294 | if (to_usb_device(ddev)->speed == USB_SPEED_HIGH | 363 | if (to_usb_device(ddev)->speed == USB_SPEED_HIGH |
295 | && usb_endpoint_xfer_bulk(d)) { | 364 | && usb_endpoint_xfer_bulk(d)) { |
296 | unsigned maxp; | ||
297 | |||
298 | maxp = usb_endpoint_maxp(&endpoint->desc) & 0x07ff; | ||
299 | if (maxp != 512) | 365 | if (maxp != 512) |
300 | dev_warn(ddev, "config %d interface %d altsetting %d " | 366 | dev_warn(ddev, "config %d interface %d altsetting %d " |
301 | "bulk endpoint 0x%X has invalid maxpacket %d\n", | 367 | "bulk endpoint 0x%X has invalid maxpacket %d\n", |
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index e9f5043a2167..09c8d9ca61ae 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c | |||
@@ -241,7 +241,8 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma) | |||
241 | goto error_decrease_mem; | 241 | goto error_decrease_mem; |
242 | } | 242 | } |
243 | 243 | ||
244 | mem = usb_alloc_coherent(ps->dev, size, GFP_USER, &dma_handle); | 244 | mem = usb_alloc_coherent(ps->dev, size, GFP_USER | __GFP_NOWARN, |
245 | &dma_handle); | ||
245 | if (!mem) { | 246 | if (!mem) { |
246 | ret = -ENOMEM; | 247 | ret = -ENOMEM; |
247 | goto error_free_usbm; | 248 | goto error_free_usbm; |
@@ -1708,11 +1709,17 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb | |||
1708 | as->urb->start_frame = uurb->start_frame; | 1709 | as->urb->start_frame = uurb->start_frame; |
1709 | as->urb->number_of_packets = number_of_packets; | 1710 | as->urb->number_of_packets = number_of_packets; |
1710 | as->urb->stream_id = stream_id; | 1711 | as->urb->stream_id = stream_id; |
1711 | if (uurb->type == USBDEVFS_URB_TYPE_ISO || | 1712 | |
1712 | ps->dev->speed == USB_SPEED_HIGH) | 1713 | if (ep->desc.bInterval) { |
1713 | as->urb->interval = 1 << min(15, ep->desc.bInterval - 1); | 1714 | if (uurb->type == USBDEVFS_URB_TYPE_ISO || |
1714 | else | 1715 | ps->dev->speed == USB_SPEED_HIGH || |
1715 | as->urb->interval = ep->desc.bInterval; | 1716 | ps->dev->speed >= USB_SPEED_SUPER) |
1717 | as->urb->interval = 1 << | ||
1718 | min(15, ep->desc.bInterval - 1); | ||
1719 | else | ||
1720 | as->urb->interval = ep->desc.bInterval; | ||
1721 | } | ||
1722 | |||
1716 | as->urb->context = as; | 1723 | as->urb->context = as; |
1717 | as->urb->complete = async_completed; | 1724 | as->urb->complete = async_completed; |
1718 | for (totlen = u = 0; u < number_of_packets; u++) { | 1725 | for (totlen = u = 0; u < number_of_packets; u++) { |
@@ -2582,7 +2589,9 @@ static unsigned int usbdev_poll(struct file *file, | |||
2582 | if (file->f_mode & FMODE_WRITE && !list_empty(&ps->async_completed)) | 2589 | if (file->f_mode & FMODE_WRITE && !list_empty(&ps->async_completed)) |
2583 | mask |= POLLOUT | POLLWRNORM; | 2590 | mask |= POLLOUT | POLLWRNORM; |
2584 | if (!connected(ps)) | 2591 | if (!connected(ps)) |
2585 | mask |= POLLERR | POLLHUP; | 2592 | mask |= POLLHUP; |
2593 | if (list_empty(&ps->list)) | ||
2594 | mask |= POLLERR; | ||
2586 | return mask; | 2595 | return mask; |
2587 | } | 2596 | } |
2588 | 2597 | ||
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index bee13517676f..1d5fc32d06d0 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
@@ -1052,14 +1052,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) | |||
1052 | 1052 | ||
1053 | /* Continue a partial initialization */ | 1053 | /* Continue a partial initialization */ |
1054 | if (type == HUB_INIT2 || type == HUB_INIT3) { | 1054 | if (type == HUB_INIT2 || type == HUB_INIT3) { |
1055 | device_lock(hub->intfdev); | 1055 | device_lock(&hdev->dev); |
1056 | 1056 | ||
1057 | /* Was the hub disconnected while we were waiting? */ | 1057 | /* Was the hub disconnected while we were waiting? */ |
1058 | if (hub->disconnected) { | 1058 | if (hub->disconnected) |
1059 | device_unlock(hub->intfdev); | 1059 | goto disconnected; |
1060 | kref_put(&hub->kref, hub_release); | ||
1061 | return; | ||
1062 | } | ||
1063 | if (type == HUB_INIT2) | 1060 | if (type == HUB_INIT2) |
1064 | goto init2; | 1061 | goto init2; |
1065 | goto init3; | 1062 | goto init3; |
@@ -1262,7 +1259,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) | |||
1262 | queue_delayed_work(system_power_efficient_wq, | 1259 | queue_delayed_work(system_power_efficient_wq, |
1263 | &hub->init_work, | 1260 | &hub->init_work, |
1264 | msecs_to_jiffies(delay)); | 1261 | msecs_to_jiffies(delay)); |
1265 | device_unlock(hub->intfdev); | 1262 | device_unlock(&hdev->dev); |
1266 | return; /* Continues at init3: below */ | 1263 | return; /* Continues at init3: below */ |
1267 | } else { | 1264 | } else { |
1268 | msleep(delay); | 1265 | msleep(delay); |
@@ -1281,12 +1278,12 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) | |||
1281 | /* Scan all ports that need attention */ | 1278 | /* Scan all ports that need attention */ |
1282 | kick_hub_wq(hub); | 1279 | kick_hub_wq(hub); |
1283 | 1280 | ||
1284 | /* Allow autosuspend if it was suppressed */ | 1281 | if (type == HUB_INIT2 || type == HUB_INIT3) { |
1285 | if (type <= HUB_INIT3) | 1282 | /* Allow autosuspend if it was suppressed */ |
1283 | disconnected: | ||
1286 | usb_autopm_put_interface_async(to_usb_interface(hub->intfdev)); | 1284 | usb_autopm_put_interface_async(to_usb_interface(hub->intfdev)); |
1287 | 1285 | device_unlock(&hdev->dev); | |
1288 | if (type == HUB_INIT2 || type == HUB_INIT3) | 1286 | } |
1289 | device_unlock(hub->intfdev); | ||
1290 | 1287 | ||
1291 | kref_put(&hub->kref, hub_release); | 1288 | kref_put(&hub->kref, hub_release); |
1292 | } | 1289 | } |
@@ -1315,8 +1312,6 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type) | |||
1315 | struct usb_device *hdev = hub->hdev; | 1312 | struct usb_device *hdev = hub->hdev; |
1316 | int i; | 1313 | int i; |
1317 | 1314 | ||
1318 | cancel_delayed_work_sync(&hub->init_work); | ||
1319 | |||
1320 | /* hub_wq and related activity won't re-trigger */ | 1315 | /* hub_wq and related activity won't re-trigger */ |
1321 | hub->quiescing = 1; | 1316 | hub->quiescing = 1; |
1322 | 1317 | ||
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index 9fae0291cd69..d64551243789 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h | |||
@@ -868,6 +868,7 @@ struct dwc2_hsotg { | |||
868 | void *priv; | 868 | void *priv; |
869 | int irq; | 869 | int irq; |
870 | struct clk *clk; | 870 | struct clk *clk; |
871 | struct reset_control *reset; | ||
871 | 872 | ||
872 | unsigned int queuing_high_bandwidth:1; | 873 | unsigned int queuing_high_bandwidth:1; |
873 | unsigned int srp_success:1; | 874 | unsigned int srp_success:1; |
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c index fc6f5251de5d..530959a8a6d1 100644 --- a/drivers/usb/dwc2/platform.c +++ b/drivers/usb/dwc2/platform.c | |||
@@ -45,6 +45,7 @@ | |||
45 | #include <linux/platform_device.h> | 45 | #include <linux/platform_device.h> |
46 | #include <linux/phy/phy.h> | 46 | #include <linux/phy/phy.h> |
47 | #include <linux/platform_data/s3c-hsotg.h> | 47 | #include <linux/platform_data/s3c-hsotg.h> |
48 | #include <linux/reset.h> | ||
48 | 49 | ||
49 | #include <linux/usb/of.h> | 50 | #include <linux/usb/of.h> |
50 | 51 | ||
@@ -337,6 +338,24 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg) | |||
337 | { | 338 | { |
338 | int i, ret; | 339 | int i, ret; |
339 | 340 | ||
341 | hsotg->reset = devm_reset_control_get_optional(hsotg->dev, "dwc2"); | ||
342 | if (IS_ERR(hsotg->reset)) { | ||
343 | ret = PTR_ERR(hsotg->reset); | ||
344 | switch (ret) { | ||
345 | case -ENOENT: | ||
346 | case -ENOTSUPP: | ||
347 | hsotg->reset = NULL; | ||
348 | break; | ||
349 | default: | ||
350 | dev_err(hsotg->dev, "error getting reset control %d\n", | ||
351 | ret); | ||
352 | return ret; | ||
353 | } | ||
354 | } | ||
355 | |||
356 | if (hsotg->reset) | ||
357 | reset_control_deassert(hsotg->reset); | ||
358 | |||
340 | /* Set default UTMI width */ | 359 | /* Set default UTMI width */ |
341 | hsotg->phyif = GUSBCFG_PHYIF16; | 360 | hsotg->phyif = GUSBCFG_PHYIF16; |
342 | 361 | ||
@@ -434,6 +453,9 @@ static int dwc2_driver_remove(struct platform_device *dev) | |||
434 | if (hsotg->ll_hw_enabled) | 453 | if (hsotg->ll_hw_enabled) |
435 | dwc2_lowlevel_hw_disable(hsotg); | 454 | dwc2_lowlevel_hw_disable(hsotg); |
436 | 455 | ||
456 | if (hsotg->reset) | ||
457 | reset_control_assert(hsotg->reset); | ||
458 | |||
437 | return 0; | 459 | return 0; |
438 | } | 460 | } |
439 | 461 | ||
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 946643157b78..35d092456bec 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c | |||
@@ -1192,6 +1192,7 @@ static int dwc3_runtime_resume(struct device *dev) | |||
1192 | } | 1192 | } |
1193 | 1193 | ||
1194 | pm_runtime_mark_last_busy(dev); | 1194 | pm_runtime_mark_last_busy(dev); |
1195 | pm_runtime_put(dev); | ||
1195 | 1196 | ||
1196 | return 0; | 1197 | return 0; |
1197 | } | 1198 | } |
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h index 22dfc3dd6a13..33ab2a203c1b 100644 --- a/drivers/usb/dwc3/debug.h +++ b/drivers/usb/dwc3/debug.h | |||
@@ -192,7 +192,7 @@ dwc3_ep_event_string(const struct dwc3_event_depevt *event) | |||
192 | int ret; | 192 | int ret; |
193 | 193 | ||
194 | ret = sprintf(str, "ep%d%s: ", epnum >> 1, | 194 | ret = sprintf(str, "ep%d%s: ", epnum >> 1, |
195 | (epnum & 1) ? "in" : "in"); | 195 | (epnum & 1) ? "in" : "out"); |
196 | if (ret < 0) | 196 | if (ret < 0) |
197 | return "UNKNOWN"; | 197 | return "UNKNOWN"; |
198 | 198 | ||
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c index 974335377d9f..e56d59b19a0e 100644 --- a/drivers/usb/dwc3/dwc3-of-simple.c +++ b/drivers/usb/dwc3/dwc3-of-simple.c | |||
@@ -61,6 +61,7 @@ static int dwc3_of_simple_probe(struct platform_device *pdev) | |||
61 | if (!simple->clks) | 61 | if (!simple->clks) |
62 | return -ENOMEM; | 62 | return -ENOMEM; |
63 | 63 | ||
64 | platform_set_drvdata(pdev, simple); | ||
64 | simple->dev = dev; | 65 | simple->dev = dev; |
65 | 66 | ||
66 | for (i = 0; i < simple->num_clocks; i++) { | 67 | for (i = 0; i < simple->num_clocks; i++) { |
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 45f5a232d9fb..6df0f5dad9a4 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #define PCI_DEVICE_ID_INTEL_BXT 0x0aaa | 37 | #define PCI_DEVICE_ID_INTEL_BXT 0x0aaa |
38 | #define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa | 38 | #define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa |
39 | #define PCI_DEVICE_ID_INTEL_APL 0x5aaa | 39 | #define PCI_DEVICE_ID_INTEL_APL 0x5aaa |
40 | #define PCI_DEVICE_ID_INTEL_KBP 0xa2b0 | ||
40 | 41 | ||
41 | static const struct acpi_gpio_params reset_gpios = { 0, 0, false }; | 42 | static const struct acpi_gpio_params reset_gpios = { 0, 0, false }; |
42 | static const struct acpi_gpio_params cs_gpios = { 1, 0, false }; | 43 | static const struct acpi_gpio_params cs_gpios = { 1, 0, false }; |
@@ -227,6 +228,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = { | |||
227 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), }, | 228 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), }, |
228 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT_M), }, | 229 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT_M), }, |
229 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), }, | 230 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), }, |
231 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), }, | ||
230 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, | 232 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, |
231 | { } /* Terminating Entry */ | 233 | { } /* Terminating Entry */ |
232 | }; | 234 | }; |
@@ -241,6 +243,15 @@ static int dwc3_pci_runtime_suspend(struct device *dev) | |||
241 | return -EBUSY; | 243 | return -EBUSY; |
242 | } | 244 | } |
243 | 245 | ||
246 | static int dwc3_pci_runtime_resume(struct device *dev) | ||
247 | { | ||
248 | struct platform_device *dwc3 = dev_get_drvdata(dev); | ||
249 | |||
250 | return pm_runtime_get(&dwc3->dev); | ||
251 | } | ||
252 | #endif /* CONFIG_PM */ | ||
253 | |||
254 | #ifdef CONFIG_PM_SLEEP | ||
244 | static int dwc3_pci_pm_dummy(struct device *dev) | 255 | static int dwc3_pci_pm_dummy(struct device *dev) |
245 | { | 256 | { |
246 | /* | 257 | /* |
@@ -253,11 +264,11 @@ static int dwc3_pci_pm_dummy(struct device *dev) | |||
253 | */ | 264 | */ |
254 | return 0; | 265 | return 0; |
255 | } | 266 | } |
256 | #endif /* CONFIG_PM */ | 267 | #endif /* CONFIG_PM_SLEEP */ |
257 | 268 | ||
258 | static struct dev_pm_ops dwc3_pci_dev_pm_ops = { | 269 | static struct dev_pm_ops dwc3_pci_dev_pm_ops = { |
259 | SET_SYSTEM_SLEEP_PM_OPS(dwc3_pci_pm_dummy, dwc3_pci_pm_dummy) | 270 | SET_SYSTEM_SLEEP_PM_OPS(dwc3_pci_pm_dummy, dwc3_pci_pm_dummy) |
260 | SET_RUNTIME_PM_OPS(dwc3_pci_runtime_suspend, dwc3_pci_pm_dummy, | 271 | SET_RUNTIME_PM_OPS(dwc3_pci_runtime_suspend, dwc3_pci_runtime_resume, |
261 | NULL) | 272 | NULL) |
262 | }; | 273 | }; |
263 | 274 | ||
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 8f8c2157910e..122e64df2f4d 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c | |||
@@ -829,7 +829,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, | |||
829 | if (!req->request.no_interrupt && !chain) | 829 | if (!req->request.no_interrupt && !chain) |
830 | trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI; | 830 | trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI; |
831 | 831 | ||
832 | if (last) | 832 | if (last && !usb_endpoint_xfer_isoc(dep->endpoint.desc)) |
833 | trb->ctrl |= DWC3_TRB_CTRL_LST; | 833 | trb->ctrl |= DWC3_TRB_CTRL_LST; |
834 | 834 | ||
835 | if (chain) | 835 | if (chain) |
@@ -1433,7 +1433,7 @@ static int dwc3_gadget_get_frame(struct usb_gadget *g) | |||
1433 | 1433 | ||
1434 | static int __dwc3_gadget_wakeup(struct dwc3 *dwc) | 1434 | static int __dwc3_gadget_wakeup(struct dwc3 *dwc) |
1435 | { | 1435 | { |
1436 | unsigned long timeout; | 1436 | int retries; |
1437 | 1437 | ||
1438 | int ret; | 1438 | int ret; |
1439 | u32 reg; | 1439 | u32 reg; |
@@ -1484,9 +1484,9 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc) | |||
1484 | } | 1484 | } |
1485 | 1485 | ||
1486 | /* poll until Link State changes to ON */ | 1486 | /* poll until Link State changes to ON */ |
1487 | timeout = jiffies + msecs_to_jiffies(100); | 1487 | retries = 20000; |
1488 | 1488 | ||
1489 | while (!time_after(jiffies, timeout)) { | 1489 | while (retries--) { |
1490 | reg = dwc3_readl(dwc->regs, DWC3_DSTS); | 1490 | reg = dwc3_readl(dwc->regs, DWC3_DSTS); |
1491 | 1491 | ||
1492 | /* in HS, means ON */ | 1492 | /* in HS, means ON */ |
@@ -1955,7 +1955,8 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc) | |||
1955 | 1955 | ||
1956 | static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, | 1956 | static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, |
1957 | struct dwc3_request *req, struct dwc3_trb *trb, | 1957 | struct dwc3_request *req, struct dwc3_trb *trb, |
1958 | const struct dwc3_event_depevt *event, int status) | 1958 | const struct dwc3_event_depevt *event, int status, |
1959 | int chain) | ||
1959 | { | 1960 | { |
1960 | unsigned int count; | 1961 | unsigned int count; |
1961 | unsigned int s_pkt = 0; | 1962 | unsigned int s_pkt = 0; |
@@ -1964,17 +1965,22 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, | |||
1964 | dep->queued_requests--; | 1965 | dep->queued_requests--; |
1965 | trace_dwc3_complete_trb(dep, trb); | 1966 | trace_dwc3_complete_trb(dep, trb); |
1966 | 1967 | ||
1968 | /* | ||
1969 | * If we're in the middle of series of chained TRBs and we | ||
1970 | * receive a short transfer along the way, DWC3 will skip | ||
1971 | * through all TRBs including the last TRB in the chain (the | ||
1972 | * where CHN bit is zero. DWC3 will also avoid clearing HWO | ||
1973 | * bit and SW has to do it manually. | ||
1974 | * | ||
1975 | * We're going to do that here to avoid problems of HW trying | ||
1976 | * to use bogus TRBs for transfers. | ||
1977 | */ | ||
1978 | if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO)) | ||
1979 | trb->ctrl &= ~DWC3_TRB_CTRL_HWO; | ||
1980 | |||
1967 | if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) | 1981 | if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) |
1968 | /* | 1982 | return 1; |
1969 | * We continue despite the error. There is not much we | 1983 | |
1970 | * can do. If we don't clean it up we loop forever. If | ||
1971 | * we skip the TRB then it gets overwritten after a | ||
1972 | * while since we use them in a ring buffer. A BUG() | ||
1973 | * would help. Lets hope that if this occurs, someone | ||
1974 | * fixes the root cause instead of looking away :) | ||
1975 | */ | ||
1976 | dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n", | ||
1977 | dep->name, trb); | ||
1978 | count = trb->size & DWC3_TRB_SIZE_MASK; | 1984 | count = trb->size & DWC3_TRB_SIZE_MASK; |
1979 | 1985 | ||
1980 | if (dep->direction) { | 1986 | if (dep->direction) { |
@@ -2013,15 +2019,7 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, | |||
2013 | s_pkt = 1; | 2019 | s_pkt = 1; |
2014 | } | 2020 | } |
2015 | 2021 | ||
2016 | /* | 2022 | if (s_pkt && !chain) |
2017 | * We assume here we will always receive the entire data block | ||
2018 | * which we should receive. Meaning, if we program RX to | ||
2019 | * receive 4K but we receive only 2K, we assume that's all we | ||
2020 | * should receive and we simply bounce the request back to the | ||
2021 | * gadget driver for further processing. | ||
2022 | */ | ||
2023 | req->request.actual += req->request.length - count; | ||
2024 | if (s_pkt) | ||
2025 | return 1; | 2023 | return 1; |
2026 | if ((event->status & DEPEVT_STATUS_LST) && | 2024 | if ((event->status & DEPEVT_STATUS_LST) && |
2027 | (trb->ctrl & (DWC3_TRB_CTRL_LST | | 2025 | (trb->ctrl & (DWC3_TRB_CTRL_LST | |
@@ -2040,13 +2038,17 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, | |||
2040 | struct dwc3_trb *trb; | 2038 | struct dwc3_trb *trb; |
2041 | unsigned int slot; | 2039 | unsigned int slot; |
2042 | unsigned int i; | 2040 | unsigned int i; |
2041 | int count = 0; | ||
2043 | int ret; | 2042 | int ret; |
2044 | 2043 | ||
2045 | do { | 2044 | do { |
2045 | int chain; | ||
2046 | |||
2046 | req = next_request(&dep->started_list); | 2047 | req = next_request(&dep->started_list); |
2047 | if (WARN_ON_ONCE(!req)) | 2048 | if (WARN_ON_ONCE(!req)) |
2048 | return 1; | 2049 | return 1; |
2049 | 2050 | ||
2051 | chain = req->request.num_mapped_sgs > 0; | ||
2050 | i = 0; | 2052 | i = 0; |
2051 | do { | 2053 | do { |
2052 | slot = req->first_trb_index + i; | 2054 | slot = req->first_trb_index + i; |
@@ -2054,13 +2056,22 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, | |||
2054 | slot++; | 2056 | slot++; |
2055 | slot %= DWC3_TRB_NUM; | 2057 | slot %= DWC3_TRB_NUM; |
2056 | trb = &dep->trb_pool[slot]; | 2058 | trb = &dep->trb_pool[slot]; |
2059 | count += trb->size & DWC3_TRB_SIZE_MASK; | ||
2057 | 2060 | ||
2058 | ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, | 2061 | ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, |
2059 | event, status); | 2062 | event, status, chain); |
2060 | if (ret) | 2063 | if (ret) |
2061 | break; | 2064 | break; |
2062 | } while (++i < req->request.num_mapped_sgs); | 2065 | } while (++i < req->request.num_mapped_sgs); |
2063 | 2066 | ||
2067 | /* | ||
2068 | * We assume here we will always receive the entire data block | ||
2069 | * which we should receive. Meaning, if we program RX to | ||
2070 | * receive 4K but we receive only 2K, we assume that's all we | ||
2071 | * should receive and we simply bounce the request back to the | ||
2072 | * gadget driver for further processing. | ||
2073 | */ | ||
2074 | req->request.actual += req->request.length - count; | ||
2064 | dwc3_gadget_giveback(dep, req, status); | 2075 | dwc3_gadget_giveback(dep, req, status); |
2065 | 2076 | ||
2066 | if (ret) | 2077 | if (ret) |
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index eb648485a58c..5ebe6af7976e 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c | |||
@@ -1913,6 +1913,8 @@ unknown: | |||
1913 | break; | 1913 | break; |
1914 | 1914 | ||
1915 | case USB_RECIP_ENDPOINT: | 1915 | case USB_RECIP_ENDPOINT: |
1916 | if (!cdev->config) | ||
1917 | break; | ||
1916 | endp = ((w_index & 0x80) >> 3) | (w_index & 0x0f); | 1918 | endp = ((w_index & 0x80) >> 3) | (w_index & 0x0f); |
1917 | list_for_each_entry(f, &cdev->config->functions, list) { | 1919 | list_for_each_entry(f, &cdev->config->functions, list) { |
1918 | if (test_bit(endp, f->endpoints)) | 1920 | if (test_bit(endp, f->endpoints)) |
@@ -2124,14 +2126,14 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev, | |||
2124 | 2126 | ||
2125 | cdev->os_desc_req = usb_ep_alloc_request(ep0, GFP_KERNEL); | 2127 | cdev->os_desc_req = usb_ep_alloc_request(ep0, GFP_KERNEL); |
2126 | if (!cdev->os_desc_req) { | 2128 | if (!cdev->os_desc_req) { |
2127 | ret = PTR_ERR(cdev->os_desc_req); | 2129 | ret = -ENOMEM; |
2128 | goto end; | 2130 | goto end; |
2129 | } | 2131 | } |
2130 | 2132 | ||
2131 | /* OS feature descriptor length <= 4kB */ | 2133 | /* OS feature descriptor length <= 4kB */ |
2132 | cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL); | 2134 | cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL); |
2133 | if (!cdev->os_desc_req->buf) { | 2135 | if (!cdev->os_desc_req->buf) { |
2134 | ret = PTR_ERR(cdev->os_desc_req->buf); | 2136 | ret = -ENOMEM; |
2135 | kfree(cdev->os_desc_req); | 2137 | kfree(cdev->os_desc_req); |
2136 | goto end; | 2138 | goto end; |
2137 | } | 2139 | } |
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 70cf3477f951..f9237fe2be05 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c | |||
@@ -1490,7 +1490,9 @@ void unregister_gadget_item(struct config_item *item) | |||
1490 | { | 1490 | { |
1491 | struct gadget_info *gi = to_gadget_info(item); | 1491 | struct gadget_info *gi = to_gadget_info(item); |
1492 | 1492 | ||
1493 | mutex_lock(&gi->lock); | ||
1493 | unregister_gadget(gi); | 1494 | unregister_gadget(gi); |
1495 | mutex_unlock(&gi->lock); | ||
1494 | } | 1496 | } |
1495 | EXPORT_SYMBOL_GPL(unregister_gadget_item); | 1497 | EXPORT_SYMBOL_GPL(unregister_gadget_item); |
1496 | 1498 | ||
diff --git a/drivers/usb/gadget/function/f_eem.c b/drivers/usb/gadget/function/f_eem.c index d58bfc32be9e..007ec6e4a5d4 100644 --- a/drivers/usb/gadget/function/f_eem.c +++ b/drivers/usb/gadget/function/f_eem.c | |||
@@ -341,11 +341,15 @@ static struct sk_buff *eem_wrap(struct gether *port, struct sk_buff *skb) | |||
341 | { | 341 | { |
342 | struct sk_buff *skb2 = NULL; | 342 | struct sk_buff *skb2 = NULL; |
343 | struct usb_ep *in = port->in_ep; | 343 | struct usb_ep *in = port->in_ep; |
344 | int padlen = 0; | 344 | int headroom, tailroom, padlen = 0; |
345 | u16 len = skb->len; | 345 | u16 len; |
346 | 346 | ||
347 | int headroom = skb_headroom(skb); | 347 | if (!skb) |
348 | int tailroom = skb_tailroom(skb); | 348 | return NULL; |
349 | |||
350 | len = skb->len; | ||
351 | headroom = skb_headroom(skb); | ||
352 | tailroom = skb_tailroom(skb); | ||
349 | 353 | ||
350 | /* When (len + EEM_HLEN + ETH_FCS_LEN) % in->maxpacket) is 0, | 354 | /* When (len + EEM_HLEN + ETH_FCS_LEN) % in->maxpacket) is 0, |
351 | * stick two bytes of zero-length EEM packet on the end. | 355 | * stick two bytes of zero-length EEM packet on the end. |
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c index c8005823b190..16562e461121 100644 --- a/drivers/usb/gadget/function/f_rndis.c +++ b/drivers/usb/gadget/function/f_rndis.c | |||
@@ -374,6 +374,9 @@ static struct sk_buff *rndis_add_header(struct gether *port, | |||
374 | { | 374 | { |
375 | struct sk_buff *skb2; | 375 | struct sk_buff *skb2; |
376 | 376 | ||
377 | if (!skb) | ||
378 | return NULL; | ||
379 | |||
377 | skb2 = skb_realloc_headroom(skb, sizeof(struct rndis_packet_msg_type)); | 380 | skb2 = skb_realloc_headroom(skb, sizeof(struct rndis_packet_msg_type)); |
378 | rndis_add_hdr(skb2); | 381 | rndis_add_hdr(skb2); |
379 | 382 | ||
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c index 943c21aafd3b..ab6ac1b74ac0 100644 --- a/drivers/usb/gadget/function/rndis.c +++ b/drivers/usb/gadget/function/rndis.c | |||
@@ -680,6 +680,12 @@ static int rndis_reset_response(struct rndis_params *params, | |||
680 | { | 680 | { |
681 | rndis_reset_cmplt_type *resp; | 681 | rndis_reset_cmplt_type *resp; |
682 | rndis_resp_t *r; | 682 | rndis_resp_t *r; |
683 | u8 *xbuf; | ||
684 | u32 length; | ||
685 | |||
686 | /* drain the response queue */ | ||
687 | while ((xbuf = rndis_get_next_response(params, &length))) | ||
688 | rndis_free_response(params, xbuf); | ||
683 | 689 | ||
684 | r = rndis_add_response(params, sizeof(rndis_reset_cmplt_type)); | 690 | r = rndis_add_response(params, sizeof(rndis_reset_cmplt_type)); |
685 | if (!r) | 691 | if (!r) |
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index a3f7e7c55ebb..5f562c1ec795 100644 --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c | |||
@@ -556,7 +556,8 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb, | |||
556 | /* Multi frame CDC protocols may store the frame for | 556 | /* Multi frame CDC protocols may store the frame for |
557 | * later which is not a dropped frame. | 557 | * later which is not a dropped frame. |
558 | */ | 558 | */ |
559 | if (dev->port_usb->supports_multi_frame) | 559 | if (dev->port_usb && |
560 | dev->port_usb->supports_multi_frame) | ||
560 | goto multiframe; | 561 | goto multiframe; |
561 | goto drop; | 562 | goto drop; |
562 | } | 563 | } |
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c index 6ded6345cd09..e0cd1e4c8892 100644 --- a/drivers/usb/gadget/function/u_serial.c +++ b/drivers/usb/gadget/function/u_serial.c | |||
@@ -375,10 +375,15 @@ __acquires(&port->port_lock) | |||
375 | */ | 375 | */ |
376 | { | 376 | { |
377 | struct list_head *pool = &port->write_pool; | 377 | struct list_head *pool = &port->write_pool; |
378 | struct usb_ep *in = port->port_usb->in; | 378 | struct usb_ep *in; |
379 | int status = 0; | 379 | int status = 0; |
380 | bool do_tty_wake = false; | 380 | bool do_tty_wake = false; |
381 | 381 | ||
382 | if (!port->port_usb) | ||
383 | return status; | ||
384 | |||
385 | in = port->port_usb->in; | ||
386 | |||
382 | while (!port->write_busy && !list_empty(pool)) { | 387 | while (!port->write_busy && !list_empty(pool)) { |
383 | struct usb_request *req; | 388 | struct usb_request *req; |
384 | int len; | 389 | int len; |
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c index 66753ba7a42e..31125a4a2658 100644 --- a/drivers/usb/gadget/function/uvc_configfs.c +++ b/drivers/usb/gadget/function/uvc_configfs.c | |||
@@ -2023,7 +2023,7 @@ static int uvcg_streaming_class_allow_link(struct config_item *src, | |||
2023 | if (!data) { | 2023 | if (!data) { |
2024 | kfree(*class_array); | 2024 | kfree(*class_array); |
2025 | *class_array = NULL; | 2025 | *class_array = NULL; |
2026 | ret = PTR_ERR(data); | 2026 | ret = -ENOMEM; |
2027 | goto unlock; | 2027 | goto unlock; |
2028 | } | 2028 | } |
2029 | cl_arr = *class_array; | 2029 | cl_arr = *class_array; |
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index aa3707bdebb4..16104b5ebdcb 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c | |||
@@ -542,7 +542,7 @@ static ssize_t ep_aio(struct kiocb *iocb, | |||
542 | */ | 542 | */ |
543 | spin_lock_irq(&epdata->dev->lock); | 543 | spin_lock_irq(&epdata->dev->lock); |
544 | value = -ENODEV; | 544 | value = -ENODEV; |
545 | if (unlikely(epdata->ep)) | 545 | if (unlikely(epdata->ep == NULL)) |
546 | goto fail; | 546 | goto fail; |
547 | 547 | ||
548 | req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC); | 548 | req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC); |
@@ -606,7 +606,7 @@ ep_read_iter(struct kiocb *iocb, struct iov_iter *to) | |||
606 | } | 606 | } |
607 | if (is_sync_kiocb(iocb)) { | 607 | if (is_sync_kiocb(iocb)) { |
608 | value = ep_io(epdata, buf, len); | 608 | value = ep_io(epdata, buf, len); |
609 | if (value >= 0 && copy_to_iter(buf, value, to)) | 609 | if (value >= 0 && (copy_to_iter(buf, value, to) != value)) |
610 | value = -EFAULT; | 610 | value = -EFAULT; |
611 | } else { | 611 | } else { |
612 | struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL); | 612 | struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL); |
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index ff8685ea7219..40c04bb25f2f 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c | |||
@@ -827,7 +827,7 @@ void usb_gadget_unmap_request_by_dev(struct device *dev, | |||
827 | return; | 827 | return; |
828 | 828 | ||
829 | if (req->num_mapped_sgs) { | 829 | if (req->num_mapped_sgs) { |
830 | dma_unmap_sg(dev, req->sg, req->num_mapped_sgs, | 830 | dma_unmap_sg(dev, req->sg, req->num_sgs, |
831 | is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); | 831 | is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); |
832 | 832 | ||
833 | req->num_mapped_sgs = 0; | 833 | req->num_mapped_sgs = 0; |
@@ -1145,7 +1145,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, | |||
1145 | if (ret != -EPROBE_DEFER) | 1145 | if (ret != -EPROBE_DEFER) |
1146 | list_del(&driver->pending); | 1146 | list_del(&driver->pending); |
1147 | if (ret) | 1147 | if (ret) |
1148 | goto err4; | 1148 | goto err5; |
1149 | break; | 1149 | break; |
1150 | } | 1150 | } |
1151 | } | 1151 | } |
@@ -1154,6 +1154,9 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, | |||
1154 | 1154 | ||
1155 | return 0; | 1155 | return 0; |
1156 | 1156 | ||
1157 | err5: | ||
1158 | device_del(&udc->dev); | ||
1159 | |||
1157 | err4: | 1160 | err4: |
1158 | list_del(&udc->list); | 1161 | list_del(&udc->list); |
1159 | mutex_unlock(&udc_lock); | 1162 | mutex_unlock(&udc_lock); |
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c index 93d28cb00b76..8bb011ea78f7 100644 --- a/drivers/usb/gadget/udc/fsl_qe_udc.c +++ b/drivers/usb/gadget/udc/fsl_qe_udc.c | |||
@@ -1878,11 +1878,8 @@ static int qe_get_frame(struct usb_gadget *gadget) | |||
1878 | 1878 | ||
1879 | tmp = in_be16(&udc->usb_param->frame_n); | 1879 | tmp = in_be16(&udc->usb_param->frame_n); |
1880 | if (tmp & 0x8000) | 1880 | if (tmp & 0x8000) |
1881 | tmp = tmp & 0x07ff; | 1881 | return tmp & 0x07ff; |
1882 | else | 1882 | return -EINVAL; |
1883 | tmp = -EINVAL; | ||
1884 | |||
1885 | return (int)tmp; | ||
1886 | } | 1883 | } |
1887 | 1884 | ||
1888 | static int fsl_qe_start(struct usb_gadget *gadget, | 1885 | static int fsl_qe_start(struct usb_gadget *gadget, |
@@ -2053,7 +2050,7 @@ static void setup_received_handle(struct qe_udc *udc, | |||
2053 | struct qe_ep *ep; | 2050 | struct qe_ep *ep; |
2054 | 2051 | ||
2055 | if (wValue != 0 || wLength != 0 | 2052 | if (wValue != 0 || wLength != 0 |
2056 | || pipe > USB_MAX_ENDPOINTS) | 2053 | || pipe >= USB_MAX_ENDPOINTS) |
2057 | break; | 2054 | break; |
2058 | ep = &udc->eps[pipe]; | 2055 | ep = &udc->eps[pipe]; |
2059 | 2056 | ||
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index 93a3bec81df7..fb8fc34827ab 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c | |||
@@ -106,6 +106,7 @@ | |||
106 | 106 | ||
107 | /* DRD_CON */ | 107 | /* DRD_CON */ |
108 | #define DRD_CON_PERI_CON BIT(24) | 108 | #define DRD_CON_PERI_CON BIT(24) |
109 | #define DRD_CON_VBOUT BIT(0) | ||
109 | 110 | ||
110 | /* USB_INT_ENA_1 and USB_INT_STA_1 */ | 111 | /* USB_INT_ENA_1 and USB_INT_STA_1 */ |
111 | #define USB_INT_1_B3_PLLWKUP BIT(31) | 112 | #define USB_INT_1_B3_PLLWKUP BIT(31) |
@@ -363,6 +364,7 @@ static void usb3_init_epc_registers(struct renesas_usb3 *usb3) | |||
363 | { | 364 | { |
364 | /* FIXME: How to change host / peripheral mode as well? */ | 365 | /* FIXME: How to change host / peripheral mode as well? */ |
365 | usb3_set_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON); | 366 | usb3_set_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON); |
367 | usb3_clear_bit(usb3, DRD_CON_VBOUT, USB3_DRD_CON); | ||
366 | 368 | ||
367 | usb3_write(usb3, ~0, USB3_USB_INT_STA_1); | 369 | usb3_write(usb3, ~0, USB3_USB_INT_STA_1); |
368 | usb3_enable_irq_1(usb3, USB_INT_1_VBUS_CNG); | 370 | usb3_enable_irq_1(usb3, USB_INT_1_VBUS_CNG); |
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index a962b89b65a6..1e5f529d51a2 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c | |||
@@ -332,11 +332,11 @@ static void ehci_turn_off_all_ports(struct ehci_hcd *ehci) | |||
332 | int port = HCS_N_PORTS(ehci->hcs_params); | 332 | int port = HCS_N_PORTS(ehci->hcs_params); |
333 | 333 | ||
334 | while (port--) { | 334 | while (port--) { |
335 | ehci_writel(ehci, PORT_RWC_BITS, | ||
336 | &ehci->regs->port_status[port]); | ||
337 | spin_unlock_irq(&ehci->lock); | 335 | spin_unlock_irq(&ehci->lock); |
338 | ehci_port_power(ehci, port, false); | 336 | ehci_port_power(ehci, port, false); |
339 | spin_lock_irq(&ehci->lock); | 337 | spin_lock_irq(&ehci->lock); |
338 | ehci_writel(ehci, PORT_RWC_BITS, | ||
339 | &ehci->regs->port_status[port]); | ||
340 | } | 340 | } |
341 | } | 341 | } |
342 | 342 | ||
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c index c369c29e496d..2f7690092a7f 100644 --- a/drivers/usb/host/max3421-hcd.c +++ b/drivers/usb/host/max3421-hcd.c | |||
@@ -1675,7 +1675,7 @@ max3421_gpout_set_value(struct usb_hcd *hcd, u8 pin_number, u8 value) | |||
1675 | if (pin_number > 7) | 1675 | if (pin_number > 7) |
1676 | return; | 1676 | return; |
1677 | 1677 | ||
1678 | mask = 1u << pin_number; | 1678 | mask = 1u << (pin_number % 4); |
1679 | idx = pin_number / 4; | 1679 | idx = pin_number / 4; |
1680 | 1680 | ||
1681 | if (value) | 1681 | if (value) |
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index d61fcc48099e..730b9fd26685 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c | |||
@@ -386,6 +386,9 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend) | |||
386 | 386 | ||
387 | ret = 0; | 387 | ret = 0; |
388 | virt_dev = xhci->devs[slot_id]; | 388 | virt_dev = xhci->devs[slot_id]; |
389 | if (!virt_dev) | ||
390 | return -ENODEV; | ||
391 | |||
389 | cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO); | 392 | cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO); |
390 | if (!cmd) { | 393 | if (!cmd) { |
391 | xhci_dbg(xhci, "Couldn't allocate command structure.\n"); | 394 | xhci_dbg(xhci, "Couldn't allocate command structure.\n"); |
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 4fd041bec332..d7b0f97abbad 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
@@ -314,11 +314,12 @@ static void xhci_pci_remove(struct pci_dev *dev) | |||
314 | usb_remove_hcd(xhci->shared_hcd); | 314 | usb_remove_hcd(xhci->shared_hcd); |
315 | usb_put_hcd(xhci->shared_hcd); | 315 | usb_put_hcd(xhci->shared_hcd); |
316 | } | 316 | } |
317 | usb_hcd_pci_remove(dev); | ||
318 | 317 | ||
319 | /* Workaround for spurious wakeups at shutdown with HSW */ | 318 | /* Workaround for spurious wakeups at shutdown with HSW */ |
320 | if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) | 319 | if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) |
321 | pci_set_power_state(dev, PCI_D3hot); | 320 | pci_set_power_state(dev, PCI_D3hot); |
321 | |||
322 | usb_hcd_pci_remove(dev); | ||
322 | } | 323 | } |
323 | 324 | ||
324 | #ifdef CONFIG_PM | 325 | #ifdef CONFIG_PM |
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 918e0c739b79..797137e26549 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
@@ -850,6 +850,10 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg) | |||
850 | spin_lock_irqsave(&xhci->lock, flags); | 850 | spin_lock_irqsave(&xhci->lock, flags); |
851 | 851 | ||
852 | ep->stop_cmds_pending--; | 852 | ep->stop_cmds_pending--; |
853 | if (xhci->xhc_state & XHCI_STATE_REMOVING) { | ||
854 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
855 | return; | ||
856 | } | ||
853 | if (xhci->xhc_state & XHCI_STATE_DYING) { | 857 | if (xhci->xhc_state & XHCI_STATE_DYING) { |
854 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, | 858 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
855 | "Stop EP timer ran, but another timer marked " | 859 | "Stop EP timer ran, but another timer marked " |
@@ -903,7 +907,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg) | |||
903 | spin_unlock_irqrestore(&xhci->lock, flags); | 907 | spin_unlock_irqrestore(&xhci->lock, flags); |
904 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, | 908 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
905 | "Calling usb_hc_died()"); | 909 | "Calling usb_hc_died()"); |
906 | usb_hc_died(xhci_to_hcd(xhci)->primary_hcd); | 910 | usb_hc_died(xhci_to_hcd(xhci)); |
907 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, | 911 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
908 | "xHCI host controller is dead."); | 912 | "xHCI host controller is dead."); |
909 | } | 913 | } |
@@ -1334,12 +1338,6 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, | |||
1334 | 1338 | ||
1335 | cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list); | 1339 | cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list); |
1336 | 1340 | ||
1337 | if (cmd->command_trb != xhci->cmd_ring->dequeue) { | ||
1338 | xhci_err(xhci, | ||
1339 | "Command completion event does not match command\n"); | ||
1340 | return; | ||
1341 | } | ||
1342 | |||
1343 | del_timer(&xhci->cmd_timer); | 1341 | del_timer(&xhci->cmd_timer); |
1344 | 1342 | ||
1345 | trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event); | 1343 | trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event); |
@@ -1351,6 +1349,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, | |||
1351 | xhci_handle_stopped_cmd_ring(xhci, cmd); | 1349 | xhci_handle_stopped_cmd_ring(xhci, cmd); |
1352 | return; | 1350 | return; |
1353 | } | 1351 | } |
1352 | |||
1353 | if (cmd->command_trb != xhci->cmd_ring->dequeue) { | ||
1354 | xhci_err(xhci, | ||
1355 | "Command completion event does not match command\n"); | ||
1356 | return; | ||
1357 | } | ||
1358 | |||
1354 | /* | 1359 | /* |
1355 | * Host aborted the command ring, check if the current command was | 1360 | * Host aborted the command ring, check if the current command was |
1356 | * supposed to be aborted, otherwise continue normally. | 1361 | * supposed to be aborted, otherwise continue normally. |
@@ -3243,7 +3248,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
3243 | send_addr = addr; | 3248 | send_addr = addr; |
3244 | 3249 | ||
3245 | /* Queue the TRBs, even if they are zero-length */ | 3250 | /* Queue the TRBs, even if they are zero-length */ |
3246 | for (enqd_len = 0; enqd_len < full_len; enqd_len += trb_buff_len) { | 3251 | for (enqd_len = 0; first_trb || enqd_len < full_len; |
3252 | enqd_len += trb_buff_len) { | ||
3247 | field = TRB_TYPE(TRB_NORMAL); | 3253 | field = TRB_TYPE(TRB_NORMAL); |
3248 | 3254 | ||
3249 | /* TRB buffer should not cross 64KB boundaries */ | 3255 | /* TRB buffer should not cross 64KB boundaries */ |
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c index 52c27cab78c3..9b5b3b2281ca 100644 --- a/drivers/usb/misc/ftdi-elan.c +++ b/drivers/usb/misc/ftdi-elan.c | |||
@@ -665,7 +665,7 @@ static ssize_t ftdi_elan_read(struct file *file, char __user *buffer, | |||
665 | { | 665 | { |
666 | char data[30 *3 + 4]; | 666 | char data[30 *3 + 4]; |
667 | char *d = data; | 667 | char *d = data; |
668 | int m = (sizeof(data) - 1) / 3; | 668 | int m = (sizeof(data) - 1) / 3 - 1; |
669 | int bytes_read = 0; | 669 | int bytes_read = 0; |
670 | int retry_on_empty = 10; | 670 | int retry_on_empty = 10; |
671 | int retry_on_timeout = 5; | 671 | int retry_on_timeout = 5; |
@@ -1684,7 +1684,7 @@ wait:if (ftdi->disconnected > 0) { | |||
1684 | int i = 0; | 1684 | int i = 0; |
1685 | char data[30 *3 + 4]; | 1685 | char data[30 *3 + 4]; |
1686 | char *d = data; | 1686 | char *d = data; |
1687 | int m = (sizeof(data) - 1) / 3; | 1687 | int m = (sizeof(data) - 1) / 3 - 1; |
1688 | int l = 0; | 1688 | int l = 0; |
1689 | struct u132_target *target = &ftdi->target[ed]; | 1689 | struct u132_target *target = &ftdi->target[ed]; |
1690 | struct u132_command *command = &ftdi->command[ | 1690 | struct u132_command *command = &ftdi->command[ |
@@ -1876,7 +1876,7 @@ more:{ | |||
1876 | if (packet_bytes > 2) { | 1876 | if (packet_bytes > 2) { |
1877 | char diag[30 *3 + 4]; | 1877 | char diag[30 *3 + 4]; |
1878 | char *d = diag; | 1878 | char *d = diag; |
1879 | int m = (sizeof(diag) - 1) / 3; | 1879 | int m = (sizeof(diag) - 1) / 3 - 1; |
1880 | char *b = ftdi->bulk_in_buffer; | 1880 | char *b = ftdi->bulk_in_buffer; |
1881 | int bytes_read = 0; | 1881 | int bytes_read = 0; |
1882 | diag[0] = 0; | 1882 | diag[0] = 0; |
@@ -2053,7 +2053,7 @@ static int ftdi_elan_synchronize(struct usb_ftdi *ftdi) | |||
2053 | if (packet_bytes > 2) { | 2053 | if (packet_bytes > 2) { |
2054 | char diag[30 *3 + 4]; | 2054 | char diag[30 *3 + 4]; |
2055 | char *d = diag; | 2055 | char *d = diag; |
2056 | int m = (sizeof(diag) - 1) / 3; | 2056 | int m = (sizeof(diag) - 1) / 3 - 1; |
2057 | char *b = ftdi->bulk_in_buffer; | 2057 | char *b = ftdi->bulk_in_buffer; |
2058 | int bytes_read = 0; | 2058 | int bytes_read = 0; |
2059 | unsigned char c = 0; | 2059 | unsigned char c = 0; |
@@ -2155,7 +2155,7 @@ more:{ | |||
2155 | if (packet_bytes > 2) { | 2155 | if (packet_bytes > 2) { |
2156 | char diag[30 *3 + 4]; | 2156 | char diag[30 *3 + 4]; |
2157 | char *d = diag; | 2157 | char *d = diag; |
2158 | int m = (sizeof(diag) - 1) / 3; | 2158 | int m = (sizeof(diag) - 1) / 3 - 1; |
2159 | char *b = ftdi->bulk_in_buffer; | 2159 | char *b = ftdi->bulk_in_buffer; |
2160 | int bytes_read = 0; | 2160 | int bytes_read = 0; |
2161 | diag[0] = 0; | 2161 | diag[0] = 0; |
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index 6b978f04b8d7..5c8210dc6fd9 100644 --- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c | |||
@@ -585,7 +585,6 @@ static void sg_timeout(unsigned long _req) | |||
585 | { | 585 | { |
586 | struct usb_sg_request *req = (struct usb_sg_request *) _req; | 586 | struct usb_sg_request *req = (struct usb_sg_request *) _req; |
587 | 587 | ||
588 | req->status = -ETIMEDOUT; | ||
589 | usb_sg_cancel(req); | 588 | usb_sg_cancel(req); |
590 | } | 589 | } |
591 | 590 | ||
@@ -616,8 +615,10 @@ static int perform_sglist( | |||
616 | mod_timer(&sg_timer, jiffies + | 615 | mod_timer(&sg_timer, jiffies + |
617 | msecs_to_jiffies(SIMPLE_IO_TIMEOUT)); | 616 | msecs_to_jiffies(SIMPLE_IO_TIMEOUT)); |
618 | usb_sg_wait(req); | 617 | usb_sg_wait(req); |
619 | del_timer_sync(&sg_timer); | 618 | if (!del_timer_sync(&sg_timer)) |
620 | retval = req->status; | 619 | retval = -ETIMEDOUT; |
620 | else | ||
621 | retval = req->status; | ||
621 | 622 | ||
622 | /* FIXME check resulting data pattern */ | 623 | /* FIXME check resulting data pattern */ |
623 | 624 | ||
@@ -2602,7 +2603,7 @@ usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf) | |||
2602 | ktime_get_ts64(&start); | 2603 | ktime_get_ts64(&start); |
2603 | 2604 | ||
2604 | retval = usbtest_do_ioctl(intf, param_32); | 2605 | retval = usbtest_do_ioctl(intf, param_32); |
2605 | if (retval) | 2606 | if (retval < 0) |
2606 | goto free_mutex; | 2607 | goto free_mutex; |
2607 | 2608 | ||
2608 | ktime_get_ts64(&end); | 2609 | ktime_get_ts64(&end); |
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig index 886526b5fcdd..73cfa13fc0dc 100644 --- a/drivers/usb/musb/Kconfig +++ b/drivers/usb/musb/Kconfig | |||
@@ -87,7 +87,7 @@ config USB_MUSB_DA8XX | |||
87 | config USB_MUSB_TUSB6010 | 87 | config USB_MUSB_TUSB6010 |
88 | tristate "TUSB6010" | 88 | tristate "TUSB6010" |
89 | depends on HAS_IOMEM | 89 | depends on HAS_IOMEM |
90 | depends on ARCH_OMAP2PLUS || COMPILE_TEST | 90 | depends on (ARCH_OMAP2PLUS || COMPILE_TEST) && !BLACKFIN |
91 | depends on NOP_USB_XCEIV = USB_MUSB_HDRC # both built-in or both modules | 91 | depends on NOP_USB_XCEIV = USB_MUSB_HDRC # both built-in or both modules |
92 | 92 | ||
93 | config USB_MUSB_OMAP2PLUS | 93 | config USB_MUSB_OMAP2PLUS |
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c index 192248f974ec..fe08e776fec3 100644 --- a/drivers/usb/musb/musb_virthub.c +++ b/drivers/usb/musb/musb_virthub.c | |||
@@ -290,6 +290,7 @@ int musb_hub_control( | |||
290 | u32 temp; | 290 | u32 temp; |
291 | int retval = 0; | 291 | int retval = 0; |
292 | unsigned long flags; | 292 | unsigned long flags; |
293 | bool start_musb = false; | ||
293 | 294 | ||
294 | spin_lock_irqsave(&musb->lock, flags); | 295 | spin_lock_irqsave(&musb->lock, flags); |
295 | 296 | ||
@@ -390,7 +391,7 @@ int musb_hub_control( | |||
390 | * logic relating to VBUS power-up. | 391 | * logic relating to VBUS power-up. |
391 | */ | 392 | */ |
392 | if (!hcd->self.is_b_host && musb_has_gadget(musb)) | 393 | if (!hcd->self.is_b_host && musb_has_gadget(musb)) |
393 | musb_start(musb); | 394 | start_musb = true; |
394 | break; | 395 | break; |
395 | case USB_PORT_FEAT_RESET: | 396 | case USB_PORT_FEAT_RESET: |
396 | musb_port_reset(musb, true); | 397 | musb_port_reset(musb, true); |
@@ -451,5 +452,9 @@ error: | |||
451 | retval = -EPIPE; | 452 | retval = -EPIPE; |
452 | } | 453 | } |
453 | spin_unlock_irqrestore(&musb->lock, flags); | 454 | spin_unlock_irqrestore(&musb->lock, flags); |
455 | |||
456 | if (start_musb) | ||
457 | musb_start(musb); | ||
458 | |||
454 | return retval; | 459 | return retval; |
455 | } | 460 | } |
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c index 980c9dee09eb..427efb5eebae 100644 --- a/drivers/usb/phy/phy-generic.c +++ b/drivers/usb/phy/phy-generic.c | |||
@@ -144,14 +144,18 @@ static irqreturn_t nop_gpio_vbus_thread(int irq, void *data) | |||
144 | int usb_gen_phy_init(struct usb_phy *phy) | 144 | int usb_gen_phy_init(struct usb_phy *phy) |
145 | { | 145 | { |
146 | struct usb_phy_generic *nop = dev_get_drvdata(phy->dev); | 146 | struct usb_phy_generic *nop = dev_get_drvdata(phy->dev); |
147 | int ret; | ||
147 | 148 | ||
148 | if (!IS_ERR(nop->vcc)) { | 149 | if (!IS_ERR(nop->vcc)) { |
149 | if (regulator_enable(nop->vcc)) | 150 | if (regulator_enable(nop->vcc)) |
150 | dev_err(phy->dev, "Failed to enable power\n"); | 151 | dev_err(phy->dev, "Failed to enable power\n"); |
151 | } | 152 | } |
152 | 153 | ||
153 | if (!IS_ERR(nop->clk)) | 154 | if (!IS_ERR(nop->clk)) { |
154 | clk_prepare_enable(nop->clk); | 155 | ret = clk_prepare_enable(nop->clk); |
156 | if (ret) | ||
157 | return ret; | ||
158 | } | ||
155 | 159 | ||
156 | nop_reset(nop); | 160 | nop_reset(nop); |
157 | 161 | ||
diff --git a/drivers/usb/phy/phy-omap-otg.c b/drivers/usb/phy/phy-omap-otg.c index 6f6d2a7fd5a0..6523af4f8f93 100644 --- a/drivers/usb/phy/phy-omap-otg.c +++ b/drivers/usb/phy/phy-omap-otg.c | |||
@@ -140,6 +140,8 @@ static int omap_otg_probe(struct platform_device *pdev) | |||
140 | (rev >> 4) & 0xf, rev & 0xf, config->extcon, otg_dev->id, | 140 | (rev >> 4) & 0xf, rev & 0xf, config->extcon, otg_dev->id, |
141 | otg_dev->vbus); | 141 | otg_dev->vbus); |
142 | 142 | ||
143 | platform_set_drvdata(pdev, otg_dev); | ||
144 | |||
143 | return 0; | 145 | return 0; |
144 | } | 146 | } |
145 | 147 | ||
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c index 8fbbc2d32371..ac67bab9124c 100644 --- a/drivers/usb/renesas_usbhs/common.c +++ b/drivers/usb/renesas_usbhs/common.c | |||
@@ -514,7 +514,8 @@ static struct renesas_usbhs_platform_info *usbhs_parse_dt(struct device *dev) | |||
514 | if (gpio > 0) | 514 | if (gpio > 0) |
515 | dparam->enable_gpio = gpio; | 515 | dparam->enable_gpio = gpio; |
516 | 516 | ||
517 | if (dparam->type == USBHS_TYPE_RCAR_GEN2) | 517 | if (dparam->type == USBHS_TYPE_RCAR_GEN2 || |
518 | dparam->type == USBHS_TYPE_RCAR_GEN3) | ||
518 | dparam->has_usb_dmac = 1; | 519 | dparam->has_usb_dmac = 1; |
519 | 520 | ||
520 | return info; | 521 | return info; |
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index 280ed5ff021b..857e78337324 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c | |||
@@ -871,7 +871,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done) | |||
871 | 871 | ||
872 | /* use PIO if packet is less than pio_dma_border or pipe is DCP */ | 872 | /* use PIO if packet is less than pio_dma_border or pipe is DCP */ |
873 | if ((len < usbhs_get_dparam(priv, pio_dma_border)) || | 873 | if ((len < usbhs_get_dparam(priv, pio_dma_border)) || |
874 | usbhs_pipe_is_dcp(pipe)) | 874 | usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC)) |
875 | goto usbhsf_pio_prepare_push; | 875 | goto usbhsf_pio_prepare_push; |
876 | 876 | ||
877 | /* check data length if this driver don't use USB-DMAC */ | 877 | /* check data length if this driver don't use USB-DMAC */ |
@@ -976,7 +976,7 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt, | |||
976 | 976 | ||
977 | /* use PIO if packet is less than pio_dma_border or pipe is DCP */ | 977 | /* use PIO if packet is less than pio_dma_border or pipe is DCP */ |
978 | if ((pkt->length < usbhs_get_dparam(priv, pio_dma_border)) || | 978 | if ((pkt->length < usbhs_get_dparam(priv, pio_dma_border)) || |
979 | usbhs_pipe_is_dcp(pipe)) | 979 | usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC)) |
980 | goto usbhsf_pio_prepare_pop; | 980 | goto usbhsf_pio_prepare_pop; |
981 | 981 | ||
982 | fifo = usbhsf_get_dma_fifo(priv, pkt); | 982 | fifo = usbhsf_get_dma_fifo(priv, pkt); |
diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c index d4be5d594896..28965ef4f824 100644 --- a/drivers/usb/renesas_usbhs/mod.c +++ b/drivers/usb/renesas_usbhs/mod.c | |||
@@ -282,9 +282,16 @@ static irqreturn_t usbhs_interrupt(int irq, void *data) | |||
282 | if (usbhs_mod_is_host(priv)) | 282 | if (usbhs_mod_is_host(priv)) |
283 | usbhs_write(priv, INTSTS1, ~irq_state.intsts1 & INTSTS1_MAGIC); | 283 | usbhs_write(priv, INTSTS1, ~irq_state.intsts1 & INTSTS1_MAGIC); |
284 | 284 | ||
285 | usbhs_write(priv, BRDYSTS, ~irq_state.brdysts); | 285 | /* |
286 | * The driver should not clear the xxxSTS after the line of | ||
287 | * "call irq callback functions" because each "if" statement is | ||
288 | * possible to call the callback function for avoiding any side effects. | ||
289 | */ | ||
290 | if (irq_state.intsts0 & BRDY) | ||
291 | usbhs_write(priv, BRDYSTS, ~irq_state.brdysts); | ||
286 | usbhs_write(priv, NRDYSTS, ~irq_state.nrdysts); | 292 | usbhs_write(priv, NRDYSTS, ~irq_state.nrdysts); |
287 | usbhs_write(priv, BEMPSTS, ~irq_state.bempsts); | 293 | if (irq_state.intsts0 & BEMP) |
294 | usbhs_write(priv, BEMPSTS, ~irq_state.bempsts); | ||
288 | 295 | ||
289 | /* | 296 | /* |
290 | * call irq callback functions | 297 | * call irq callback functions |
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c index 50f3363cc382..c4c64740a3e7 100644 --- a/drivers/usb/renesas_usbhs/mod_gadget.c +++ b/drivers/usb/renesas_usbhs/mod_gadget.c | |||
@@ -617,10 +617,13 @@ static int usbhsg_ep_enable(struct usb_ep *ep, | |||
617 | * use dmaengine if possible. | 617 | * use dmaengine if possible. |
618 | * It will use pio handler if impossible. | 618 | * It will use pio handler if impossible. |
619 | */ | 619 | */ |
620 | if (usb_endpoint_dir_in(desc)) | 620 | if (usb_endpoint_dir_in(desc)) { |
621 | pipe->handler = &usbhs_fifo_dma_push_handler; | 621 | pipe->handler = &usbhs_fifo_dma_push_handler; |
622 | else | 622 | } else { |
623 | pipe->handler = &usbhs_fifo_dma_pop_handler; | 623 | pipe->handler = &usbhs_fifo_dma_pop_handler; |
624 | usbhs_xxxsts_clear(priv, BRDYSTS, | ||
625 | usbhs_pipe_number(pipe)); | ||
626 | } | ||
624 | 627 | ||
625 | ret = 0; | 628 | ret = 0; |
626 | } | 629 | } |
@@ -1073,7 +1076,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv) | |||
1073 | 1076 | ||
1074 | gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED); | 1077 | gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED); |
1075 | dev_info(dev, "%stransceiver found\n", | 1078 | dev_info(dev, "%stransceiver found\n", |
1076 | gpriv->transceiver ? "" : "no "); | 1079 | !IS_ERR(gpriv->transceiver) ? "" : "no "); |
1077 | 1080 | ||
1078 | /* | 1081 | /* |
1079 | * CAUTION | 1082 | * CAUTION |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 00820809139a..b2d767e743fc 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -648,6 +648,8 @@ static const struct usb_device_id id_table_combined[] = { | |||
648 | { USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) }, | 648 | { USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) }, |
649 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) }, | 649 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) }, |
650 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) }, | 650 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) }, |
651 | { USB_DEVICE(FTDI_VID, FTDI_PALMSENS_PID) }, | ||
652 | { USB_DEVICE(FTDI_VID, FTDI_IVIUM_XSTAT_PID) }, | ||
651 | { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, | 653 | { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, |
652 | { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, | 654 | { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, |
653 | { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) }, | 655 | { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) }, |
@@ -1008,6 +1010,7 @@ static const struct usb_device_id id_table_combined[] = { | |||
1008 | { USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) }, | 1010 | { USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) }, |
1009 | { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) }, | 1011 | { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) }, |
1010 | { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) }, | 1012 | { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) }, |
1013 | { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) }, | ||
1011 | { } /* Terminating entry */ | 1014 | { } /* Terminating entry */ |
1012 | }; | 1015 | }; |
1013 | 1016 | ||
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index c5d6c1e73e8e..f87a938cf005 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h | |||
@@ -406,6 +406,12 @@ | |||
406 | #define FTDI_4N_GALAXY_DE_3_PID 0xF3C2 | 406 | #define FTDI_4N_GALAXY_DE_3_PID 0xF3C2 |
407 | 407 | ||
408 | /* | 408 | /* |
409 | * Ivium Technologies product IDs | ||
410 | */ | ||
411 | #define FTDI_PALMSENS_PID 0xf440 | ||
412 | #define FTDI_IVIUM_XSTAT_PID 0xf441 | ||
413 | |||
414 | /* | ||
409 | * Linx Technologies product ids | 415 | * Linx Technologies product ids |
410 | */ | 416 | */ |
411 | #define LINX_SDMUSBQSS_PID 0xF448 /* Linx SDM-USB-QS-S */ | 417 | #define LINX_SDMUSBQSS_PID 0xF448 /* Linx SDM-USB-QS-S */ |
@@ -673,6 +679,12 @@ | |||
673 | #define INTREPID_NEOVI_PID 0x0701 | 679 | #define INTREPID_NEOVI_PID 0x0701 |
674 | 680 | ||
675 | /* | 681 | /* |
682 | * WICED USB UART | ||
683 | */ | ||
684 | #define WICED_VID 0x0A5C | ||
685 | #define WICED_USB20706V2_PID 0x6422 | ||
686 | |||
687 | /* | ||
676 | * Definitions for ID TECH (www.idt-net.com) devices | 688 | * Definitions for ID TECH (www.idt-net.com) devices |
677 | */ | 689 | */ |
678 | #define IDTECH_VID 0x0ACD /* ID TECH Vendor ID */ | 690 | #define IDTECH_VID 0x0ACD /* ID TECH Vendor ID */ |
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index 5608af4a369d..de9992b492b0 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c | |||
@@ -1252,7 +1252,7 @@ static int mos7720_write(struct tty_struct *tty, struct usb_serial_port *port, | |||
1252 | 1252 | ||
1253 | if (urb->transfer_buffer == NULL) { | 1253 | if (urb->transfer_buffer == NULL) { |
1254 | urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE, | 1254 | urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE, |
1255 | GFP_KERNEL); | 1255 | GFP_ATOMIC); |
1256 | if (!urb->transfer_buffer) | 1256 | if (!urb->transfer_buffer) |
1257 | goto exit; | 1257 | goto exit; |
1258 | } | 1258 | } |
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index ed378fb232e7..57426d703a09 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c | |||
@@ -1340,8 +1340,8 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port, | |||
1340 | } | 1340 | } |
1341 | 1341 | ||
1342 | if (urb->transfer_buffer == NULL) { | 1342 | if (urb->transfer_buffer == NULL) { |
1343 | urb->transfer_buffer = | 1343 | urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE, |
1344 | kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL); | 1344 | GFP_ATOMIC); |
1345 | if (!urb->transfer_buffer) | 1345 | if (!urb->transfer_buffer) |
1346 | goto exit; | 1346 | goto exit; |
1347 | } | 1347 | } |
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 8e07536c233a..9894e341c6ac 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -274,6 +274,12 @@ static void option_instat_callback(struct urb *urb); | |||
274 | #define TELIT_PRODUCT_LE920 0x1200 | 274 | #define TELIT_PRODUCT_LE920 0x1200 |
275 | #define TELIT_PRODUCT_LE910 0x1201 | 275 | #define TELIT_PRODUCT_LE910 0x1201 |
276 | #define TELIT_PRODUCT_LE910_USBCFG4 0x1206 | 276 | #define TELIT_PRODUCT_LE910_USBCFG4 0x1206 |
277 | #define TELIT_PRODUCT_LE920A4_1207 0x1207 | ||
278 | #define TELIT_PRODUCT_LE920A4_1208 0x1208 | ||
279 | #define TELIT_PRODUCT_LE920A4_1211 0x1211 | ||
280 | #define TELIT_PRODUCT_LE920A4_1212 0x1212 | ||
281 | #define TELIT_PRODUCT_LE920A4_1213 0x1213 | ||
282 | #define TELIT_PRODUCT_LE920A4_1214 0x1214 | ||
277 | 283 | ||
278 | /* ZTE PRODUCTS */ | 284 | /* ZTE PRODUCTS */ |
279 | #define ZTE_VENDOR_ID 0x19d2 | 285 | #define ZTE_VENDOR_ID 0x19d2 |
@@ -519,6 +525,12 @@ static void option_instat_callback(struct urb *urb); | |||
519 | #define VIATELECOM_VENDOR_ID 0x15eb | 525 | #define VIATELECOM_VENDOR_ID 0x15eb |
520 | #define VIATELECOM_PRODUCT_CDS7 0x0001 | 526 | #define VIATELECOM_PRODUCT_CDS7 0x0001 |
521 | 527 | ||
528 | /* WeTelecom products */ | ||
529 | #define WETELECOM_VENDOR_ID 0x22de | ||
530 | #define WETELECOM_PRODUCT_WMD200 0x6801 | ||
531 | #define WETELECOM_PRODUCT_6802 0x6802 | ||
532 | #define WETELECOM_PRODUCT_WMD300 0x6803 | ||
533 | |||
522 | struct option_blacklist_info { | 534 | struct option_blacklist_info { |
523 | /* bitmask of interface numbers blacklisted for send_setup */ | 535 | /* bitmask of interface numbers blacklisted for send_setup */ |
524 | const unsigned long sendsetup; | 536 | const unsigned long sendsetup; |
@@ -628,6 +640,11 @@ static const struct option_blacklist_info telit_le920_blacklist = { | |||
628 | .reserved = BIT(1) | BIT(5), | 640 | .reserved = BIT(1) | BIT(5), |
629 | }; | 641 | }; |
630 | 642 | ||
643 | static const struct option_blacklist_info telit_le920a4_blacklist_1 = { | ||
644 | .sendsetup = BIT(0), | ||
645 | .reserved = BIT(1), | ||
646 | }; | ||
647 | |||
631 | static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = { | 648 | static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = { |
632 | .sendsetup = BIT(2), | 649 | .sendsetup = BIT(2), |
633 | .reserved = BIT(0) | BIT(1) | BIT(3), | 650 | .reserved = BIT(0) | BIT(1) | BIT(3), |
@@ -1203,6 +1220,16 @@ static const struct usb_device_id option_ids[] = { | |||
1203 | .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, | 1220 | .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, |
1204 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), | 1221 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), |
1205 | .driver_info = (kernel_ulong_t)&telit_le920_blacklist }, | 1222 | .driver_info = (kernel_ulong_t)&telit_le920_blacklist }, |
1223 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1207) }, | ||
1224 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1208), | ||
1225 | .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 }, | ||
1226 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1211), | ||
1227 | .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, | ||
1228 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1212), | ||
1229 | .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 }, | ||
1230 | { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) }, | ||
1231 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214), | ||
1232 | .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, | ||
1206 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ | 1233 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ |
1207 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), | 1234 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), |
1208 | .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, | 1235 | .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, |
@@ -1966,9 +1993,13 @@ static const struct usb_device_id option_ids[] = { | |||
1966 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, | 1993 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, |
1967 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ | 1994 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ |
1968 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ | 1995 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ |
1996 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ | ||
1969 | { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ | 1997 | { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ |
1970 | { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, | 1998 | { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, |
1971 | { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, | 1999 | { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, |
2000 | { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) }, | ||
2001 | { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) }, | ||
2002 | { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) }, | ||
1972 | { } /* Terminating entry */ | 2003 | { } /* Terminating entry */ |
1973 | }; | 2004 | }; |
1974 | MODULE_DEVICE_TABLE(usb, option_ids); | 2005 | MODULE_DEVICE_TABLE(usb, option_ids); |
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c index a204782ae530..e98b6e57b703 100644 --- a/drivers/usb/serial/usb-serial-simple.c +++ b/drivers/usb/serial/usb-serial-simple.c | |||
@@ -54,7 +54,8 @@ DEVICE(funsoft, FUNSOFT_IDS); | |||
54 | /* Infineon Flashloader driver */ | 54 | /* Infineon Flashloader driver */ |
55 | #define FLASHLOADER_IDS() \ | 55 | #define FLASHLOADER_IDS() \ |
56 | { USB_DEVICE_INTERFACE_CLASS(0x058b, 0x0041, USB_CLASS_CDC_DATA) }, \ | 56 | { USB_DEVICE_INTERFACE_CLASS(0x058b, 0x0041, USB_CLASS_CDC_DATA) }, \ |
57 | { USB_DEVICE(0x8087, 0x0716) } | 57 | { USB_DEVICE(0x8087, 0x0716) }, \ |
58 | { USB_DEVICE(0x8087, 0x0801) } | ||
58 | DEVICE(flashloader, FLASHLOADER_IDS); | 59 | DEVICE(flashloader, FLASHLOADER_IDS); |
59 | 60 | ||
60 | /* Google Serial USB SubClass */ | 61 | /* Google Serial USB SubClass */ |
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index b1b9bac44016..d213cf44a7e4 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c | |||
@@ -1433,7 +1433,7 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[] | |||
1433 | 1433 | ||
1434 | rc = usb_register(udriver); | 1434 | rc = usb_register(udriver); |
1435 | if (rc) | 1435 | if (rc) |
1436 | return rc; | 1436 | goto failed_usb_register; |
1437 | 1437 | ||
1438 | for (sd = serial_drivers; *sd; ++sd) { | 1438 | for (sd = serial_drivers; *sd; ++sd) { |
1439 | (*sd)->usb_driver = udriver; | 1439 | (*sd)->usb_driver = udriver; |
@@ -1451,6 +1451,8 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[] | |||
1451 | while (sd-- > serial_drivers) | 1451 | while (sd-- > serial_drivers) |
1452 | usb_serial_deregister(*sd); | 1452 | usb_serial_deregister(*sd); |
1453 | usb_deregister(udriver); | 1453 | usb_deregister(udriver); |
1454 | failed_usb_register: | ||
1455 | kfree(udriver); | ||
1454 | return rc; | 1456 | return rc; |
1455 | } | 1457 | } |
1456 | EXPORT_SYMBOL_GPL(usb_serial_register_drivers); | 1458 | EXPORT_SYMBOL_GPL(usb_serial_register_drivers); |
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 9d6320e8ff3e..6e29d053843d 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c | |||
@@ -88,7 +88,7 @@ struct vhost_scsi_cmd { | |||
88 | struct scatterlist *tvc_prot_sgl; | 88 | struct scatterlist *tvc_prot_sgl; |
89 | struct page **tvc_upages; | 89 | struct page **tvc_upages; |
90 | /* Pointer to response header iovec */ | 90 | /* Pointer to response header iovec */ |
91 | struct iovec *tvc_resp_iov; | 91 | struct iovec tvc_resp_iov; |
92 | /* Pointer to vhost_scsi for our device */ | 92 | /* Pointer to vhost_scsi for our device */ |
93 | struct vhost_scsi *tvc_vhost; | 93 | struct vhost_scsi *tvc_vhost; |
94 | /* Pointer to vhost_virtqueue for the cmd */ | 94 | /* Pointer to vhost_virtqueue for the cmd */ |
@@ -547,7 +547,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work) | |||
547 | memcpy(v_rsp.sense, cmd->tvc_sense_buf, | 547 | memcpy(v_rsp.sense, cmd->tvc_sense_buf, |
548 | se_cmd->scsi_sense_length); | 548 | se_cmd->scsi_sense_length); |
549 | 549 | ||
550 | iov_iter_init(&iov_iter, READ, cmd->tvc_resp_iov, | 550 | iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov, |
551 | cmd->tvc_in_iovs, sizeof(v_rsp)); | 551 | cmd->tvc_in_iovs, sizeof(v_rsp)); |
552 | ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter); | 552 | ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter); |
553 | if (likely(ret == sizeof(v_rsp))) { | 553 | if (likely(ret == sizeof(v_rsp))) { |
@@ -1044,7 +1044,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) | |||
1044 | } | 1044 | } |
1045 | cmd->tvc_vhost = vs; | 1045 | cmd->tvc_vhost = vs; |
1046 | cmd->tvc_vq = vq; | 1046 | cmd->tvc_vq = vq; |
1047 | cmd->tvc_resp_iov = &vq->iov[out]; | 1047 | cmd->tvc_resp_iov = vq->iov[out]; |
1048 | cmd->tvc_in_iovs = in; | 1048 | cmd->tvc_in_iovs = in; |
1049 | 1049 | ||
1050 | pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n", | 1050 | pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n", |
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c index 388eec4e1a90..97fb2f8fa930 100644 --- a/drivers/vhost/test.c +++ b/drivers/vhost/test.c | |||
@@ -220,20 +220,20 @@ static long vhost_test_reset_owner(struct vhost_test *n) | |||
220 | { | 220 | { |
221 | void *priv = NULL; | 221 | void *priv = NULL; |
222 | long err; | 222 | long err; |
223 | struct vhost_memory *memory; | 223 | struct vhost_umem *umem; |
224 | 224 | ||
225 | mutex_lock(&n->dev.mutex); | 225 | mutex_lock(&n->dev.mutex); |
226 | err = vhost_dev_check_owner(&n->dev); | 226 | err = vhost_dev_check_owner(&n->dev); |
227 | if (err) | 227 | if (err) |
228 | goto done; | 228 | goto done; |
229 | memory = vhost_dev_reset_owner_prepare(); | 229 | umem = vhost_dev_reset_owner_prepare(); |
230 | if (!memory) { | 230 | if (!umem) { |
231 | err = -ENOMEM; | 231 | err = -ENOMEM; |
232 | goto done; | 232 | goto done; |
233 | } | 233 | } |
234 | vhost_test_stop(n, &priv); | 234 | vhost_test_stop(n, &priv); |
235 | vhost_test_flush(n); | 235 | vhost_test_flush(n); |
236 | vhost_dev_reset_owner(&n->dev, memory); | 236 | vhost_dev_reset_owner(&n->dev, umem); |
237 | done: | 237 | done: |
238 | mutex_unlock(&n->dev.mutex); | 238 | mutex_unlock(&n->dev.mutex); |
239 | return err; | 239 | return err; |
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index e383ecdaca59..ed9c9eeedfe5 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c | |||
@@ -167,7 +167,7 @@ static bool vring_use_dma_api(struct virtio_device *vdev) | |||
167 | * making all of the arch DMA ops work on the vring device itself | 167 | * making all of the arch DMA ops work on the vring device itself |
168 | * is a mess. For now, we use the parent device for DMA ops. | 168 | * is a mess. For now, we use the parent device for DMA ops. |
169 | */ | 169 | */ |
170 | struct device *vring_dma_dev(const struct vring_virtqueue *vq) | 170 | static struct device *vring_dma_dev(const struct vring_virtqueue *vq) |
171 | { | 171 | { |
172 | return vq->vq.vdev->dev.parent; | 172 | return vq->vq.vdev->dev.parent; |
173 | } | 173 | } |
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c index 7487971f9f78..c1010f018bd8 100644 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c | |||
@@ -316,7 +316,7 @@ static int xenbus_write_transaction(unsigned msg_type, | |||
316 | rc = -ENOMEM; | 316 | rc = -ENOMEM; |
317 | goto out; | 317 | goto out; |
318 | } | 318 | } |
319 | } else { | 319 | } else if (msg_type == XS_TRANSACTION_END) { |
320 | list_for_each_entry(trans, &u->transactions, list) | 320 | list_for_each_entry(trans, &u->transactions, list) |
321 | if (trans->handle.id == u->u.msg.tx_id) | 321 | if (trans->handle.id == u->u.msg.tx_id) |
322 | break; | 322 | break; |