author	Thomas Gleixner <tglx@linutronix.de>	2016-09-01 12:33:46 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2016-09-01 12:33:46 -0400
commit	0cb7bf61b1e9f05027de58c80f9b46a714d24e35 (patch)
tree	41fb55cf62d07b425122f9a8b96412c0d8eb99c5 /drivers
parent	aa877175e7a9982233ed8f10cb4bfddd78d82741 (diff)
parent	3eab887a55424fc2c27553b7bfe32330df83f7b8 (diff)

Merge branch 'linus' into smp/hotplug

Apply upstream changes to avoid conflicts with pending patches.

Diffstat (limited to 'drivers')
260 files changed, 3766 insertions, 1399 deletions
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 8c234dd9b8bc..80cc7c089a15 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1527,11 +1527,12 @@ static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
 {
 	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
 	u64 offset = nfit_blk->stat_offset + mmio->size * bw;
+	const u32 STATUS_MASK = 0x80000037;
 
 	if (mmio->num_lines)
 		offset = to_interleave_offset(offset, mmio);
 
-	return readl(mmio->addr.base + offset);
+	return readl(mmio->addr.base + offset) & STATUS_MASK;
 }
 
 static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
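The change above masks a block-window status read down to the bits the spec actually defines, so undefined bits can never leak into error checks. A stand-alone sketch of the same defensive-read pattern (the names here are illustrative, modeled on the patch rather than copied from the driver):

```c
#include <stdint.h>

/* Only bits 0-2, 4-5 and 31 are defined in this hypothetical status
 * register; anything else may read back as junk and must be dropped. */
#define STATUS_DEFINED_BITS 0x80000037u

static uint32_t read_status(const volatile uint32_t *reg)
{
	/* Raw register read, then discard undefined bits. */
	return *reg & STATUS_DEFINED_BITS;
}
```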
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index b71a9c767009..e3d8e4ced4a2 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3706,22 +3706,21 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
 	if (UFDCS->rawcmd == 1)
 		UFDCS->rawcmd = 2;
 
-	if (mode & (FMODE_READ|FMODE_WRITE)) {
-		UDRS->last_checked = 0;
-		clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
-		check_disk_change(bdev);
-		if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
-			goto out;
-		if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
+	if (!(mode & FMODE_NDELAY)) {
+		if (mode & (FMODE_READ|FMODE_WRITE)) {
+			UDRS->last_checked = 0;
+			clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
+			check_disk_change(bdev);
+			if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
+				goto out;
+			if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
+				goto out;
+		}
+		res = -EROFS;
+		if ((mode & FMODE_WRITE) &&
+		    !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
 			goto out;
 	}
-
-	res = -EROFS;
-
-	if ((mode & FMODE_WRITE) &&
-	    !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
-		goto out;
-
 	mutex_unlock(&open_lock);
 	mutex_unlock(&floppy_mutex);
 	return 0;
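The floppy change wraps the media-change and read-only checks in `if (!(mode & FMODE_NDELAY))`, so a non-blocking open succeeds without touching the (possibly absent) media and the device stays reachable for ioctl-only access. A hedged sketch of that gating pattern, with invented names and error codes:

```c
/* Illustrative sketch, not the kernel's floppy driver: a non-blocking
 * (NDELAY) open skips all media probing so ioctl-only access keeps
 * working even with no disk in the drive. */
enum { MODE_READ = 1, MODE_WRITE = 2, MODE_NDELAY = 4 };

static int media_changed;        /* maintained by polling elsewhere */
static int media_writable = 1;

static int drive_open(int mode)
{
	if (!(mode & MODE_NDELAY)) {
		if (mode & (MODE_READ | MODE_WRITE)) {
			if (media_changed)
				return -1;   /* no usable media */
		}
		if ((mode & MODE_WRITE) && !media_writable)
			return -2;           /* read-only media */
	}
	return 0;   /* NDELAY opens always reach this point */
}
```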
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 1a04af6d2421..6c6519f6492a 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3950,6 +3950,7 @@ static void rbd_dev_release(struct device *dev)
 	bool need_put = !!rbd_dev->opts;
 
 	ceph_oid_destroy(&rbd_dev->header_oid);
+	ceph_oloc_destroy(&rbd_dev->header_oloc);
 
 	rbd_put_client(rbd_dev->rbd_client);
 	rbd_spec_put(rbd_dev->spec);
@@ -5336,15 +5337,6 @@ static ssize_t do_rbd_add(struct bus_type *bus,
 	}
 	spec->pool_id = (u64)rc;
 
-	/* The ceph file layout needs to fit pool id in 32 bits */
-
-	if (spec->pool_id > (u64)U32_MAX) {
-		rbd_warn(NULL, "pool id too large (%llu > %u)",
-			(unsigned long long)spec->pool_id, U32_MAX);
-		rc = -EIO;
-		goto err_out_client;
-	}
-
 	rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
 	if (!rbd_dev) {
 		rc = -ENOMEM;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 1523e05c46fc..93b1aaa5ba3b 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -391,22 +391,16 @@ static int init_vq(struct virtio_blk *vblk)
 	num_vqs = 1;
 
 	vblk->vqs = kmalloc(sizeof(*vblk->vqs) * num_vqs, GFP_KERNEL);
-	if (!vblk->vqs) {
-		err = -ENOMEM;
-		goto out;
-	}
+	if (!vblk->vqs)
+		return -ENOMEM;
 
 	names = kmalloc(sizeof(*names) * num_vqs, GFP_KERNEL);
-	if (!names)
-		goto err_names;
-
 	callbacks = kmalloc(sizeof(*callbacks) * num_vqs, GFP_KERNEL);
-	if (!callbacks)
-		goto err_callbacks;
-
 	vqs = kmalloc(sizeof(*vqs) * num_vqs, GFP_KERNEL);
-	if (!vqs)
-		goto err_vqs;
+	if (!names || !callbacks || !vqs) {
+		err = -ENOMEM;
+		goto out;
+	}
 
 	for (i = 0; i < num_vqs; i++) {
 		callbacks[i] = virtblk_done;
@@ -417,7 +411,7 @@ static int init_vq(struct virtio_blk *vblk)
 	/* Discover virtqueues and write information to configuration. */
 	err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names);
 	if (err)
-		goto err_find_vqs;
+		goto out;
 
 	for (i = 0; i < num_vqs; i++) {
 		spin_lock_init(&vblk->vqs[i].lock);
@@ -425,16 +419,12 @@ static int init_vq(struct virtio_blk *vblk)
 	}
 	vblk->num_vqs = num_vqs;
 
-err_find_vqs:
+out:
 	kfree(vqs);
-err_vqs:
 	kfree(callbacks);
-err_callbacks:
 	kfree(names);
-err_names:
 	if (err)
 		kfree(vblk->vqs);
-out:
 	return err;
 }
 
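The virtio_blk cleanup relies on `kfree(NULL)` being a no-op: all three temporary arrays can be allocated up front, checked with one condition, and released through a single `out:` label, eliminating the ladder of `err_*` labels. The same idea in portable C (`free(NULL)` is likewise defined to do nothing):

```c
#include <stdlib.h>

/* Minimal sketch of the single-exit cleanup pattern; the three arrays
 * are scratch buffers that are freed on success and failure alike. */
static int init_vqs(size_t n)
{
	int err = 0;
	void **names = malloc(n * sizeof(*names));
	void **callbacks = malloc(n * sizeof(*callbacks));
	void **vqs = malloc(n * sizeof(*vqs));

	if (!names || !callbacks || !vqs) {
		err = -1;            /* -ENOMEM in the driver */
		goto out;
	}

	/* ... set up the queues using the three arrays ... */

out:
	free(vqs);               /* all safe even when NULL */
	free(callbacks);
	free(names);
	return err;
}
```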
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index be4fea6a5dd3..88ef6d4729b4 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -189,6 +189,8 @@ struct blkfront_info
 	struct mutex mutex;
 	struct xenbus_device *xbdev;
 	struct gendisk *gd;
+	u16 sector_size;
+	unsigned int physical_sector_size;
 	int vdevice;
 	blkif_vdev_t handle;
 	enum blkif_state connected;
@@ -910,9 +912,45 @@ static struct blk_mq_ops blkfront_mq_ops = {
 	.map_queue = blk_mq_map_queue,
 };
 
+static void blkif_set_queue_limits(struct blkfront_info *info)
+{
+	struct request_queue *rq = info->rq;
+	struct gendisk *gd = info->gd;
+	unsigned int segments = info->max_indirect_segments ? :
+				BLKIF_MAX_SEGMENTS_PER_REQUEST;
+
+	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
+
+	if (info->feature_discard) {
+		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
+		blk_queue_max_discard_sectors(rq, get_capacity(gd));
+		rq->limits.discard_granularity = info->discard_granularity;
+		rq->limits.discard_alignment = info->discard_alignment;
+		if (info->feature_secdiscard)
+			queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
+	}
+
+	/* Hard sector size and max sectors impersonate the equiv. hardware. */
+	blk_queue_logical_block_size(rq, info->sector_size);
+	blk_queue_physical_block_size(rq, info->physical_sector_size);
+	blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
+
+	/* Each segment in a request is up to an aligned page in size. */
+	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
+	blk_queue_max_segment_size(rq, PAGE_SIZE);
+
+	/* Ensure a merged request will fit in a single I/O ring slot. */
+	blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
+
+	/* Make sure buffer addresses are sector-aligned. */
+	blk_queue_dma_alignment(rq, 511);
+
+	/* Make sure we don't use bounce buffers. */
+	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
+}
+
 static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
-				unsigned int physical_sector_size,
-				unsigned int segments)
+				unsigned int physical_sector_size)
 {
 	struct request_queue *rq;
 	struct blkfront_info *info = gd->private_data;
@@ -944,36 +982,11 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
 	}
 
 	rq->queuedata = info;
-	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
-
-	if (info->feature_discard) {
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
-		blk_queue_max_discard_sectors(rq, get_capacity(gd));
-		rq->limits.discard_granularity = info->discard_granularity;
-		rq->limits.discard_alignment = info->discard_alignment;
-		if (info->feature_secdiscard)
-			queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
-	}
-
-	/* Hard sector size and max sectors impersonate the equiv. hardware. */
-	blk_queue_logical_block_size(rq, sector_size);
-	blk_queue_physical_block_size(rq, physical_sector_size);
-	blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
-
-	/* Each segment in a request is up to an aligned page in size. */
-	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
-	blk_queue_max_segment_size(rq, PAGE_SIZE);
-
-	/* Ensure a merged request will fit in a single I/O ring slot. */
-	blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
-
-	/* Make sure buffer addresses are sector-aligned. */
-	blk_queue_dma_alignment(rq, 511);
-
-	/* Make sure we don't use bounce buffers. */
-	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
-
-	gd->queue = rq;
+	info->rq = gd->queue = rq;
+	info->gd = gd;
+	info->sector_size = sector_size;
+	info->physical_sector_size = physical_sector_size;
+	blkif_set_queue_limits(info);
 
 	return 0;
 }
@@ -1136,16 +1149,11 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
 	gd->private_data = info;
 	set_capacity(gd, capacity);
 
-	if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size,
-				 info->max_indirect_segments ? :
-				 BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
+	if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size)) {
 		del_gendisk(gd);
 		goto release;
 	}
 
-	info->rq = gd->queue;
-	info->gd = gd;
-
 	xlvbd_flush(info);
 
 	if (vdisk_info & VDISK_READONLY)
@@ -1315,7 +1323,7 @@ free_shadow:
 			rinfo->ring_ref[i] = GRANT_INVALID_REF;
 		}
 	}
-	free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * PAGE_SIZE));
+	free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE));
 	rinfo->ring.sring = NULL;
 
 	if (rinfo->irq)
@@ -2007,8 +2015,10 @@ static int blkif_recover(struct blkfront_info *info)
 	struct split_bio *split_bio;
 
 	blkfront_gather_backend_features(info);
+	/* Reset limits changed by blk_mq_update_nr_hw_queues(). */
+	blkif_set_queue_limits(info);
 	segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
-	blk_queue_max_segments(info->rq, segs);
+	blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);
 
 	for (r_index = 0; r_index < info->nr_rings; r_index++) {
 		struct blkfront_ring_info *rinfo = &info->rinfo[r_index];
@@ -2432,7 +2442,7 @@ static void blkfront_connect(struct blkfront_info *info)
 	if (err) {
 		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
 				 info->xbdev->otherend);
-		return;
+		goto fail;
 	}
 
 	xenbus_switch_state(info->xbdev, XenbusStateConnected);
@@ -2445,6 +2455,11 @@ static void blkfront_connect(struct blkfront_info *info)
 	device_add_disk(&info->xbdev->dev, info->gd);
 
 	info->is_ready = 1;
+	return;
+
+fail:
+	blkif_free(info, 0);
+	return;
 }
 
 /**
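A design note on the xen-blkfront refactor: every queue limit now derives from state saved in `struct blkfront_info`, so `blkif_set_queue_limits()` is idempotent and can be invoked both at initial queue creation and again from `blkif_recover()` after `blk_mq_update_nr_hw_queues()` has reset the limits. A toy sketch of that shape (invented types; only the structure mirrors the patch):

```c
/* All tunables live in the device state, not in function arguments,
 * so the helper can be re-run at any time to reapply them. */
struct disk_state {
	unsigned int sector_size;
	unsigned int max_segments;     /* 0 means "use the default" */
};

struct queue_limits {
	unsigned int logical_block_size;
	unsigned int max_segments;
};

static void set_queue_limits(struct queue_limits *q,
			     const struct disk_state *s)
{
	q->logical_block_size = s->sector_size;
	q->max_segments = s->max_segments ? s->max_segments : 32;
}
```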
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 28bce3f4f81d..57700541f951 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -8,6 +8,9 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+
+#define pr_fmt(fmt)	"arm_arch_timer: " fmt
+
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/device.h>
@@ -370,16 +373,33 @@ static bool arch_timer_has_nonsecure_ppi(void)
 		arch_timer_ppi[PHYS_NONSECURE_PPI]);
 }
 
+static u32 check_ppi_trigger(int irq)
+{
+	u32 flags = irq_get_trigger_type(irq);
+
+	if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
+		pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
+		pr_warn("WARNING: Please fix your firmware\n");
+		flags = IRQF_TRIGGER_LOW;
+	}
+
+	return flags;
+}
+
 static int arch_timer_starting_cpu(unsigned int cpu)
 {
 	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
+	u32 flags;
 
 	__arch_timer_setup(ARCH_CP15_TIMER, clk);
 
-	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], 0);
+	flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
+	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);
 
-	if (arch_timer_has_nonsecure_ppi())
-		enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
+	if (arch_timer_has_nonsecure_ppi()) {
+		flags = check_ppi_trigger(arch_timer_ppi[PHYS_NONSECURE_PPI]);
+		enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], flags);
+	}
 
 	arch_counter_set_user_access();
 	if (evtstrm_enable)
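`check_ppi_trigger()` distrusts firmware: if the DT/ACPI-provided trigger type for the timer PPI is anything other than level-high or level-low, it warns and falls back to level-low rather than handing a bogus value to `enable_percpu_irq()`. The validate-then-default pattern in isolation (constants are stand-ins, not the kernel's `IRQF_TRIGGER_*` values):

```c
#include <stdio.h>

enum trigger { TRIGGER_HIGH = 4, TRIGGER_LOW = 8 };

/* Accept only the trigger types this timer can use; anything else is a
 * firmware bug, so warn and pick a safe default instead of failing. */
static enum trigger check_trigger(int irq, int fw_value)
{
	if (fw_value != TRIGGER_HIGH && fw_value != TRIGGER_LOW) {
		fprintf(stderr,
			"IRQ%d: invalid trigger %d, assuming level low\n",
			irq, fw_value);
		return TRIGGER_LOW;
	}
	return (enum trigger)fw_value;
}
```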
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
index 7e3fd375a627..92f6e4deee74 100644
--- a/drivers/clocksource/bcm_kona_timer.c
+++ b/drivers/clocksource/bcm_kona_timer.c
@@ -66,10 +66,10 @@ static void kona_timer_disable_and_clear(void __iomem *base)
 
 }
 
-static void
+static int
 kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw)
 {
-	int loop_limit = 4;
+	int loop_limit = 3;
 
 	/*
 	 * Read 64-bit free running counter
@@ -83,18 +83,19 @@ kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw)
 	 * if new hi-word is equal to previously read hi-word then stop.
 	 */
 
-	while (--loop_limit) {
+	do {
 		*msw = readl(timer_base + KONA_GPTIMER_STCHI_OFFSET);
 		*lsw = readl(timer_base + KONA_GPTIMER_STCLO_OFFSET);
 		if (*msw == readl(timer_base + KONA_GPTIMER_STCHI_OFFSET))
 			break;
-	}
+	} while (--loop_limit);
 	if (!loop_limit) {
 		pr_err("bcm_kona_timer: getting counter failed.\n");
 		pr_err(" Timer will be impacted\n");
+		return -ETIMEDOUT;
 	}
 
-	return;
+	return 0;
 }
 
 static int kona_timer_set_next_event(unsigned long clc,
@@ -112,8 +113,11 @@ static int kona_timer_set_next_event(unsigned long clc,
 
 	uint32_t lsw, msw;
 	uint32_t reg;
+	int ret;
 
-	kona_timer_get_counter(timers.tmr_regs, &msw, &lsw);
+	ret = kona_timer_get_counter(timers.tmr_regs, &msw, &lsw);
+	if (ret)
+		return ret;
 
 	/* Load the "next" event tick value */
 	writel(lsw + clc, timers.tmr_regs + KONA_GPTIMER_STCM0_OFFSET);
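The Kona fix also illustrates the standard way to read a free-running 64-bit counter through two 32-bit registers: read hi, read lo, re-read hi, and accept the pair only if hi did not change (the low word did not wrap mid-read). With the `do {} while` form, the retry budget is spent on actual retries and exhaustion is reported instead of silently returning stale values. A self-contained sketch:

```c
#include <stdint.h>

static int read_counter64(volatile const uint32_t *hi,
			  volatile const uint32_t *lo,
			  uint64_t *out)
{
	int loop_limit = 3;

	do {
		uint32_t msw = *hi;
		uint32_t lsw = *lo;

		if (msw == *hi) {   /* low word did not wrap in between */
			*out = ((uint64_t)msw << 32) | lsw;
			return 0;
		}
	} while (--loop_limit);

	return -1;   /* counter unstable: -ETIMEDOUT in the driver */
}
```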
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index d91e8725917c..b4b3ab5a11ad 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -164,7 +164,7 @@ void __init gic_clocksource_init(unsigned int frequency)
 	gic_start_count();
 }
 
-static void __init gic_clocksource_of_init(struct device_node *node)
+static int __init gic_clocksource_of_init(struct device_node *node)
 {
 	struct clk *clk;
 	int ret;
diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c
index 937e10b84d58..3e1cb512f3ce 100644
--- a/drivers/clocksource/pxa_timer.c
+++ b/drivers/clocksource/pxa_timer.c
@@ -21,6 +21,8 @@
 #include <linux/of_irq.h>
 #include <linux/sched_clock.h>
 
+#include <clocksource/pxa.h>
+
 #include <asm/div64.h>
 
 #define OSMR0		0x00	/* OS Timer 0 Match Register */
diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c
index 97669ee4df2a..c83452cacb41 100644
--- a/drivers/clocksource/sun4i_timer.c
+++ b/drivers/clocksource/sun4i_timer.c
@@ -123,12 +123,16 @@ static struct clock_event_device sun4i_clockevent = {
 	.set_next_event = sun4i_clkevt_next_event,
 };
 
+static void sun4i_timer_clear_interrupt(void)
+{
+	writel(TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_ST_REG);
+}
 
 static irqreturn_t sun4i_timer_interrupt(int irq, void *dev_id)
 {
 	struct clock_event_device *evt = (struct clock_event_device *)dev_id;
 
-	writel(0x1, timer_base + TIMER_IRQ_ST_REG);
+	sun4i_timer_clear_interrupt();
 	evt->event_handler(evt);
 
 	return IRQ_HANDLED;
@@ -208,6 +212,9 @@ static int __init sun4i_timer_init(struct device_node *node)
 	/* Make sure timer is stopped before playing with interrupts */
 	sun4i_clkevt_time_stop(0);
 
+	/* clear timer0 interrupt */
+	sun4i_timer_clear_interrupt();
+
 	sun4i_clockevent.cpumask = cpu_possible_mask;
 	sun4i_clockevent.irq = irq;
 
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index 719b478d136e..3c39e6f45971 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -338,7 +338,6 @@ static int __init armada_xp_timer_init(struct device_node *np)
 	struct clk *clk = of_clk_get_by_name(np, "fixed");
 	int ret;
 
-	clk = of_clk_get(np, 0);
 	if (IS_ERR(clk)) {
 		pr_err("Failed to get clock");
 		return PTR_ERR(clk);
diff --git a/drivers/clocksource/time-pistachio.c b/drivers/clocksource/time-pistachio.c
index a7d9a08e4b0e..a8e6c7df853d 100644
--- a/drivers/clocksource/time-pistachio.c
+++ b/drivers/clocksource/time-pistachio.c
@@ -202,10 +202,10 @@ static int __init pistachio_clksrc_of_init(struct device_node *node)
 	rate = clk_get_rate(fast_clk);
 
 	/* Disable irq's for clocksource usage */
-	gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 0);
-	gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 1);
-	gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 2);
-	gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 3);
+	gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 0);
+	gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 1);
+	gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 2);
+	gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 3);
 
 	/* Enable timer block */
 	writel(TIMER_ME_GLOBAL, pcs_gpt.base);
diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c
index 1ffac0cb0cb7..3494bc5a21d5 100644
--- a/drivers/clocksource/timer-atmel-pit.c
+++ b/drivers/clocksource/timer-atmel-pit.c
@@ -261,6 +261,12 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
 		return PTR_ERR(data->mck);
 	}
 
+	ret = clk_prepare_enable(data->mck);
+	if (ret) {
+		pr_err("Unable to enable mck\n");
+		return ret;
+	}
+
 	/* Get the interrupts property */
 	data->irq = irq_of_parse_and_map(node, 0);
 	if (!data->irq) {
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 87796e0864e9..d3ffde806629 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -145,11 +145,30 @@ static struct powernv_pstate_info {
 /* Use following macros for conversions between pstate_id and index */
 static inline int idx_to_pstate(unsigned int i)
 {
+	if (unlikely(i >= powernv_pstate_info.nr_pstates)) {
+		pr_warn_once("index %u is out of bound\n", i);
+		return powernv_freqs[powernv_pstate_info.nominal].driver_data;
+	}
+
 	return powernv_freqs[i].driver_data;
 }
 
 static inline unsigned int pstate_to_idx(int pstate)
 {
+	int min = powernv_freqs[powernv_pstate_info.min].driver_data;
+	int max = powernv_freqs[powernv_pstate_info.max].driver_data;
+
+	if (min > 0) {
+		if (unlikely((pstate < max) || (pstate > min))) {
+			pr_warn_once("pstate %d is out of bound\n", pstate);
+			return powernv_pstate_info.nominal;
+		}
+	} else {
+		if (unlikely((pstate > max) || (pstate < min))) {
+			pr_warn_once("pstate %d is out of bound\n", pstate);
+			return powernv_pstate_info.nominal;
+		}
+	}
 	/*
	 * abs() is deliberately used so that is works with
	 * both monotonically increasing and decreasing
@@ -593,7 +612,7 @@ void gpstate_timer_handler(unsigned long data)
 	} else {
 		gpstate_idx = calc_global_pstate(gpstates->elapsed_time,
 						 gpstates->highest_lpstate_idx,
-						 freq_data.pstate_id);
+						 gpstates->last_lpstate_idx);
 	}
 
 	/*
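`pstate_to_idx()` now validates its input before use. On POWER the pstate id space can run either way (all-positive ids counting down in frequency, or negative ids), so "in bounds" has to be tested differently depending on the sign of the minimum, with anything outside the range clamped to the safe nominal entry. Boiled down (invented values; the driver returns a table index rather than the pstate itself):

```c
#include <stdio.h>

static int clamp_pstate(int pstate, int min, int max, int nominal)
{
	/* min/max are the slowest/fastest pstate ids; with positive
	 * numbering, min is numerically the largest value. */
	int bad = (min > 0) ? (pstate < max || pstate > min)
			    : (pstate > max || pstate < min);

	if (bad) {
		fprintf(stderr, "pstate %d is out of bound\n", pstate);
		return nominal;
	}
	return pstate;
}
```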
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index ea8189f4b021..6dc597126b79 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -441,6 +441,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 			       OP_ALG_AAI_CTR_MOD128);
 	const bool is_rfc3686 = alg->caam.rfc3686;
 
+	if (!ctx->authsize)
+		return 0;
+
 	/* NULL encryption / decryption */
 	if (!ctx->enckeylen)
 		return aead_null_set_sh_desc(aead);
@@ -614,7 +617,7 @@ skip_enc:
 		keys_fit_inline = true;
 
 	/* aead_givencrypt shared descriptor */
-	desc = ctx->sh_desc_givenc;
+	desc = ctx->sh_desc_enc;
 
 	/* Note: Context registers are saved. */
 	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
@@ -645,13 +648,13 @@ copy_iv:
 	append_operation(desc, ctx->class2_alg_type |
 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
 
-	/* ivsize + cryptlen = seqoutlen - authsize */
-	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
-
 	/* Read and write assoclen bytes */
 	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
 
+	/* ivsize + cryptlen = seqoutlen - authsize */
+	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
 	/* Skip assoc data */
 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
 
@@ -697,7 +700,7 @@ copy_iv:
 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
 					      desc_bytes(desc),
 					      DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
 		dev_err(jrdev, "unable to map shared descriptor\n");
 		return -ENOMEM;
 	}
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index f1ecc8df8d41..36365b3efdfd 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1898,6 +1898,7 @@ caam_hash_alloc(struct caam_hash_template *template,
 			 template->name);
 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
 			 template->driver_name);
+		t_alg->ahash_alg.setkey = NULL;
 	}
 	alg->cra_module = THIS_MODULE;
 	alg->cra_init = caam_hash_cra_init;
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index dfb168568af1..1f01e98c83c7 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -116,6 +116,9 @@ static int dax_pmem_probe(struct device *dev)
 	if (rc)
 		return rc;
 
+	/* adjust the dax_region resource to the start of data */
+	res.start += le64_to_cpu(pfn_sb->dataoff);
+
 	nd_region = to_nd_region(dev->parent);
 	dax_region = alloc_dax_region(dev, nd_region->id, &res,
 			le32_to_cpu(pfn_sb->align), addr, PFN_DEV|PFN_MAP);
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index d0c1dab9b435..dff1a4a6dc1b 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -251,6 +251,14 @@ config EDAC_SBRIDGE
 	  Support for error detection and correction the Intel
 	  Sandy Bridge, Ivy Bridge and Haswell Integrated Memory Controllers.
 
+config EDAC_SKX
+	tristate "Intel Skylake server Integrated MC"
+	depends on EDAC_MM_EDAC && PCI && X86_64 && X86_MCE_INTEL
+	depends on PCI_MMCONFIG
+	help
+	  Support for error detection and correction the Intel
+	  Skylake server Integrated Memory Controllers.
+
 config EDAC_MPC85XX
 	tristate "Freescale MPC83xx / MPC85xx"
 	depends on EDAC_MM_EDAC && FSL_SOC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index f9e4a3e0e6e9..986049925b08 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_EDAC_I5400)		+= i5400_edac.o
 obj-$(CONFIG_EDAC_I7300)		+= i7300_edac.o
 obj-$(CONFIG_EDAC_I7CORE)		+= i7core_edac.o
 obj-$(CONFIG_EDAC_SBRIDGE)		+= sb_edac.o
+obj-$(CONFIG_EDAC_SKX)			+= skx_edac.o
 obj-$(CONFIG_EDAC_E7XXX)		+= e7xxx_edac.o
 obj-$(CONFIG_EDAC_E752X)		+= e752x_edac.o
 obj-$(CONFIG_EDAC_I82443BXGX)		+= i82443bxgx_edac.o
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 4fb2eb7c800d..ce0067b7a2f6 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -552,9 +552,9 @@ static const struct pci_id_table pci_dev_descr_haswell_table[] = {
 /* Knight's Landing Support */
 /*
  * KNL's memory channels are swizzled between memory controllers.
- * MC0 is mapped to CH3,5,6 and MC1 is mapped to CH0,1,2
+ * MC0 is mapped to CH3,4,5 and MC1 is mapped to CH0,1,2
  */
-#define knl_channel_remap(channel) ((channel + 3) % 6)
+#define knl_channel_remap(mc, chan) ((mc) ? (chan) : (chan) + 3)
 
 /* Memory controller, TAD tables, error injection - 2-8-0, 2-9-0 (2 of these) */
 #define PCI_DEVICE_ID_INTEL_KNL_IMC_MC	0x7840
@@ -1286,7 +1286,7 @@ static u32 knl_get_mc_route(int entry, u32 reg)
 	mc = GET_BITFIELD(reg, entry*3, (entry*3)+2);
 	chan = GET_BITFIELD(reg, (entry*2) + 18, (entry*2) + 18 + 1);
 
-	return knl_channel_remap(mc*3 + chan);
+	return knl_channel_remap(mc, chan);
 }
 
 /*
@@ -2997,8 +2997,15 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
 	} else {
 		char A = *("A");
 
-		channel = knl_channel_remap(channel);
+		/*
+		 * Reported channel is in range 0-2, so we can't map it
+		 * back to mc. To figure out mc we check machine check
+		 * bank register that reported this error.
+		 * bank15 means mc0 and bank16 means mc1.
+		 */
+		channel = knl_channel_remap(m->bank == 16, channel);
 		channel_mask = 1 << channel;
+
 		snprintf(msg, sizeof(msg),
 			 "%s%s err_code:%04x:%04x channel:%d (DIMM_%c)",
 			 overflow ? " OVERFLOW" : "",
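On Knights Landing the physical channel is recovered from two pieces: which memory controller reported the error and the controller-local channel number. MC1 owns physical channels 0-2 and MC0 owns 3-5, and in the MCE path the controller is inferred from the reporting bank (bank 15 means mc0, bank 16 means mc1). A compilable sketch of just the remap with a couple of sanity checks:

```c
#include <assert.h>

/* Mirrors the new macro: MC1's channels pass through, MC0's local
 * channels 0-2 become physical channels 3-5. */
static int knl_channel_remap(int mc, int chan)
{
	return mc ? chan : chan + 3;
}

int main(void)
{
	assert(knl_channel_remap(1, 0) == 0);   /* mc1, first channel */
	assert(knl_channel_remap(0, 0) == 3);   /* mc0, first channel */
	assert(knl_channel_remap(0, 2) == 5);   /* mc0, last channel  */
	return 0;
}
```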
diff --git a/drivers/edac/skx_edac.c b/drivers/edac/skx_edac.c
new file mode 100644
index 000000000000..0ff4878c2aa1
--- /dev/null
+++ b/drivers/edac/skx_edac.c
@@ -0,0 +1,1121 @@
+/*
+ * EDAC driver for Intel(R) Xeon(R) Skylake processors
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/edac.h>
+#include <linux/mmzone.h>
+#include <linux/smp.h>
+#include <linux/bitmap.h>
+#include <linux/math64.h>
+#include <linux/mod_devicetable.h>
+#include <asm/cpu_device_id.h>
+#include <asm/processor.h>
+#include <asm/mce.h>
+
+#include "edac_core.h"
+
+#define SKX_REVISION	" Ver: 1.0 "
+
+/*
+ * Debug macros
+ */
+#define skx_printk(level, fmt, arg...)			\
+	edac_printk(level, "skx", fmt, ##arg)
+
+#define skx_mc_printk(mci, level, fmt, arg...)		\
+	edac_mc_chipset_printk(mci, level, "skx", fmt, ##arg)
+
+/*
+ * Get a bit field at register value <v>, from bit <lo> to bit <hi>
+ */
+#define GET_BITFIELD(v, lo, hi) \
+	(((v) & GENMASK_ULL((hi), (lo))) >> (lo))
+
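`GET_BITFIELD()` is the workhorse of the whole decoder: `GENMASK_ULL(hi, lo)` builds a mask of ones from bit `lo` through `hi`, and the shift right-justifies the extracted field. A user-space rendering of the same macro pair with a quick check (the kernel gets `GENMASK_ULL` from its bitops headers; this local definition is equivalent):

```c
#include <assert.h>
#include <stdint.h>

#define GENMASK_ULL(hi, lo) \
	((~0ULL << (lo)) & (~0ULL >> (63 - (hi))))
#define GET_BITFIELD(v, lo, hi) \
	(((v) & GENMASK_ULL((hi), (lo))) >> (lo))

int main(void)
{
	uint64_t reg = 0xABCD1234;

	/* Bits 8..15 of the register hold the byte 0x12. */
	assert(GET_BITFIELD(reg, 8, 15) == 0x12);
	return 0;
}
```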
+static LIST_HEAD(skx_edac_list);
+
+static u64 skx_tolm, skx_tohm;
+
+#define NUM_IMC			2	/* memory controllers per socket */
+#define NUM_CHANNELS		3	/* channels per memory controller */
+#define NUM_DIMMS		2	/* Max DIMMS per channel */
+
+#define MASK26	0x3FFFFFF		/* Mask for 2^26 */
+#define MASK29	0x1FFFFFFF		/* Mask for 2^29 */
+
+/*
+ * Each cpu socket contains some pci devices that provide global
+ * information, and also some that are local to each of the two
+ * memory controllers on the die.
+ */
+struct skx_dev {
+	struct list_head list;
+	u8 bus[4];
+	struct pci_dev *sad_all;
+	struct pci_dev *util_all;
+	u32 mcroute;
+	struct skx_imc {
+		struct mem_ctl_info *mci;
+		u8 mc;	/* system wide mc# */
+		u8 lmc;	/* socket relative mc# */
+		u8 src_id, node_id;
+		struct skx_channel {
+			struct pci_dev	*cdev;
+			struct skx_dimm {
+				u8 close_pg;
+				u8 bank_xor_enable;
+				u8 fine_grain_bank;
+				u8 rowbits;
+				u8 colbits;
+			} dimms[NUM_DIMMS];
+		} chan[NUM_CHANNELS];
+	} imc[NUM_IMC];
+};
+static int skx_num_sockets;
+
+struct skx_pvt {
+	struct skx_imc	*imc;
+};
+
+struct decoded_addr {
+	struct skx_dev *dev;
+	u64	addr;
+	int	socket;
+	int	imc;
+	int	channel;
+	u64	chan_addr;
+	int	sktways;
+	int	chanways;
+	int	dimm;
+	int	rank;
+	int	channel_rank;
+	u64	rank_address;
+	int	row;
+	int	column;
+	int	bank_address;
+	int	bank_group;
+};
+
+static struct skx_dev *get_skx_dev(u8 bus, u8 idx)
+{
+	struct skx_dev *d;
+
+	list_for_each_entry(d, &skx_edac_list, list) {
+		if (d->bus[idx] == bus)
+			return d;
+	}
+
+	return NULL;
+}
+
+enum munittype {
+	CHAN0, CHAN1, CHAN2, SAD_ALL, UTIL_ALL, SAD
+};
+
+struct munit {
+	u16	did;
+	u16	devfn[NUM_IMC];
+	u8	busidx;
+	u8	per_socket;
+	enum munittype mtype;
+};
+
+/*
+ * List of PCI device ids that we need together with some device
+ * number and function numbers to tell which memory controller the
+ * device belongs to.
+ */
+static const struct munit skx_all_munits[] = {
+	{ 0x2054, { }, 1, 1, SAD_ALL },
+	{ 0x2055, { }, 1, 1, UTIL_ALL },
+	{ 0x2040, { PCI_DEVFN(10, 0), PCI_DEVFN(12, 0) }, 2, 2, CHAN0 },
+	{ 0x2044, { PCI_DEVFN(10, 4), PCI_DEVFN(12, 4) }, 2, 2, CHAN1 },
+	{ 0x2048, { PCI_DEVFN(11, 0), PCI_DEVFN(13, 0) }, 2, 2, CHAN2 },
+	{ 0x208e, { }, 1, 0, SAD },
+	{ }
+};
+
+/*
+ * We use the per-socket device 0x2016 to count how many sockets are present,
+ * and to detemine which PCI buses are associated with each socket. Allocate
+ * and build the full list of all the skx_dev structures that we need here.
+ */
+static int get_all_bus_mappings(void)
+{
+	struct pci_dev *pdev, *prev;
+	struct skx_dev *d;
+	u32 reg;
+	int ndev = 0;
+
+	prev = NULL;
+	for (;;) {
+		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2016, prev);
+		if (!pdev)
+			break;
+		ndev++;
+		d = kzalloc(sizeof(*d), GFP_KERNEL);
+		if (!d) {
+			pci_dev_put(pdev);
+			return -ENOMEM;
+		}
+		pci_read_config_dword(pdev, 0xCC, &reg);
+		d->bus[0] = GET_BITFIELD(reg, 0, 7);
+		d->bus[1] = GET_BITFIELD(reg, 8, 15);
+		d->bus[2] = GET_BITFIELD(reg, 16, 23);
+		d->bus[3] = GET_BITFIELD(reg, 24, 31);
+		edac_dbg(2, "busses: %x, %x, %x, %x\n",
+			 d->bus[0], d->bus[1], d->bus[2], d->bus[3]);
+		list_add_tail(&d->list, &skx_edac_list);
+		skx_num_sockets++;
+		prev = pdev;
+	}
+
+	return ndev;
+}
+
+static int get_all_munits(const struct munit *m)
+{
+	struct pci_dev *pdev, *prev;
+	struct skx_dev *d;
+	u32 reg;
+	int i = 0, ndev = 0;
+
+	prev = NULL;
+	for (;;) {
+		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, m->did, prev);
+		if (!pdev)
+			break;
+		ndev++;
+		if (m->per_socket == NUM_IMC) {
+			for (i = 0; i < NUM_IMC; i++)
+				if (m->devfn[i] == pdev->devfn)
+					break;
+			if (i == NUM_IMC)
+				goto fail;
+		}
+		d = get_skx_dev(pdev->bus->number, m->busidx);
+		if (!d)
+			goto fail;
+
+		/* Be sure that the device is enabled */
+		if (unlikely(pci_enable_device(pdev) < 0)) {
+			skx_printk(KERN_ERR,
+				"Couldn't enable %04x:%04x\n", PCI_VENDOR_ID_INTEL, m->did);
+			goto fail;
+		}
+
+		switch (m->mtype) {
+		case CHAN0: case CHAN1: case CHAN2:
+			pci_dev_get(pdev);
+			d->imc[i].chan[m->mtype].cdev = pdev;
+			break;
+		case SAD_ALL:
+			pci_dev_get(pdev);
+			d->sad_all = pdev;
+			break;
+		case UTIL_ALL:
+			pci_dev_get(pdev);
+			d->util_all = pdev;
+			break;
+		case SAD:
+			/*
+			 * one of these devices per core, including cores
+			 * that don't exist on this SKU. Ignore any that
+			 * read a route table of zero, make sure all the
+			 * non-zero values match.
+			 */
+			pci_read_config_dword(pdev, 0xB4, &reg);
+			if (reg != 0) {
+				if (d->mcroute == 0)
+					d->mcroute = reg;
+				else if (d->mcroute != reg) {
+					skx_printk(KERN_ERR,
+						"mcroute mismatch\n");
+					goto fail;
+				}
+			}
+			ndev--;
+			break;
+		}
+
+		prev = pdev;
+	}
+
+	return ndev;
+fail:
+	pci_dev_put(pdev);
+	return -ENODEV;
+}
+
+const struct x86_cpu_id skx_cpuids[] = {
+	{ X86_VENDOR_INTEL, 6, 0x55, 0, 0 },	/* Skylake */
+	{ }
+};
+MODULE_DEVICE_TABLE(x86cpu, skx_cpuids);
+
+static u8 get_src_id(struct skx_dev *d)
+{
+	u32 reg;
+
+	pci_read_config_dword(d->util_all, 0xF0, &reg);
+
+	return GET_BITFIELD(reg, 12, 14);
+}
+
+static u8 skx_get_node_id(struct skx_dev *d)
+{
+	u32 reg;
+
+	pci_read_config_dword(d->util_all, 0xF4, &reg);
+
+	return GET_BITFIELD(reg, 0, 2);
+}
+
+static int get_dimm_attr(u32 reg, int lobit, int hibit, int add, int minval,
+			 int maxval, char *name)
+{
+	u32 val = GET_BITFIELD(reg, lobit, hibit);
+
+	if (val < minval || val > maxval) {
+		edac_dbg(2, "bad %s = %d (raw=%x)\n", name, val, reg);
+		return -EINVAL;
+	}
+	return val + add;
+}
+
+#define IS_DIMM_PRESENT(mtr)	GET_BITFIELD((mtr), 15, 15)
+
+#define numrank(reg)	get_dimm_attr((reg), 12, 13, 0, 1, 2, "ranks")
+#define numrow(reg)	get_dimm_attr((reg), 2, 4, 12, 1, 6, "rows")
+#define numcol(reg)	get_dimm_attr((reg), 0, 1, 10, 0, 2, "cols")
+
+static int get_width(u32 mtr)
+{
+	switch (GET_BITFIELD(mtr, 8, 9)) {
+	case 0:
+		return DEV_X4;
+	case 1:
+		return DEV_X8;
+	case 2:
+		return DEV_X16;
+	}
+	return DEV_UNKNOWN;
+}
+
+static int skx_get_hi_lo(void)
+{
+	struct pci_dev *pdev;
+	u32 reg;
+
+	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2034, NULL);
+	if (!pdev) {
+		edac_dbg(0, "Can't get tolm/tohm\n");
+		return -ENODEV;
+	}
+
+	pci_read_config_dword(pdev, 0xD0, &reg);
+	skx_tolm = reg;
+	pci_read_config_dword(pdev, 0xD4, &reg);
+	skx_tohm = reg;
+	pci_read_config_dword(pdev, 0xD8, &reg);
+	skx_tohm |= (u64)reg << 32;
+
+	pci_dev_put(pdev);
+	edac_dbg(2, "tolm=%llx tohm=%llx\n", skx_tolm, skx_tohm);
+
+	return 0;
+}
+
+static int get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm,
+			 struct skx_imc *imc, int chan, int dimmno)
+{
+	int banks = 16, ranks, rows, cols, npages;
+	u64 size;
+
+	if (!IS_DIMM_PRESENT(mtr))
+		return 0;
+	ranks = numrank(mtr);
+	rows = numrow(mtr);
+	cols = numcol(mtr);
+
+	/*
+	 * Compute size in 8-byte (2^3) words, then shift to MiB (2^20)
+	 */
+	size = ((1ull << (rows + cols + ranks)) * banks) >> (20 - 3);
+	npages = MiB_TO_PAGES(size);
+
+	edac_dbg(0, "mc#%d: channel %d, dimm %d, %lld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
+		 imc->mc, chan, dimmno, size, npages,
+		 banks, ranks, rows, cols);
+
+	imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mtr, 0, 0);
+	imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mtr, 9, 9);
+	imc->chan[chan].dimms[dimmno].fine_grain_bank = GET_BITFIELD(amap, 0, 0);
+	imc->chan[chan].dimms[dimmno].rowbits = rows;
+	imc->chan[chan].dimms[dimmno].colbits = cols;
+
+	dimm->nr_pages = npages;
+	dimm->grain = 32;
+	dimm->dtype = get_width(mtr);
+	dimm->mtype = MEM_DDR4;
+	dimm->edac_mode = EDAC_SECDED; /* likely better than this */
+	snprintf(dimm->label, sizeof(dimm->label), "CPU_SrcID#%u_MC#%u_Chan#%u_DIMM#%u",
+		 imc->src_id, imc->lmc, chan, dimmno);
+
+	return 1;
+}
+
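`get_dimm_info()` turns geometry bits into a capacity: with `rows + cols + ranks` address bits and 16 banks, the DIMM holds `2^(rows+cols+ranks) * banks` 8-byte words, so shifting by `20 - 3` converts words to MiB. Worked through for a typical DDR4 geometry (the numbers here are assumed for illustration, not taken from the diff):

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
	int rows = 17, cols = 10, ranks = 1, banks = 16;

	/* Size in 8-byte words, then shifted down to MiB (2^20). */
	uint64_t mib = ((1ull << (rows + cols + ranks)) * banks) >> (20 - 3);

	/* 2^28 words * 16 banks = 2^32 words = 2^35 bytes = 32 GiB. */
	assert(mib == 32 * 1024);
	return 0;
}
```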
383 | #define SKX_GET_MTMTR(dev, reg) \ | ||
384 | pci_read_config_dword((dev), 0x87c, ®) | ||
385 | |||
386 | static bool skx_check_ecc(struct pci_dev *pdev) | ||
387 | { | ||
388 | u32 mtmtr; | ||
389 | |||
390 | SKX_GET_MTMTR(pdev, mtmtr); | ||
391 | |||
392 | return !!GET_BITFIELD(mtmtr, 2, 2); | ||
393 | } | ||
394 | |||
395 | static int skx_get_dimm_config(struct mem_ctl_info *mci) | ||
396 | { | ||
397 | struct skx_pvt *pvt = mci->pvt_info; | ||
398 | struct skx_imc *imc = pvt->imc; | ||
399 | struct dimm_info *dimm; | ||
400 | int i, j; | ||
401 | u32 mtr, amap; | ||
402 | int ndimms; | ||
403 | |||
404 | for (i = 0; i < NUM_CHANNELS; i++) { | ||
405 | ndimms = 0; | ||
406 | pci_read_config_dword(imc->chan[i].cdev, 0x8C, &amap); | ||
407 | for (j = 0; j < NUM_DIMMS; j++) { | ||
408 | dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, | ||
409 | mci->n_layers, i, j, 0); | ||
410 | pci_read_config_dword(imc->chan[i].cdev, | ||
411 | 0x80 + 4*j, &mtr); | ||
412 | ndimms += get_dimm_info(mtr, amap, dimm, imc, i, j); | ||
413 | } | ||
414 | if (ndimms && !skx_check_ecc(imc->chan[0].cdev)) { | ||
415 | skx_printk(KERN_ERR, "ECC is disabled on imc %d\n", imc->mc); | ||
416 | return -ENODEV; | ||
417 | } | ||
418 | } | ||
419 | |||
420 | return 0; | ||
421 | } | ||
422 | |||
423 | static void skx_unregister_mci(struct skx_imc *imc) | ||
424 | { | ||
425 | struct mem_ctl_info *mci = imc->mci; | ||
426 | |||
427 | if (!mci) | ||
428 | return; | ||
429 | |||
430 | edac_dbg(0, "MC%d: mci = %p\n", imc->mc, mci); | ||
431 | |||
432 | /* Remove MC sysfs nodes */ | ||
433 | edac_mc_del_mc(mci->pdev); | ||
434 | |||
435 | edac_dbg(1, "%s: free mci struct\n", mci->ctl_name); | ||
436 | kfree(mci->ctl_name); | ||
437 | edac_mc_free(mci); | ||
438 | } | ||
439 | |||
440 | static int skx_register_mci(struct skx_imc *imc) | ||
441 | { | ||
442 | struct mem_ctl_info *mci; | ||
443 | struct edac_mc_layer layers[2]; | ||
444 | struct pci_dev *pdev = imc->chan[0].cdev; | ||
445 | struct skx_pvt *pvt; | ||
446 | int rc; | ||
447 | |||
448 | /* allocate a new MC control structure */ | ||
449 | layers[0].type = EDAC_MC_LAYER_CHANNEL; | ||
450 | layers[0].size = NUM_CHANNELS; | ||
451 | layers[0].is_virt_csrow = false; | ||
452 | layers[1].type = EDAC_MC_LAYER_SLOT; | ||
453 | layers[1].size = NUM_DIMMS; | ||
454 | layers[1].is_virt_csrow = true; | ||
455 | mci = edac_mc_alloc(imc->mc, ARRAY_SIZE(layers), layers, | ||
456 | sizeof(struct skx_pvt)); | ||
457 | |||
458 | if (unlikely(!mci)) | ||
459 | return -ENOMEM; | ||
460 | |||
461 | edac_dbg(0, "MC#%d: mci = %p\n", imc->mc, mci); | ||
462 | |||
463 | /* Associate skx_dev and mci for future usage */ | ||
464 | imc->mci = mci; | ||
465 | pvt = mci->pvt_info; | ||
466 | pvt->imc = imc; | ||
467 | |||
468 | mci->ctl_name = kasprintf(GFP_KERNEL, "Skylake Socket#%d IMC#%d", | ||
469 | imc->node_id, imc->lmc); | ||
470 | mci->mtype_cap = MEM_FLAG_DDR4; | ||
471 | mci->edac_ctl_cap = EDAC_FLAG_NONE; | ||
472 | mci->edac_cap = EDAC_FLAG_NONE; | ||
473 | mci->mod_name = "skx_edac.c"; | ||
474 | mci->dev_name = pci_name(imc->chan[0].cdev); | ||
475 | mci->mod_ver = SKX_REVISION; | ||
476 | mci->ctl_page_to_phys = NULL; | ||
477 | |||
478 | rc = skx_get_dimm_config(mci); | ||
479 | if (rc < 0) | ||
480 | goto fail; | ||
481 | |||
482 | /* record ptr to the generic device */ | ||
483 | mci->pdev = &pdev->dev; | ||
484 | |||
485 | /* add this new MC control structure to EDAC's list of MCs */ | ||
486 | if (unlikely(edac_mc_add_mc(mci))) { | ||
487 | edac_dbg(0, "MC: failed edac_mc_add_mc()\n"); | ||
488 | rc = -EINVAL; | ||
489 | goto fail; | ||
490 | } | ||
491 | |||
492 | return 0; | ||
493 | |||
494 | fail: | ||
495 | kfree(mci->ctl_name); | ||
496 | edac_mc_free(mci); | ||
497 | imc->mci = NULL; | ||
498 | return rc; | ||
499 | } | ||
500 | |||
501 | #define SKX_MAX_SAD 24 | ||
502 | |||
503 | #define SKX_GET_SAD(d, i, reg) \ | ||
504 | pci_read_config_dword((d)->sad_all, 0x60 + 8 * (i), ®) | ||
505 | #define SKX_GET_ILV(d, i, reg) \ | ||
506 | pci_read_config_dword((d)->sad_all, 0x64 + 8 * (i), ®) | ||
507 | |||
508 | #define SKX_SAD_MOD3MODE(sad) GET_BITFIELD((sad), 30, 31) | ||
509 | #define SKX_SAD_MOD3(sad) GET_BITFIELD((sad), 27, 27) | ||
510 | #define SKX_SAD_LIMIT(sad) (((u64)GET_BITFIELD((sad), 7, 26) << 26) | MASK26) | ||
511 | #define SKX_SAD_MOD3ASMOD2(sad) GET_BITFIELD((sad), 5, 6) | ||
512 | #define SKX_SAD_ATTR(sad) GET_BITFIELD((sad), 3, 4) | ||
513 | #define SKX_SAD_INTERLEAVE(sad) GET_BITFIELD((sad), 1, 2) | ||
514 | #define SKX_SAD_ENABLE(sad) GET_BITFIELD((sad), 0, 0) | ||
515 | |||
516 | #define SKX_ILV_REMOTE(tgt) (((tgt) & 8) == 0) | ||
517 | #define SKX_ILV_TARGET(tgt) ((tgt) & 7) | ||
518 | |||
519 | static bool skx_sad_decode(struct decoded_addr *res) | ||
520 | { | ||
521 | struct skx_dev *d = list_first_entry(&skx_edac_list, typeof(*d), list); | ||
522 | u64 addr = res->addr; | ||
523 | int i, idx, tgt, lchan, shift; | ||
524 | u32 sad, ilv; | ||
525 | u64 limit, prev_limit; | ||
526 | int remote = 0; | ||
527 | |||
528 | /* Simple sanity check for I/O space or out of range */ | ||
529 | if (addr >= skx_tohm || (addr >= skx_tolm && addr < BIT_ULL(32))) { | ||
530 | edac_dbg(0, "Address %llx out of range\n", addr); | ||
531 | return false; | ||
532 | } | ||
533 | |||
534 | restart: | ||
535 | prev_limit = 0; | ||
536 | for (i = 0; i < SKX_MAX_SAD; i++) { | ||
537 | SKX_GET_SAD(d, i, sad); | ||
538 | limit = SKX_SAD_LIMIT(sad); | ||
539 | if (SKX_SAD_ENABLE(sad)) { | ||
540 | if (addr >= prev_limit && addr <= limit) | ||
541 | goto sad_found; | ||
542 | } | ||
543 | prev_limit = limit + 1; | ||
544 | } | ||
545 | edac_dbg(0, "No SAD entry for %llx\n", addr); | ||
546 | return false; | ||
547 | |||
548 | sad_found: | ||
549 | SKX_GET_ILV(d, i, ilv); | ||
550 | |||
551 | switch (SKX_SAD_INTERLEAVE(sad)) { | ||
552 | case 0: | ||
553 | idx = GET_BITFIELD(addr, 6, 8); | ||
554 | break; | ||
555 | case 1: | ||
556 | idx = GET_BITFIELD(addr, 8, 10); | ||
557 | break; | ||
558 | case 2: | ||
559 | idx = GET_BITFIELD(addr, 12, 14); | ||
560 | break; | ||
561 | case 3: | ||
562 | idx = GET_BITFIELD(addr, 30, 32); | ||
563 | break; | ||
564 | } | ||
565 | |||
566 | tgt = GET_BITFIELD(ilv, 4 * idx, 4 * idx + 3); | ||
567 | |||
568 | /* If point to another node, find it and start over */ | ||
569 | if (SKX_ILV_REMOTE(tgt)) { | ||
570 | if (remote) { | ||
571 | edac_dbg(0, "Double remote!\n"); | ||
572 | return false; | ||
573 | } | ||
574 | remote = 1; | ||
575 | list_for_each_entry(d, &skx_edac_list, list) { | ||
576 | if (d->imc[0].src_id == SKX_ILV_TARGET(tgt)) | ||
577 | goto restart; | ||
578 | } | ||
579 | edac_dbg(0, "Can't find node %d\n", SKX_ILV_TARGET(tgt)); | ||
580 | return false; | ||
581 | } | ||
582 | |||
583 | if (SKX_SAD_MOD3(sad) == 0) | ||
584 | lchan = SKX_ILV_TARGET(tgt); | ||
585 | else { | ||
586 | switch (SKX_SAD_MOD3MODE(sad)) { | ||
587 | case 0: | ||
588 | shift = 6; | ||
589 | break; | ||
590 | case 1: | ||
591 | shift = 8; | ||
592 | break; | ||
593 | case 2: | ||
594 | shift = 12; | ||
595 | break; | ||
596 | default: | ||
597 | edac_dbg(0, "illegal mod3mode\n"); | ||
598 | return false; | ||
599 | } | ||
600 | switch (SKX_SAD_MOD3ASMOD2(sad)) { | ||
601 | case 0: | ||
602 | lchan = (addr >> shift) % 3; | ||
603 | break; | ||
604 | case 1: | ||
605 | lchan = (addr >> shift) % 2; | ||
606 | break; | ||
607 | case 2: | ||
608 | lchan = (addr >> shift) % 2; | ||
609 | lchan = (lchan << 1) | ~lchan; | ||
610 | break; | ||
611 | case 3: | ||
612 | lchan = ((addr >> shift) % 2) << 1; | ||
613 | break; | ||
614 | } | ||
615 | lchan = (lchan << 1) | (SKX_ILV_TARGET(tgt) & 1); | ||
616 | } | ||
617 | |||
618 | res->dev = d; | ||
619 | res->socket = d->imc[0].src_id; | ||
620 | res->imc = GET_BITFIELD(d->mcroute, lchan * 3, lchan * 3 + 2); | ||
621 | res->channel = GET_BITFIELD(d->mcroute, lchan * 2 + 18, lchan * 2 + 19); | ||
622 | |||
623 | edac_dbg(2, "%llx: socket=%d imc=%d channel=%d\n", | ||
624 | res->addr, res->socket, res->imc, res->channel); | ||
625 | return true; | ||
626 | } | ||
627 | |||
628 | #define SKX_MAX_TAD 8 | ||
629 | |||
630 | #define SKX_GET_TADBASE(d, mc, i, reg) \ | ||
631 | pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x850 + 4 * (i), ®) | ||
632 | #define SKX_GET_TADWAYNESS(d, mc, i, reg) \ | ||
633 | pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x880 + 4 * (i), ®) | ||
634 | #define SKX_GET_TADCHNILVOFFSET(d, mc, ch, i, reg) \ | ||
635 | pci_read_config_dword((d)->imc[mc].chan[ch].cdev, 0x90 + 4 * (i), ®) | ||
636 | |||
637 | #define SKX_TAD_BASE(b) ((u64)GET_BITFIELD((b), 12, 31) << 26) | ||
638 | #define SKX_TAD_SKT_GRAN(b) GET_BITFIELD((b), 4, 5) | ||
639 | #define SKX_TAD_CHN_GRAN(b) GET_BITFIELD((b), 6, 7) | ||
640 | #define SKX_TAD_LIMIT(b) (((u64)GET_BITFIELD((b), 12, 31) << 26) | MASK26) | ||
641 | #define SKX_TAD_OFFSET(b) ((u64)GET_BITFIELD((b), 4, 23) << 26) | ||
642 | #define SKX_TAD_SKTWAYS(b) (1 << GET_BITFIELD((b), 10, 11)) | ||
643 | #define SKX_TAD_CHNWAYS(b) (GET_BITFIELD((b), 8, 9) + 1) | ||
644 | |||
645 | /* which bit is used for both socket and channel interleave */ | ||
646 | static int skx_granularity[] = { 6, 8, 12, 30 }; | ||
647 | |||
648 | static u64 skx_do_interleave(u64 addr, int shift, int ways, u64 lowbits) | ||
649 | { | ||
650 | addr >>= shift; | ||
651 | addr /= ways; | ||
652 | addr <<= shift; | ||
653 | |||
654 | return addr | (lowbits & ((1ull << shift) - 1)); | ||
655 | } | ||
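skx_do_interleave() undoes an interleave by deleting the way-selection above bit 'shift' (the integer divide) and splicing the original low bits back underneath. A quick user-space sketch of the same arithmetic, with made-up values:

	#include <stdint.h>
	#include <stdio.h>

	/* Same math as skx_do_interleave(): divide out 'ways' above bit
	 * 'shift', then restore the low bits from 'lowbits'. */
	static uint64_t do_interleave(uint64_t addr, int shift, int ways,
				      uint64_t lowbits)
	{
		addr >>= shift;
		addr /= ways;
		addr <<= shift;

		return addr | (lowbits & ((1ULL << shift) - 1));
	}

	int main(void)
	{
		uint64_t addr = 0x12345678;	/* hypothetical system address */

		/* 2-way interleave selected at bit 6 (64-byte granularity) */
		printf("0x%llx -> 0x%llx\n", (unsigned long long)addr,
		       (unsigned long long)do_interleave(addr, 6, 2, addr));
		return 0;
	}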
656 | |||
657 | static bool skx_tad_decode(struct decoded_addr *res) | ||
658 | { | ||
659 | int i; | ||
660 | u32 base, wayness, chnilvoffset; | ||
661 | int skt_interleave_bit, chn_interleave_bit; | ||
662 | u64 channel_addr; | ||
663 | |||
664 | for (i = 0; i < SKX_MAX_TAD; i++) { | ||
665 | SKX_GET_TADBASE(res->dev, res->imc, i, base); | ||
666 | SKX_GET_TADWAYNESS(res->dev, res->imc, i, wayness); | ||
667 | if (SKX_TAD_BASE(base) <= res->addr && res->addr <= SKX_TAD_LIMIT(wayness)) | ||
668 | goto tad_found; | ||
669 | } | ||
670 | edac_dbg(0, "No TAD entry for %llx\n", res->addr); | ||
671 | return false; | ||
672 | |||
673 | tad_found: | ||
674 | res->sktways = SKX_TAD_SKTWAYS(wayness); | ||
675 | res->chanways = SKX_TAD_CHNWAYS(wayness); | ||
676 | skt_interleave_bit = skx_granularity[SKX_TAD_SKT_GRAN(base)]; | ||
677 | chn_interleave_bit = skx_granularity[SKX_TAD_CHN_GRAN(base)]; | ||
678 | |||
679 | SKX_GET_TADCHNILVOFFSET(res->dev, res->imc, res->channel, i, chnilvoffset); | ||
680 | channel_addr = res->addr - SKX_TAD_OFFSET(chnilvoffset); | ||
681 | |||
682 | if (res->chanways == 3 && skt_interleave_bit > chn_interleave_bit) { | ||
683 | /* Must handle channel first, then socket */ | ||
684 | channel_addr = skx_do_interleave(channel_addr, chn_interleave_bit, | ||
685 | res->chanways, channel_addr); | ||
686 | channel_addr = skx_do_interleave(channel_addr, skt_interleave_bit, | ||
687 | res->sktways, channel_addr); | ||
688 | } else { | ||
689 | /* Handle socket then channel. Preserve low bits from original address */ | ||
690 | channel_addr = skx_do_interleave(channel_addr, skt_interleave_bit, | ||
691 | res->sktways, res->addr); | ||
692 | channel_addr = skx_do_interleave(channel_addr, chn_interleave_bit, | ||
693 | res->chanways, res->addr); | ||
694 | } | ||
695 | |||
696 | res->chan_addr = channel_addr; | ||
697 | |||
698 | edac_dbg(2, "%llx: chan_addr=%llx sktways=%d chanways=%d\n", | ||
699 | res->addr, res->chan_addr, res->sktways, res->chanways); | ||
700 | return true; | ||
701 | } | ||
702 | |||
703 | #define SKX_MAX_RIR 4 | ||
704 | |||
705 | #define SKX_GET_RIRWAYNESS(d, mc, ch, i, reg) \ | ||
706 | pci_read_config_dword((d)->imc[mc].chan[ch].cdev, \ | ||
707 | 0x108 + 4 * (i), ®) | ||
708 | #define SKX_GET_RIRILV(d, mc, ch, idx, i, reg) \ | ||
709 | pci_read_config_dword((d)->imc[mc].chan[ch].cdev, \ | ||
710 | 0x120 + 16 * idx + 4 * (i), ®) | ||
711 | |||
712 | #define SKX_RIR_VALID(b) GET_BITFIELD((b), 31, 31) | ||
713 | #define SKX_RIR_LIMIT(b) (((u64)GET_BITFIELD((b), 1, 11) << 29) | MASK29) | ||
714 | #define SKX_RIR_WAYS(b) (1 << GET_BITFIELD((b), 28, 29)) | ||
715 | #define SKX_RIR_CHAN_RANK(b) GET_BITFIELD((b), 16, 19) | ||
716 | #define SKX_RIR_OFFSET(b) ((u64)(GET_BITFIELD((b), 2, 15) << 26)) | ||
717 | |||
718 | static bool skx_rir_decode(struct decoded_addr *res) | ||
719 | { | ||
720 | int i, idx, chan_rank; | ||
721 | int shift; | ||
722 | u32 rirway, rirlv; | ||
723 | u64 rank_addr, prev_limit = 0, limit; | ||
724 | |||
725 | if (res->dev->imc[res->imc].chan[res->channel].dimms[0].close_pg) | ||
726 | shift = 6; | ||
727 | else | ||
728 | shift = 13; | ||
729 | |||
730 | for (i = 0; i < SKX_MAX_RIR; i++) { | ||
731 | SKX_GET_RIRWAYNESS(res->dev, res->imc, res->channel, i, rirway); | ||
732 | limit = SKX_RIR_LIMIT(rirway); | ||
733 | if (SKX_RIR_VALID(rirway)) { | ||
734 | if (prev_limit <= res->chan_addr && | ||
735 | res->chan_addr <= limit) | ||
736 | goto rir_found; | ||
737 | } | ||
738 | prev_limit = limit; | ||
739 | } | ||
740 | edac_dbg(0, "No RIR entry for %llx\n", res->addr); | ||
741 | return false; | ||
742 | |||
743 | rir_found: | ||
744 | rank_addr = res->chan_addr >> shift; | ||
745 | rank_addr /= SKX_RIR_WAYS(rirway); | ||
746 | rank_addr <<= shift; | ||
747 | rank_addr |= res->chan_addr & GENMASK_ULL(shift - 1, 0); | ||
748 | |||
749 | res->rank_address = rank_addr; | ||
750 | idx = (res->chan_addr >> shift) % SKX_RIR_WAYS(rirway); | ||
751 | |||
752 | SKX_GET_RIRILV(res->dev, res->imc, res->channel, idx, i, rirlv); | ||
753 | res->rank_address = rank_addr - SKX_RIR_OFFSET(rirlv); | ||
754 | chan_rank = SKX_RIR_CHAN_RANK(rirlv); | ||
755 | res->channel_rank = chan_rank; | ||
756 | res->dimm = chan_rank / 4; | ||
757 | res->rank = chan_rank % 4; | ||
758 | |||
759 | edac_dbg(2, "%llx: dimm=%d rank=%d chan_rank=%d rank_addr=%llx\n", | ||
760 | res->addr, res->dimm, res->rank, | ||
761 | res->channel_rank, res->rank_address); | ||
762 | return true; | ||
763 | } | ||
764 | |||
765 | static u8 skx_close_row[] = { | ||
766 | 15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33 | ||
767 | }; | ||
768 | static u8 skx_close_column[] = { | ||
769 | 3, 4, 5, 14, 19, 23, 24, 25, 26, 27 | ||
770 | }; | ||
771 | static u8 skx_open_row[] = { | ||
772 | 14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33 | ||
773 | }; | ||
774 | static u8 skx_open_column[] = { | ||
775 | 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 | ||
776 | }; | ||
777 | static u8 skx_open_fine_column[] = { | ||
778 | 3, 4, 5, 7, 8, 9, 10, 11, 12, 13 | ||
779 | }; | ||
780 | |||
781 | static int skx_bits(u64 addr, int nbits, u8 *bits) | ||
782 | { | ||
783 | int i, res = 0; | ||
784 | |||
785 | for (i = 0; i < nbits; i++) | ||
786 | res |= ((addr >> bits[i]) & 1) << i; | ||
787 | return res; | ||
788 | } | ||
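skx_bits() gathers scattered rank-address bits into one contiguous field: bit i of the result comes from address bit bits[i], so e.g. skx_close_row maps address bits 15, 16, 17, ... onto row bits 0, 1, 2, ... A self-contained sketch with an illustrative address:

	#include <stdint.h>
	#include <stdio.h>

	static const uint8_t close_row[] = {
		15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33
	};

	/* Collect addr bit bits[i] into result bit i, as skx_bits() does */
	static int gather_bits(uint64_t addr, int nbits, const uint8_t *bits)
	{
		int i, res = 0;

		for (i = 0; i < nbits; i++)
			res |= ((addr >> bits[i]) & 1) << i;
		return res;
	}

	int main(void)
	{
		/* Bits 15 and 17 set: row bits 0 and 2 -> row = 0b101 = 5 */
		uint64_t rank_addr = (1ULL << 15) | (1ULL << 17);

		printf("row=0x%x\n", gather_bits(rank_addr, 17, close_row));
		return 0;
	}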
789 | |||
790 | static int skx_bank_bits(u64 addr, int b0, int b1, int do_xor, int x0, int x1) | ||
791 | { | ||
792 | int ret = GET_BITFIELD(addr, b0, b0) | (GET_BITFIELD(addr, b1, b1) << 1); | ||
793 | |||
794 | if (do_xor) | ||
795 | ret ^= GET_BITFIELD(addr, x0, x0) | (GET_BITFIELD(addr, x1, x1) << 1); | ||
796 | |||
797 | return ret; | ||
798 | } | ||
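skx_bank_bits() forms a two-bit bank field from address bits b0/b1 and, when the DIMM has bank XOR enabled, hashes in bits x0/x1 to spread accesses across banks. The same computation in isolation (parameter values as used for the open-page bank address below):

	#include <stdint.h>
	#include <stdio.h>

	static unsigned int bit(uint64_t addr, int b)
	{
		return (addr >> b) & 1;
	}

	/* Two-bit field from b0/b1, optionally XOR-hashed with x0/x1 */
	static int bank_bits(uint64_t addr, int b0, int b1, int do_xor,
			     int x0, int x1)
	{
		int ret = bit(addr, b0) | (bit(addr, b1) << 1);

		if (do_xor)
			ret ^= bit(addr, x0) | (bit(addr, x1) << 1);
		return ret;
	}

	int main(void)
	{
		uint64_t rank_addr = (1ULL << 18) | (1ULL << 23);

		/* Open page: bank bits 18/19, XOR bits 22/23 -> 1 without
		 * hashing, 1 ^ 2 = 3 with it */
		printf("no-xor=%d xor=%d\n",
		       bank_bits(rank_addr, 18, 19, 0, 22, 23),
		       bank_bits(rank_addr, 18, 19, 1, 22, 23));
		return 0;
	}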
799 | |||
800 | static bool skx_mad_decode(struct decoded_addr *r) | ||
801 | { | ||
802 | struct skx_dimm *dimm = &r->dev->imc[r->imc].chan[r->channel].dimms[r->dimm]; | ||
803 | int bg0 = dimm->fine_grain_bank ? 6 : 13; | ||
804 | |||
805 | if (dimm->close_pg) { | ||
806 | r->row = skx_bits(r->rank_address, dimm->rowbits, skx_close_row); | ||
807 | r->column = skx_bits(r->rank_address, dimm->colbits, skx_close_column); | ||
808 | r->column |= 0x400; /* C10 is autoprecharge, always set */ | ||
809 | r->bank_address = skx_bank_bits(r->rank_address, 8, 9, dimm->bank_xor_enable, 22, 28); | ||
810 | r->bank_group = skx_bank_bits(r->rank_address, 6, 7, dimm->bank_xor_enable, 20, 21); | ||
811 | } else { | ||
812 | r->row = skx_bits(r->rank_address, dimm->rowbits, skx_open_row); | ||
813 | if (dimm->fine_grain_bank) | ||
814 | r->column = skx_bits(r->rank_address, dimm->colbits, skx_open_fine_column); | ||
815 | else | ||
816 | r->column = skx_bits(r->rank_address, dimm->colbits, skx_open_column); | ||
817 | r->bank_address = skx_bank_bits(r->rank_address, 18, 19, dimm->bank_xor_enable, 22, 23); | ||
818 | r->bank_group = skx_bank_bits(r->rank_address, bg0, 17, dimm->bank_xor_enable, 20, 21); | ||
819 | } | ||
820 | r->row &= (1u << dimm->rowbits) - 1; | ||
821 | |||
822 | edac_dbg(2, "%llx: row=%x col=%x bank_addr=%d bank_group=%d\n", | ||
823 | r->addr, r->row, r->column, r->bank_address, | ||
824 | r->bank_group); | ||
825 | return true; | ||
826 | } | ||
827 | |||
828 | static bool skx_decode(struct decoded_addr *res) | ||
829 | { | ||
830 | |||
831 | return skx_sad_decode(res) && skx_tad_decode(res) && | ||
832 | skx_rir_decode(res) && skx_mad_decode(res); | ||
833 | } | ||
834 | |||
835 | #ifdef CONFIG_EDAC_DEBUG | ||
836 | /* | ||
837 | * Debug feature. Make /sys/kernel/debug/skx_edac_test/addr. | ||
838 | * Write an address to this file to exercise the address decode | ||
839 | * logic in this driver. | ||
840 | */ | ||
841 | static struct dentry *skx_test; | ||
842 | static u64 skx_fake_addr; | ||
843 | |||
844 | static int debugfs_u64_set(void *data, u64 val) | ||
845 | { | ||
846 | struct decoded_addr res; | ||
847 | |||
848 | res.addr = val; | ||
849 | skx_decode(&res); | ||
850 | |||
851 | return 0; | ||
852 | } | ||
853 | |||
854 | DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n"); | ||
855 | |||
856 | static struct dentry *mydebugfs_create(const char *name, umode_t mode, | ||
857 | struct dentry *parent, u64 *value) | ||
858 | { | ||
859 | return debugfs_create_file(name, mode, parent, value, &fops_u64_wo); | ||
860 | } | ||
861 | |||
862 | static void setup_skx_debug(void) | ||
863 | { | ||
864 | skx_test = debugfs_create_dir("skx_edac_test", NULL); | ||
865 | mydebugfs_create("addr", S_IWUSR, skx_test, &skx_fake_addr); | ||
866 | } | ||
867 | |||
868 | static void teardown_skx_debug(void) | ||
869 | { | ||
870 | debugfs_remove_recursive(skx_test); | ||
871 | } | ||
872 | #else | ||
873 | static void setup_skx_debug(void) | ||
874 | { | ||
875 | } | ||
876 | |||
877 | static void teardown_skx_debug(void) | ||
878 | { | ||
879 | } | ||
880 | #endif /*CONFIG_EDAC_DEBUG*/ | ||
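With CONFIG_EDAC_DEBUG set, the whole decode chain can be driven from user space: debugfs_u64_set() feeds the written value straight into skx_decode(), and the results show up in the edac_dbg() trace output. simple_attr_write() parses with base 0, so a hex address such as 'echo 0x2040000000 > /sys/kernel/debug/skx_edac_test/addr' should be accepted; a minimal C equivalent (the address is illustrative and debugfs must be mounted):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		const char *addr = "0x2040000000";	/* made-up physical address */
		int fd = open("/sys/kernel/debug/skx_edac_test/addr", O_WRONLY);

		if (fd < 0) {
			perror("open");	/* needs root and CONFIG_EDAC_DEBUG */
			return 1;
		}
		if (write(fd, addr, strlen(addr)) < 0)
			perror("write");
		close(fd);
		return 0;
	}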
881 | |||
882 | static void skx_mce_output_error(struct mem_ctl_info *mci, | ||
883 | const struct mce *m, | ||
884 | struct decoded_addr *res) | ||
885 | { | ||
886 | enum hw_event_mc_err_type tp_event; | ||
887 | char *type, *optype, msg[256]; | ||
888 | bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0); | ||
889 | bool overflow = GET_BITFIELD(m->status, 62, 62); | ||
890 | bool uncorrected_error = GET_BITFIELD(m->status, 61, 61); | ||
891 | bool recoverable; | ||
892 | u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52); | ||
893 | u32 mscod = GET_BITFIELD(m->status, 16, 31); | ||
894 | u32 errcode = GET_BITFIELD(m->status, 0, 15); | ||
895 | u32 optypenum = GET_BITFIELD(m->status, 4, 6); | ||
896 | |||
897 | recoverable = GET_BITFIELD(m->status, 56, 56); | ||
898 | |||
899 | if (uncorrected_error) { | ||
900 | if (ripv) { | ||
901 | type = "FATAL"; | ||
902 | tp_event = HW_EVENT_ERR_FATAL; | ||
903 | } else { | ||
904 | type = "NON_FATAL"; | ||
905 | tp_event = HW_EVENT_ERR_UNCORRECTED; | ||
906 | } | ||
907 | } else { | ||
908 | type = "CORRECTED"; | ||
909 | tp_event = HW_EVENT_ERR_CORRECTED; | ||
910 | } | ||
911 | |||
912 | /* | ||
913 | * According to Table 15-9 of the Intel Architecture spec, vol 3A, | ||
914 | * memory errors should fit in this mask: | ||
915 | * 000f 0000 1mmm cccc (binary) | ||
916 | * where: | ||
917 | * f = Correction Report Filtering Bit. If 1, subsequent errors | ||
918 | * won't be shown | ||
919 | * mmm = error type | ||
920 | * cccc = channel | ||
921 | * If the mask doesn't match, report an error to the parsing logic | ||
922 | */ | ||
923 | if (!((errcode & 0xef80) == 0x80)) { | ||
924 | optype = "Can't parse: it is not a mem"; | ||
925 | } else { | ||
926 | switch (optypenum) { | ||
927 | case 0: | ||
928 | optype = "generic undef request error"; | ||
929 | break; | ||
930 | case 1: | ||
931 | optype = "memory read error"; | ||
932 | break; | ||
933 | case 2: | ||
934 | optype = "memory write error"; | ||
935 | break; | ||
936 | case 3: | ||
937 | optype = "addr/cmd error"; | ||
938 | break; | ||
939 | case 4: | ||
940 | optype = "memory scrubbing error"; | ||
941 | break; | ||
942 | default: | ||
943 | optype = "reserved"; | ||
944 | break; | ||
945 | } | ||
946 | } | ||
947 | |||
948 | snprintf(msg, sizeof(msg), | ||
949 | "%s%s err_code:%04x:%04x socket:%d imc:%d rank:%d bg:%d ba:%d row:%x col:%x", | ||
950 | overflow ? " OVERFLOW" : "", | ||
951 | (uncorrected_error && recoverable) ? " recoverable" : "", | ||
952 | mscod, errcode, | ||
953 | res->socket, res->imc, res->rank, | ||
954 | res->bank_group, res->bank_address, res->row, res->column); | ||
955 | |||
956 | edac_dbg(0, "%s\n", msg); | ||
957 | |||
958 | /* Call the helper to output the message */ | ||
959 | edac_mc_handle_error(tp_event, mci, core_err_cnt, | ||
960 | m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0, | ||
961 | res->channel, res->dimm, -1, | ||
962 | optype, msg); | ||
963 | } | ||
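The (errcode & 0xef80) == 0x80 test above encodes the comment's 000f 0000 1mmm cccc pattern: bits 15-13 and 11-8 must be clear and bit 7 set, while f (bit 12), mmm (bits 6-4) and cccc (bits 3-0) are don't-cares. A stand-alone check with illustrative codes:

	#include <stdint.h>
	#include <stdio.h>

	/* MCACOD memory-error pattern 000f 0000 1mmm cccc: of the
	 * constrained bits only bit 7 may be set; bits 12, 6-4 and 3-0
	 * are don't-cares. */
	static int is_mem_errcode(uint16_t errcode)
	{
		return (errcode & 0xef80) == 0x80;
	}

	int main(void)
	{
		/* 0x009f: mmm=001 (memory read error), cccc=0xf -> match */
		printf("0x009f -> %d\n", is_mem_errcode(0x009f));
		/* 0x0405: bit 10 set, bit 7 clear -> not a memory error */
		printf("0x0405 -> %d\n", is_mem_errcode(0x0405));
		return 0;
	}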
964 | |||
965 | static int skx_mce_check_error(struct notifier_block *nb, unsigned long val, | ||
966 | void *data) | ||
967 | { | ||
968 | struct mce *mce = (struct mce *)data; | ||
969 | struct decoded_addr res; | ||
970 | struct mem_ctl_info *mci; | ||
971 | char *type; | ||
972 | |||
973 | if (get_edac_report_status() == EDAC_REPORTING_DISABLED) | ||
974 | return NOTIFY_DONE; | ||
975 | |||
976 | /* ignore unless this is memory-related and carries a valid address */ | ||
977 | if ((mce->status & 0xefff) >> 7 != 1 || !(mce->status & MCI_STATUS_ADDRV)) | ||
978 | return NOTIFY_DONE; | ||
979 | |||
980 | res.addr = mce->addr; | ||
981 | if (!skx_decode(&res)) | ||
982 | return NOTIFY_DONE; | ||
983 | mci = res.dev->imc[res.imc].mci; | ||
984 | |||
985 | if (mce->mcgstatus & MCG_STATUS_MCIP) | ||
986 | type = "Exception"; | ||
987 | else | ||
988 | type = "Event"; | ||
989 | |||
990 | skx_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n"); | ||
991 | |||
992 | skx_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx " | ||
993 | "Bank %d: %016Lx\n", mce->extcpu, type, | ||
994 | mce->mcgstatus, mce->bank, mce->status); | ||
995 | skx_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc); | ||
996 | skx_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr); | ||
997 | skx_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc); | ||
998 | |||
999 | skx_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET " | ||
1000 | "%u APIC %x\n", mce->cpuvendor, mce->cpuid, | ||
1001 | mce->time, mce->socketid, mce->apicid); | ||
1002 | |||
1003 | skx_mce_output_error(mci, mce, &res); | ||
1004 | |||
1005 | return NOTIFY_DONE; | ||
1006 | } | ||
1007 | |||
1008 | static struct notifier_block skx_mce_dec = { | ||
1009 | .notifier_call = skx_mce_check_error, | ||
1010 | }; | ||
1011 | |||
1012 | static void skx_remove(void) | ||
1013 | { | ||
1014 | int i, j; | ||
1015 | struct skx_dev *d, *tmp; | ||
1016 | |||
1017 | edac_dbg(0, "\n"); | ||
1018 | |||
1019 | list_for_each_entry_safe(d, tmp, &skx_edac_list, list) { | ||
1020 | list_del(&d->list); | ||
1021 | for (i = 0; i < NUM_IMC; i++) { | ||
1022 | skx_unregister_mci(&d->imc[i]); | ||
1023 | for (j = 0; j < NUM_CHANNELS; j++) | ||
1024 | pci_dev_put(d->imc[i].chan[j].cdev); | ||
1025 | } | ||
1026 | pci_dev_put(d->util_all); | ||
1027 | pci_dev_put(d->sad_all); | ||
1028 | |||
1029 | kfree(d); | ||
1030 | } | ||
1031 | } | ||
1032 | |||
1033 | /* | ||
1034 | * skx_init: | ||
1035 | * make sure we are running on the correct cpu model | ||
1036 | * search for all the devices we need | ||
1037 | * check which DIMMs are present. | ||
1038 | */ | ||
1039 | int __init skx_init(void) | ||
1040 | { | ||
1041 | const struct x86_cpu_id *id; | ||
1042 | const struct munit *m; | ||
1043 | int rc = 0, i; | ||
1044 | u8 mc = 0, src_id, node_id; | ||
1045 | struct skx_dev *d; | ||
1046 | |||
1047 | edac_dbg(2, "\n"); | ||
1048 | |||
1049 | id = x86_match_cpu(skx_cpuids); | ||
1050 | if (!id) | ||
1051 | return -ENODEV; | ||
1052 | |||
1053 | rc = skx_get_hi_lo(); | ||
1054 | if (rc) | ||
1055 | return rc; | ||
1056 | |||
1057 | rc = get_all_bus_mappings(); | ||
1058 | if (rc < 0) | ||
1059 | goto fail; | ||
1060 | if (rc == 0) { | ||
1061 | edac_dbg(2, "No memory controllers found\n"); | ||
1062 | return -ENODEV; | ||
1063 | } | ||
1064 | |||
1065 | for (m = skx_all_munits; m->did; m++) { | ||
1066 | rc = get_all_munits(m); | ||
1067 | if (rc < 0) | ||
1068 | goto fail; | ||
1069 | if (rc != m->per_socket * skx_num_sockets) { | ||
1070 | edac_dbg(2, "Expected %d, got %d of %x\n", | ||
1071 | m->per_socket * skx_num_sockets, rc, m->did); | ||
1072 | rc = -ENODEV; | ||
1073 | goto fail; | ||
1074 | } | ||
1075 | } | ||
1076 | |||
1077 | list_for_each_entry(d, &skx_edac_list, list) { | ||
1078 | src_id = get_src_id(d); | ||
1079 | node_id = skx_get_node_id(d); | ||
1080 | edac_dbg(2, "src_id=%d node_id=%d\n", src_id, node_id); | ||
1081 | for (i = 0; i < NUM_IMC; i++) { | ||
1082 | d->imc[i].mc = mc++; | ||
1083 | d->imc[i].lmc = i; | ||
1084 | d->imc[i].src_id = src_id; | ||
1085 | d->imc[i].node_id = node_id; | ||
1086 | rc = skx_register_mci(&d->imc[i]); | ||
1087 | if (rc < 0) | ||
1088 | goto fail; | ||
1089 | } | ||
1090 | } | ||
1091 | |||
1092 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | ||
1093 | opstate_init(); | ||
1094 | |||
1095 | setup_skx_debug(); | ||
1096 | |||
1097 | mce_register_decode_chain(&skx_mce_dec); | ||
1098 | |||
1099 | return 0; | ||
1100 | fail: | ||
1101 | skx_remove(); | ||
1102 | return rc; | ||
1103 | } | ||
1104 | |||
1105 | static void __exit skx_exit(void) | ||
1106 | { | ||
1107 | edac_dbg(2, "\n"); | ||
1108 | mce_unregister_decode_chain(&skx_mce_dec); | ||
1109 | skx_remove(); | ||
1110 | teardown_skx_debug(); | ||
1111 | } | ||
1112 | |||
1113 | module_init(skx_init); | ||
1114 | module_exit(skx_exit); | ||
1115 | |||
1116 | module_param(edac_op_state, int, 0444); | ||
1117 | MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); | ||
1118 | |||
1119 | MODULE_LICENSE("GPL v2"); | ||
1120 | MODULE_AUTHOR("Tony Luck"); | ||
1121 | MODULE_DESCRIPTION("MC Driver for Intel Skylake server processors"); | ||
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c index c99c24bc79b0..9ae6c116c474 100644 --- a/drivers/firmware/efi/capsule-loader.c +++ b/drivers/firmware/efi/capsule-loader.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
17 | #include <linux/mutex.h> | 17 | #include <linux/mutex.h> |
18 | #include <linux/efi.h> | 18 | #include <linux/efi.h> |
19 | #include <linux/vmalloc.h> | ||
19 | 20 | ||
20 | #define NO_FURTHER_WRITE_ACTION -1 | 21 | #define NO_FURTHER_WRITE_ACTION -1 |
21 | 22 | ||
@@ -108,14 +109,15 @@ static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info) | |||
108 | int ret; | 109 | int ret; |
109 | void *cap_hdr_temp; | 110 | void *cap_hdr_temp; |
110 | 111 | ||
111 | cap_hdr_temp = kmap(cap_info->pages[0]); | 112 | cap_hdr_temp = vmap(cap_info->pages, cap_info->index, |
113 | VM_MAP, PAGE_KERNEL); | ||
112 | if (!cap_hdr_temp) { | 114 | if (!cap_hdr_temp) { |
113 | pr_debug("%s: kmap() failed\n", __func__); | 115 | pr_debug("%s: vmap() failed\n", __func__); |
114 | return -EFAULT; | 116 | return -EFAULT; |
115 | } | 117 | } |
116 | 118 | ||
117 | ret = efi_capsule_update(cap_hdr_temp, cap_info->pages); | 119 | ret = efi_capsule_update(cap_hdr_temp, cap_info->pages); |
118 | kunmap(cap_info->pages[0]); | 120 | vunmap(cap_hdr_temp); |
119 | if (ret) { | 121 | if (ret) { |
120 | pr_err("%s: efi_capsule_update() failed\n", __func__); | 122 | pr_err("%s: efi_capsule_update() failed\n", __func__); |
121 | return ret; | 123 | return ret; |
diff --git a/drivers/firmware/efi/capsule.c b/drivers/firmware/efi/capsule.c index 53b9fd2293ee..6eedff45e6d7 100644 --- a/drivers/firmware/efi/capsule.c +++ b/drivers/firmware/efi/capsule.c | |||
@@ -190,9 +190,9 @@ efi_capsule_update_locked(efi_capsule_header_t *capsule, | |||
190 | * map the capsule described by @capsule with its data in @pages and | 190 | * map the capsule described by @capsule with its data in @pages and |
191 | * send it to the firmware via the UpdateCapsule() runtime service. | 191 | * send it to the firmware via the UpdateCapsule() runtime service. |
192 | * | 192 | * |
193 | * @capsule must be a virtual mapping of the first page in @pages | 193 | * @capsule must be a virtual mapping of the complete capsule update in the |
194 | * (@pages[0]) in the kernel address space. That is, a | 194 | * kernel address space, as the capsule can be consumed immediately. |
195 | * capsule_header_t that describes the entire contents of the capsule | 195 | * A capsule_header_t that describes the entire contents of the capsule |
196 | * must be at the start of the first data page. | 196 | * must be at the start of the first data page. |
197 | * | 197 | * |
198 | * Even though this function will validate that the firmware supports | 198 | * Even though this function will validate that the firmware supports |
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 98dd47a30fc7..66a94103798b 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig | |||
@@ -50,6 +50,7 @@ config GPIO_DEVRES | |||
50 | config OF_GPIO | 50 | config OF_GPIO |
51 | def_bool y | 51 | def_bool y |
52 | depends on OF | 52 | depends on OF |
53 | depends on HAS_IOMEM | ||
53 | 54 | ||
54 | config GPIO_ACPI | 55 | config GPIO_ACPI |
55 | def_bool y | 56 | def_bool y |
@@ -188,7 +189,7 @@ config GPIO_EP93XX | |||
188 | config GPIO_ETRAXFS | 189 | config GPIO_ETRAXFS |
189 | bool "Axis ETRAX FS General I/O" | 190 | bool "Axis ETRAX FS General I/O" |
190 | depends on CRIS || COMPILE_TEST | 191 | depends on CRIS || COMPILE_TEST |
191 | depends on OF | 192 | depends on OF_GPIO |
192 | select GPIO_GENERIC | 193 | select GPIO_GENERIC |
193 | select GPIOLIB_IRQCHIP | 194 | select GPIOLIB_IRQCHIP |
194 | help | 195 | help |
@@ -214,7 +215,7 @@ config GPIO_GENERIC_PLATFORM | |||
214 | 215 | ||
215 | config GPIO_GRGPIO | 216 | config GPIO_GRGPIO |
216 | tristate "Aeroflex Gaisler GRGPIO support" | 217 | tristate "Aeroflex Gaisler GRGPIO support" |
217 | depends on OF | 218 | depends on OF_GPIO |
218 | select GPIO_GENERIC | 219 | select GPIO_GENERIC |
219 | select IRQ_DOMAIN | 220 | select IRQ_DOMAIN |
220 | help | 221 | help |
@@ -312,7 +313,7 @@ config GPIO_MPC8XXX | |||
312 | config GPIO_MVEBU | 313 | config GPIO_MVEBU |
313 | def_bool y | 314 | def_bool y |
314 | depends on PLAT_ORION | 315 | depends on PLAT_ORION |
315 | depends on OF | 316 | depends on OF_GPIO |
316 | select GENERIC_IRQ_CHIP | 317 | select GENERIC_IRQ_CHIP |
317 | 318 | ||
318 | config GPIO_MXC | 319 | config GPIO_MXC |
@@ -405,7 +406,7 @@ config GPIO_TEGRA | |||
405 | bool "NVIDIA Tegra GPIO support" | 406 | bool "NVIDIA Tegra GPIO support" |
406 | default ARCH_TEGRA | 407 | default ARCH_TEGRA |
407 | depends on ARCH_TEGRA || COMPILE_TEST | 408 | depends on ARCH_TEGRA || COMPILE_TEST |
408 | depends on OF | 409 | depends on OF_GPIO |
409 | help | 410 | help |
410 | Say yes here to support GPIO pins on NVIDIA Tegra SoCs. | 411 | Say yes here to support GPIO pins on NVIDIA Tegra SoCs. |
411 | 412 | ||
@@ -1099,7 +1100,7 @@ menu "SPI GPIO expanders" | |||
1099 | 1100 | ||
1100 | config GPIO_74X164 | 1101 | config GPIO_74X164 |
1101 | tristate "74x164 serial-in/parallel-out 8-bits shift register" | 1102 | tristate "74x164 serial-in/parallel-out 8-bits shift register" |
1102 | depends on OF | 1103 | depends on OF_GPIO |
1103 | help | 1104 | help |
1104 | Driver for 74x164 compatible serial-in/parallel-out 8-outputs | 1105 | Driver for 74x164 compatible serial-in/parallel-out 8-outputs |
1105 | shift registers. This driver can be used to provide access | 1106 | shift registers. This driver can be used to provide access |
diff --git a/drivers/gpio/gpio-max730x.c b/drivers/gpio/gpio-max730x.c index 08807368f007..946d09195598 100644 --- a/drivers/gpio/gpio-max730x.c +++ b/drivers/gpio/gpio-max730x.c | |||
@@ -192,6 +192,10 @@ int __max730x_probe(struct max7301 *ts) | |||
192 | ts->chip.parent = dev; | 192 | ts->chip.parent = dev; |
193 | ts->chip.owner = THIS_MODULE; | 193 | ts->chip.owner = THIS_MODULE; |
194 | 194 | ||
195 | ret = gpiochip_add_data(&ts->chip, ts); | ||
196 | if (ret) | ||
197 | goto exit_destroy; | ||
198 | |||
195 | /* | 199 | /* |
196 | * initialize pullups according to platform data and cache the | 200 | * initialize pullups according to platform data and cache the |
197 | * register values for later use. | 201 | * register values for later use. |
@@ -213,10 +217,6 @@ int __max730x_probe(struct max7301 *ts) | |||
213 | } | 217 | } |
214 | } | 218 | } |
215 | 219 | ||
216 | ret = gpiochip_add_data(&ts->chip, ts); | ||
217 | if (ret) | ||
218 | goto exit_destroy; | ||
219 | |||
220 | return ret; | 220 | return ret; |
221 | 221 | ||
222 | exit_destroy: | 222 | exit_destroy: |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 8ebc5f1eb4c0..700c56baf2de 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
@@ -426,6 +426,8 @@ struct amdgpu_mman { | |||
426 | 426 | ||
427 | /* custom LRU management */ | 427 | /* custom LRU management */ |
428 | struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE]; | 428 | struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE]; |
429 | /* guard for log2_size array, don't add anything in between */ | ||
430 | struct amdgpu_mman_lru guard; | ||
429 | }; | 431 | }; |
430 | 432 | ||
431 | int amdgpu_copy_buffer(struct amdgpu_ring *ring, | 433 | int amdgpu_copy_buffer(struct amdgpu_ring *ring, |
@@ -646,9 +648,9 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev); | |||
646 | void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev); | 648 | void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev); |
647 | int amdgpu_gart_init(struct amdgpu_device *adev); | 649 | int amdgpu_gart_init(struct amdgpu_device *adev); |
648 | void amdgpu_gart_fini(struct amdgpu_device *adev); | 650 | void amdgpu_gart_fini(struct amdgpu_device *adev); |
649 | void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset, | 651 | void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, |
650 | int pages); | 652 | int pages); |
651 | int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset, | 653 | int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, |
652 | int pages, struct page **pagelist, | 654 | int pages, struct page **pagelist, |
653 | dma_addr_t *dma_addr, uint32_t flags); | 655 | dma_addr_t *dma_addr, uint32_t flags); |
654 | 656 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 983175363b06..fe872b82e619 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | |||
@@ -321,6 +321,19 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device * | |||
321 | (le16_to_cpu(path->usConnObjectId) & | 321 | (le16_to_cpu(path->usConnObjectId) & |
322 | OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; | 322 | OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; |
323 | 323 | ||
324 | /* Skip TV/CV support */ | ||
325 | if ((le16_to_cpu(path->usDeviceTag) == | ||
326 | ATOM_DEVICE_TV1_SUPPORT) || | ||
327 | (le16_to_cpu(path->usDeviceTag) == | ||
328 | ATOM_DEVICE_CV_SUPPORT)) | ||
329 | continue; | ||
330 | |||
331 | if (con_obj_id >= ARRAY_SIZE(object_connector_convert)) { | ||
332 | DRM_ERROR("invalid con_obj_id %d for device tag 0x%04x\n", | ||
333 | con_obj_id, le16_to_cpu(path->usDeviceTag)); | ||
334 | continue; | ||
335 | } | ||
336 | |||
324 | connector_type = | 337 | connector_type = |
325 | object_connector_convert[con_obj_id]; | 338 | object_connector_convert[con_obj_id]; |
326 | connector_object_id = con_obj_id; | 339 | connector_object_id = con_obj_id; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index 49de92600074..10b5ddf2c588 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | |||
@@ -200,16 +200,7 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx) | |||
200 | atpx->is_hybrid = false; | 200 | atpx->is_hybrid = false; |
201 | if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { | 201 | if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { |
202 | printk("ATPX Hybrid Graphics\n"); | 202 | printk("ATPX Hybrid Graphics\n"); |
203 | #if 1 | ||
204 | /* This is a temporary hack until the D3 cold support | ||
205 | * makes it upstream. The ATPX power_control method seems | ||
206 | * to still work on even if the system should be using | ||
207 | * the new standardized hybrid D3 cold ACPI interface. | ||
208 | */ | ||
209 | atpx->functions.power_cntl = true; | ||
210 | #else | ||
211 | atpx->functions.power_cntl = false; | 203 | atpx->functions.power_cntl = false; |
212 | #endif | ||
213 | atpx->is_hybrid = true; | 204 | atpx->is_hybrid = true; |
214 | } | 205 | } |
215 | 206 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 921bce2df0b0..0feea347f680 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | |||
@@ -221,7 +221,7 @@ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev) | |||
221 | * Unbinds the requested pages from the gart page table and | 221 | * Unbinds the requested pages from the gart page table and |
222 | * replaces them with the dummy page (all asics). | 222 | * replaces them with the dummy page (all asics). |
223 | */ | 223 | */ |
224 | void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset, | 224 | void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, |
225 | int pages) | 225 | int pages) |
226 | { | 226 | { |
227 | unsigned t; | 227 | unsigned t; |
@@ -268,7 +268,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset, | |||
268 | * (all asics). | 268 | * (all asics). |
269 | * Returns 0 for success, -EINVAL for failure. | 269 | * Returns 0 for success, -EINVAL for failure. |
270 | */ | 270 | */ |
271 | int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset, | 271 | int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, |
272 | int pages, struct page **pagelist, dma_addr_t *dma_addr, | 272 | int pages, struct page **pagelist, dma_addr_t *dma_addr, |
273 | uint32_t flags) | 273 | uint32_t flags) |
274 | { | 274 | { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 9b61c8ba7aaf..716f2afeb6a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | |||
@@ -251,8 +251,8 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, | |||
251 | 251 | ||
252 | adev = amdgpu_get_adev(bo->bdev); | 252 | adev = amdgpu_get_adev(bo->bdev); |
253 | ring = adev->mman.buffer_funcs_ring; | 253 | ring = adev->mman.buffer_funcs_ring; |
254 | old_start = old_mem->start << PAGE_SHIFT; | 254 | old_start = (u64)old_mem->start << PAGE_SHIFT; |
255 | new_start = new_mem->start << PAGE_SHIFT; | 255 | new_start = (u64)new_mem->start << PAGE_SHIFT; |
256 | 256 | ||
257 | switch (old_mem->mem_type) { | 257 | switch (old_mem->mem_type) { |
258 | case TTM_PL_VRAM: | 258 | case TTM_PL_VRAM: |
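The (u64) casts matter because mem->start is a page offset held in an unsigned long: on a 32-bit build, start << PAGE_SHIFT wraps once the buffer sits at or above 4 GiB, and the blit would target the wrong address. A tiny demonstration of the wrap (4 KiB pages assumed):

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12	/* assuming 4 KiB pages */

	int main(void)
	{
		uint32_t start = 0x100000;	/* page 1M == byte offset 4 GiB */

		uint32_t wrapped = start << PAGE_SHIFT;		/* wraps to 0 */
		uint64_t correct = (uint64_t)start << PAGE_SHIFT;

		printf("wrapped=0x%x correct=0x%llx\n",
		       wrapped, (unsigned long long)correct);
		return 0;
	}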
@@ -950,6 +950,8 @@ static struct list_head *amdgpu_ttm_lru_tail(struct ttm_buffer_object *tbo) | |||
950 | struct list_head *res = lru->lru[tbo->mem.mem_type]; | 950 | struct list_head *res = lru->lru[tbo->mem.mem_type]; |
951 | 951 | ||
952 | lru->lru[tbo->mem.mem_type] = &tbo->lru; | 952 | lru->lru[tbo->mem.mem_type] = &tbo->lru; |
953 | while ((++lru)->lru[tbo->mem.mem_type] == res) | ||
954 | lru->lru[tbo->mem.mem_type] = &tbo->lru; | ||
953 | 955 | ||
954 | return res; | 956 | return res; |
955 | } | 957 | } |
@@ -960,6 +962,8 @@ static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo) | |||
960 | struct list_head *res = lru->swap_lru; | 962 | struct list_head *res = lru->swap_lru; |
961 | 963 | ||
962 | lru->swap_lru = &tbo->swap; | 964 | lru->swap_lru = &tbo->swap; |
965 | while ((++lru)->swap_lru == res) | ||
966 | lru->swap_lru = &tbo->swap; | ||
963 | 967 | ||
964 | return res; | 968 | return res; |
965 | } | 969 | } |
@@ -1011,6 +1015,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) | |||
1011 | lru->swap_lru = &adev->mman.bdev.glob->swap_lru; | 1015 | lru->swap_lru = &adev->mman.bdev.glob->swap_lru; |
1012 | } | 1016 | } |
1013 | 1017 | ||
1018 | for (j = 0; j < TTM_NUM_MEM_TYPES; ++j) | ||
1019 | adev->mman.guard.lru[j] = NULL; | ||
1020 | adev->mman.guard.swap_lru = NULL; | ||
1021 | |||
1014 | adev->mman.initialized = true; | 1022 | adev->mman.initialized = true; |
1015 | r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM, | 1023 | r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM, |
1016 | adev->mc.real_vram_size >> PAGE_SHIFT); | 1024 | adev->mc.real_vram_size >> PAGE_SHIFT); |
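The new guard entry is a sentinel: it must sit immediately after log2_size[] (hence the "don't add anything in between" comment) and is initialised to NULL, so the while ((++lru)->lru[...] == res) walks above always stop once they run past the last real entry. The idiom in miniature, with hypothetical names:

	#include <stddef.h>
	#include <stdio.h>

	struct lru {
		int *tail;
	};

	int main(void)
	{
		int old_tail, new_tail;
		/* Three live slots sharing one tail pointer, plus a
		 * NULL-initialised guard that terminates the walk. */
		struct lru lrus[4] = {
			{ &old_tail }, { &old_tail }, { &old_tail }, { NULL }
		};
		struct lru *lru = &lrus[0];
		int *res = lru->tail;

		lru->tail = &new_tail;
		/* The guard's NULL can never equal the old tail pointer,
		 * so the walk stops without a length check */
		while ((++lru)->tail == res)
			lru->tail = &new_tail;

		printf("updated %td slots\n", lru - lrus);	/* prints 3 */
		return 0;
	}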
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index b11f4e8868d7..4aa993d19018 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | |||
@@ -1187,7 +1187,8 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
1187 | r = 0; | 1187 | r = 0; |
1188 | } | 1188 | } |
1189 | 1189 | ||
1190 | error: | ||
1191 | fence_put(fence); | 1190 | fence_put(fence); |
1191 | |||
1192 | error: | ||
1192 | return r; | 1193 | return r; |
1193 | } | 1194 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 8e642fc48df4..80120fa4092c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |||
@@ -1535,7 +1535,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |||
1535 | r = amd_sched_entity_init(&ring->sched, &vm->entity, | 1535 | r = amd_sched_entity_init(&ring->sched, &vm->entity, |
1536 | rq, amdgpu_sched_jobs); | 1536 | rq, amdgpu_sched_jobs); |
1537 | if (r) | 1537 | if (r) |
1538 | return r; | 1538 | goto err; |
1539 | 1539 | ||
1540 | vm->page_directory_fence = NULL; | 1540 | vm->page_directory_fence = NULL; |
1541 | 1541 | ||
@@ -1565,6 +1565,9 @@ error_free_page_directory: | |||
1565 | error_free_sched_entity: | 1565 | error_free_sched_entity: |
1566 | amd_sched_entity_fini(&ring->sched, &vm->entity); | 1566 | amd_sched_entity_fini(&ring->sched, &vm->entity); |
1567 | 1567 | ||
1568 | err: | ||
1569 | drm_free_large(vm->page_tables); | ||
1570 | |||
1568 | return r; | 1571 | return r; |
1569 | } | 1572 | } |
1570 | 1573 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 1351c7e834a2..a64715d90503 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | |||
@@ -714,7 +714,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
714 | DRM_ERROR("amdgpu: IB test timed out\n"); | 714 | DRM_ERROR("amdgpu: IB test timed out\n"); |
715 | r = -ETIMEDOUT; | 715 | r = -ETIMEDOUT; |
716 | goto err1; | 716 | goto err1; |
717 | } else if (r) { | 717 | } else if (r < 0) { |
718 | DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); | 718 | DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); |
719 | goto err1; | 719 | goto err1; |
720 | } | 720 | } |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c index e621eba63126..a7d3cb3fead0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | |||
@@ -184,7 +184,7 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd, | |||
184 | sizeof(u32)) + inx; | 184 | sizeof(u32)) + inx; |
185 | 185 | ||
186 | pr_debug("kfd: get kernel queue doorbell\n" | 186 | pr_debug("kfd: get kernel queue doorbell\n" |
187 | " doorbell offset == 0x%08d\n" | 187 | " doorbell offset == 0x%08X\n" |
188 | " kernel address == 0x%08lX\n", | 188 | " kernel address == 0x%08lX\n", |
189 | *doorbell_off, (uintptr_t)(kfd->doorbell_kernel_ptr + inx)); | 189 | *doorbell_off, (uintptr_t)(kfd->doorbell_kernel_ptr + inx)); |
190 | 190 | ||
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index ef312bb75fda..963a24d46a93 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | |||
@@ -405,7 +405,7 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched) | |||
405 | spin_lock(&sched->job_list_lock); | 405 | spin_lock(&sched->job_list_lock); |
406 | s_job = list_first_entry_or_null(&sched->ring_mirror_list, | 406 | s_job = list_first_entry_or_null(&sched->ring_mirror_list, |
407 | struct amd_sched_job, node); | 407 | struct amd_sched_job, node); |
408 | if (s_job) | 408 | if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT) |
409 | schedule_delayed_work(&s_job->work_tdr, sched->timeout); | 409 | schedule_delayed_work(&s_job->work_tdr, sched->timeout); |
410 | 410 | ||
411 | list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) { | 411 | list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) { |
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index fa3930757972..2a3ded44cf2a 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c | |||
@@ -475,7 +475,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc, | |||
475 | val, | 475 | val, |
476 | -1, | 476 | -1, |
477 | &replaced); | 477 | &replaced); |
478 | state->color_mgmt_changed = replaced; | 478 | state->color_mgmt_changed |= replaced; |
479 | return ret; | 479 | return ret; |
480 | } else if (property == config->ctm_property) { | 480 | } else if (property == config->ctm_property) { |
481 | ret = drm_atomic_replace_property_blob_from_id(crtc, | 481 | ret = drm_atomic_replace_property_blob_from_id(crtc, |
@@ -483,7 +483,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc, | |||
483 | val, | 483 | val, |
484 | sizeof(struct drm_color_ctm), | 484 | sizeof(struct drm_color_ctm), |
485 | &replaced); | 485 | &replaced); |
486 | state->color_mgmt_changed = replaced; | 486 | state->color_mgmt_changed |= replaced; |
487 | return ret; | 487 | return ret; |
488 | } else if (property == config->gamma_lut_property) { | 488 | } else if (property == config->gamma_lut_property) { |
489 | ret = drm_atomic_replace_property_blob_from_id(crtc, | 489 | ret = drm_atomic_replace_property_blob_from_id(crtc, |
@@ -491,7 +491,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc, | |||
491 | val, | 491 | val, |
492 | -1, | 492 | -1, |
493 | &replaced); | 493 | &replaced); |
494 | state->color_mgmt_changed = replaced; | 494 | state->color_mgmt_changed |= replaced; |
495 | return ret; | 495 | return ret; |
496 | } else if (crtc->funcs->atomic_set_property) | 496 | } else if (crtc->funcs->atomic_set_property) |
497 | return crtc->funcs->atomic_set_property(crtc, state, property, val); | 497 | return crtc->funcs->atomic_set_property(crtc, state, property, val); |
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index b1dbb60af99f..ddebe54cd5ca 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c | |||
@@ -5404,6 +5404,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, | |||
5404 | struct drm_pending_vblank_event *e = NULL; | 5404 | struct drm_pending_vblank_event *e = NULL; |
5405 | int ret = -EINVAL; | 5405 | int ret = -EINVAL; |
5406 | 5406 | ||
5407 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||
5408 | return -EINVAL; | ||
5409 | |||
5407 | if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS || | 5410 | if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS || |
5408 | page_flip->reserved != 0) | 5411 | page_flip->reserved != 0) |
5409 | return -EINVAL; | 5412 | return -EINVAL; |
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index ce54e985d91b..0a06f9120b5a 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
@@ -464,7 +464,7 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper) | |||
464 | 464 | ||
465 | /* Sometimes user space wants everything disabled, so don't steal the | 465 | /* Sometimes user space wants everything disabled, so don't steal the |
466 | * display if there's a master. */ | 466 | * display if there's a master. */ |
467 | if (lockless_dereference(dev->master)) | 467 | if (READ_ONCE(dev->master)) |
468 | return false; | 468 | return false; |
469 | 469 | ||
470 | drm_for_each_crtc(crtc, dev) { | 470 | drm_for_each_crtc(crtc, dev) { |
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index 87ef34150d46..b382cf505262 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c | |||
@@ -1333,8 +1333,6 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu, | |||
1333 | if (ret < 0) | 1333 | if (ret < 0) |
1334 | return ret; | 1334 | return ret; |
1335 | 1335 | ||
1336 | mutex_lock(&gpu->lock); | ||
1337 | |||
1338 | /* | 1336 | /* |
1339 | * TODO | 1337 | * TODO |
1340 | * | 1338 | * |
@@ -1348,16 +1346,18 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu, | |||
1348 | if (unlikely(event == ~0U)) { | 1346 | if (unlikely(event == ~0U)) { |
1349 | DRM_ERROR("no free event\n"); | 1347 | DRM_ERROR("no free event\n"); |
1350 | ret = -EBUSY; | 1348 | ret = -EBUSY; |
1351 | goto out_unlock; | 1349 | goto out_pm_put; |
1352 | } | 1350 | } |
1353 | 1351 | ||
1354 | fence = etnaviv_gpu_fence_alloc(gpu); | 1352 | fence = etnaviv_gpu_fence_alloc(gpu); |
1355 | if (!fence) { | 1353 | if (!fence) { |
1356 | event_free(gpu, event); | 1354 | event_free(gpu, event); |
1357 | ret = -ENOMEM; | 1355 | ret = -ENOMEM; |
1358 | goto out_unlock; | 1356 | goto out_pm_put; |
1359 | } | 1357 | } |
1360 | 1358 | ||
1359 | mutex_lock(&gpu->lock); | ||
1360 | |||
1361 | gpu->event[event].fence = fence; | 1361 | gpu->event[event].fence = fence; |
1362 | submit->fence = fence->seqno; | 1362 | submit->fence = fence->seqno; |
1363 | gpu->active_fence = submit->fence; | 1363 | gpu->active_fence = submit->fence; |
@@ -1395,9 +1395,9 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu, | |||
1395 | hangcheck_timer_reset(gpu); | 1395 | hangcheck_timer_reset(gpu); |
1396 | ret = 0; | 1396 | ret = 0; |
1397 | 1397 | ||
1398 | out_unlock: | ||
1399 | mutex_unlock(&gpu->lock); | 1398 | mutex_unlock(&gpu->lock); |
1400 | 1399 | ||
1400 | out_pm_put: | ||
1401 | etnaviv_gpu_pm_put(gpu); | 1401 | etnaviv_gpu_pm_put(gpu); |
1402 | 1402 | ||
1403 | return ret; | 1403 | return ret; |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 21f939074abc..f68c78918d63 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -882,11 +882,12 @@ struct i915_gem_context { | |||
882 | 882 | ||
883 | struct i915_ctx_hang_stats hang_stats; | 883 | struct i915_ctx_hang_stats hang_stats; |
884 | 884 | ||
885 | /* Unique identifier for this context, used by the hw for tracking */ | ||
886 | unsigned long flags; | 885 | unsigned long flags; |
887 | #define CONTEXT_NO_ZEROMAP BIT(0) | 886 | #define CONTEXT_NO_ZEROMAP BIT(0) |
888 | #define CONTEXT_NO_ERROR_CAPTURE BIT(1) | 887 | #define CONTEXT_NO_ERROR_CAPTURE BIT(1) |
889 | unsigned hw_id; | 888 | |
889 | /* Unique identifier for this context, used by the hw for tracking */ | ||
890 | unsigned int hw_id; | ||
890 | u32 user_handle; | 891 | u32 user_handle; |
891 | 892 | ||
892 | u32 ggtt_alignment; | 893 | u32 ggtt_alignment; |
@@ -1854,6 +1855,7 @@ struct drm_i915_private { | |||
1854 | enum modeset_restore modeset_restore; | 1855 | enum modeset_restore modeset_restore; |
1855 | struct mutex modeset_restore_lock; | 1856 | struct mutex modeset_restore_lock; |
1856 | struct drm_atomic_state *modeset_restore_state; | 1857 | struct drm_atomic_state *modeset_restore_state; |
1858 | struct drm_modeset_acquire_ctx reset_ctx; | ||
1857 | 1859 | ||
1858 | struct list_head vm_list; /* Global list of all address spaces */ | 1860 | struct list_head vm_list; /* Global list of all address spaces */ |
1859 | struct i915_ggtt ggtt; /* VM representing the global address space */ | 1861 | struct i915_ggtt ggtt; /* VM representing the global address space */ |
@@ -1962,6 +1964,13 @@ struct drm_i915_private { | |||
1962 | struct i915_suspend_saved_registers regfile; | 1964 | struct i915_suspend_saved_registers regfile; |
1963 | struct vlv_s0ix_state vlv_s0ix_state; | 1965 | struct vlv_s0ix_state vlv_s0ix_state; |
1964 | 1966 | ||
1967 | enum { | ||
1968 | I915_SKL_SAGV_UNKNOWN = 0, | ||
1969 | I915_SKL_SAGV_DISABLED, | ||
1970 | I915_SKL_SAGV_ENABLED, | ||
1971 | I915_SKL_SAGV_NOT_CONTROLLED | ||
1972 | } skl_sagv_status; | ||
1973 | |||
1965 | struct { | 1974 | struct { |
1966 | /* | 1975 | /* |
1967 | * Raw watermark latency values: | 1976 | * Raw watermark latency values: |
@@ -3590,6 +3599,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); | |||
3590 | /* belongs in i915_gem_gtt.h */ | 3599 | /* belongs in i915_gem_gtt.h */ |
3591 | static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv) | 3600 | static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv) |
3592 | { | 3601 | { |
3602 | wmb(); | ||
3593 | if (INTEL_GEN(dev_priv) < 6) | 3603 | if (INTEL_GEN(dev_priv) < 6) |
3594 | intel_gtt_chipset_flush(); | 3604 | intel_gtt_chipset_flush(); |
3595 | } | 3605 | } |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 11681501d7b1..a77ce9983f69 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -879,9 +879,12 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, | |||
879 | ret = i915_gem_shmem_pread(dev, obj, args, file); | 879 | ret = i915_gem_shmem_pread(dev, obj, args, file); |
880 | 880 | ||
881 | /* pread for non shmem backed objects */ | 881 | /* pread for non shmem backed objects */ |
882 | if (ret == -EFAULT || ret == -ENODEV) | 882 | if (ret == -EFAULT || ret == -ENODEV) { |
883 | intel_runtime_pm_get(to_i915(dev)); | ||
883 | ret = i915_gem_gtt_pread(dev, obj, args->size, | 884 | ret = i915_gem_gtt_pread(dev, obj, args->size, |
884 | args->offset, args->data_ptr); | 885 | args->offset, args->data_ptr); |
886 | intel_runtime_pm_put(to_i915(dev)); | ||
887 | } | ||
885 | 888 | ||
886 | out: | 889 | out: |
887 | drm_gem_object_unreference(&obj->base); | 890 | drm_gem_object_unreference(&obj->base); |
@@ -1306,7 +1309,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
1306 | * textures). Fallback to the shmem path in that case. */ | 1309 | * textures). Fallback to the shmem path in that case. */ |
1307 | } | 1310 | } |
1308 | 1311 | ||
1309 | if (ret == -EFAULT) { | 1312 | if (ret == -EFAULT || ret == -ENOSPC) { |
1310 | if (obj->phys_handle) | 1313 | if (obj->phys_handle) |
1311 | ret = i915_gem_phys_pwrite(obj, args, file); | 1314 | ret = i915_gem_phys_pwrite(obj, args, file); |
1312 | else if (i915_gem_object_has_struct_page(obj)) | 1315 | else if (i915_gem_object_has_struct_page(obj)) |
@@ -3169,6 +3172,8 @@ static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine) | |||
3169 | } | 3172 | } |
3170 | 3173 | ||
3171 | intel_ring_init_seqno(engine, engine->last_submitted_seqno); | 3174 | intel_ring_init_seqno(engine, engine->last_submitted_seqno); |
3175 | |||
3176 | engine->i915->gt.active_engines &= ~intel_engine_flag(engine); | ||
3172 | } | 3177 | } |
3173 | 3178 | ||
3174 | void i915_gem_reset(struct drm_device *dev) | 3179 | void i915_gem_reset(struct drm_device *dev) |
@@ -3186,6 +3191,7 @@ void i915_gem_reset(struct drm_device *dev) | |||
3186 | 3191 | ||
3187 | for_each_engine(engine, dev_priv) | 3192 | for_each_engine(engine, dev_priv) |
3188 | i915_gem_reset_engine_cleanup(engine); | 3193 | i915_gem_reset_engine_cleanup(engine); |
3194 | mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0); | ||
3189 | 3195 | ||
3190 | i915_gem_context_reset(dev); | 3196 | i915_gem_context_reset(dev); |
3191 | 3197 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 1978633e7549..b35e5b6475b2 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -943,8 +943,6 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req, | |||
943 | { | 943 | { |
944 | const unsigned other_rings = ~intel_engine_flag(req->engine); | 944 | const unsigned other_rings = ~intel_engine_flag(req->engine); |
945 | struct i915_vma *vma; | 945 | struct i915_vma *vma; |
946 | uint32_t flush_domains = 0; | ||
947 | bool flush_chipset = false; | ||
948 | int ret; | 946 | int ret; |
949 | 947 | ||
950 | list_for_each_entry(vma, vmas, exec_list) { | 948 | list_for_each_entry(vma, vmas, exec_list) { |
@@ -957,16 +955,11 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req, | |||
957 | } | 955 | } |
958 | 956 | ||
959 | if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) | 957 | if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) |
960 | flush_chipset |= i915_gem_clflush_object(obj, false); | 958 | i915_gem_clflush_object(obj, false); |
961 | |||
962 | flush_domains |= obj->base.write_domain; | ||
963 | } | 959 | } |
964 | 960 | ||
965 | if (flush_chipset) | 961 | /* Unconditionally flush any chipset caches (for streaming writes). */ |
966 | i915_gem_chipset_flush(req->engine->i915); | 962 | i915_gem_chipset_flush(req->engine->i915); |
967 | |||
968 | if (flush_domains & I915_GEM_DOMAIN_GTT) | ||
969 | wmb(); | ||
970 | 963 | ||
971 | /* Unconditionally invalidate gpu caches and ensure that we do flush | 964 | /* Unconditionally invalidate gpu caches and ensure that we do flush |
972 | * any residual writes from the previous batch. | 965 | * any residual writes from the previous batch. |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 10f1e32767e6..7a30af79d799 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -2873,6 +2873,7 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev) | |||
2873 | struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; | 2873 | struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; |
2874 | 2874 | ||
2875 | ppgtt->base.cleanup(&ppgtt->base); | 2875 | ppgtt->base.cleanup(&ppgtt->base); |
2876 | kfree(ppgtt); | ||
2876 | } | 2877 | } |
2877 | 2878 | ||
2878 | i915_gem_cleanup_stolen(dev); | 2879 | i915_gem_cleanup_stolen(dev); |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index ce14fe09d962..bf2cad3f9e1f 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -1536,6 +1536,7 @@ enum skl_disp_power_wells { | |||
1536 | #define BALANCE_LEG_MASK(port) (7<<(8+3*(port))) | 1536 | #define BALANCE_LEG_MASK(port) (7<<(8+3*(port))) |
1537 | /* Balance leg disable bits */ | 1537 | /* Balance leg disable bits */ |
1538 | #define BALANCE_LEG_DISABLE_SHIFT 23 | 1538 | #define BALANCE_LEG_DISABLE_SHIFT 23 |
1539 | #define BALANCE_LEG_DISABLE(port) (1 << (23 + (port))) | ||
1539 | 1540 | ||
1540 | /* | 1541 | /* |
1541 | * Fence registers | 1542 | * Fence registers |
@@ -7144,6 +7145,15 @@ enum { | |||
7144 | 7145 | ||
7145 | #define GEN6_PCODE_MAILBOX _MMIO(0x138124) | 7146 | #define GEN6_PCODE_MAILBOX _MMIO(0x138124) |
7146 | #define GEN6_PCODE_READY (1<<31) | 7147 | #define GEN6_PCODE_READY (1<<31) |
7148 | #define GEN6_PCODE_ERROR_MASK 0xFF | ||
7149 | #define GEN6_PCODE_SUCCESS 0x0 | ||
7150 | #define GEN6_PCODE_ILLEGAL_CMD 0x1 | ||
7151 | #define GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x2 | ||
7152 | #define GEN6_PCODE_TIMEOUT 0x3 | ||
7153 | #define GEN6_PCODE_UNIMPLEMENTED_CMD 0xFF | ||
7154 | #define GEN7_PCODE_TIMEOUT 0x2 | ||
7155 | #define GEN7_PCODE_ILLEGAL_DATA 0x3 | ||
7156 | #define GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x10 | ||
7147 | #define GEN6_PCODE_WRITE_RC6VIDS 0x4 | 7157 | #define GEN6_PCODE_WRITE_RC6VIDS 0x4 |
7148 | #define GEN6_PCODE_READ_RC6VIDS 0x5 | 7158 | #define GEN6_PCODE_READ_RC6VIDS 0x5 |
7149 | #define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5) | 7159 | #define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5) |
@@ -7165,6 +7175,10 @@ enum { | |||
7165 | #define HSW_PCODE_DE_WRITE_FREQ_REQ 0x17 | 7175 | #define HSW_PCODE_DE_WRITE_FREQ_REQ 0x17 |
7166 | #define DISPLAY_IPS_CONTROL 0x19 | 7176 | #define DISPLAY_IPS_CONTROL 0x19 |
7167 | #define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A | 7177 | #define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A |
7178 | #define GEN9_PCODE_SAGV_CONTROL 0x21 | ||
7179 | #define GEN9_SAGV_DISABLE 0x0 | ||
7180 | #define GEN9_SAGV_IS_DISABLED 0x1 | ||
7181 | #define GEN9_SAGV_ENABLE 0x3 | ||
7168 | #define GEN6_PCODE_DATA _MMIO(0x138128) | 7182 | #define GEN6_PCODE_DATA _MMIO(0x138128) |
7169 | #define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 | 7183 | #define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 |
7170 | #define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16 | 7184 | #define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16 |
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 6700a7be7f78..d32f586f9c05 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c | |||
@@ -600,6 +600,8 @@ static void i915_audio_component_codec_wake_override(struct device *dev, | |||
600 | if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv)) | 600 | if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv)) |
601 | return; | 601 | return; |
602 | 602 | ||
603 | i915_audio_component_get_power(dev); | ||
604 | |||
603 | /* | 605 | /* |
604 | * Enable/disable generating the codec wake signal, overriding the | 606 | * Enable/disable generating the codec wake signal, overriding the |
605 | * internal logic to generate the codec wake to controller. | 607 | * internal logic to generate the codec wake to controller. |
@@ -615,6 +617,8 @@ static void i915_audio_component_codec_wake_override(struct device *dev, | |||
615 | I915_WRITE(HSW_AUD_CHICKENBIT, tmp); | 617 | I915_WRITE(HSW_AUD_CHICKENBIT, tmp); |
616 | usleep_range(1000, 1500); | 618 | usleep_range(1000, 1500); |
617 | } | 619 | } |
620 | |||
621 | i915_audio_component_put_power(dev); | ||
618 | } | 622 | } |
619 | 623 | ||
620 | /* Get CDCLK in kHz */ | 624 | /* Get CDCLK in kHz */ |
@@ -648,6 +652,7 @@ static int i915_audio_component_sync_audio_rate(struct device *dev, | |||
648 | !IS_HASWELL(dev_priv)) | 652 | !IS_HASWELL(dev_priv)) |
649 | return 0; | 653 | return 0; |
650 | 654 | ||
655 | i915_audio_component_get_power(dev); | ||
651 | mutex_lock(&dev_priv->av_mutex); | 656 | mutex_lock(&dev_priv->av_mutex); |
652 | /* 1. get the pipe */ | 657 | /* 1. get the pipe */ |
653 | intel_encoder = dev_priv->dig_port_map[port]; | 658 | intel_encoder = dev_priv->dig_port_map[port]; |
@@ -698,6 +703,7 @@ static int i915_audio_component_sync_audio_rate(struct device *dev, | |||
698 | 703 | ||
699 | unlock: | 704 | unlock: |
700 | mutex_unlock(&dev_priv->av_mutex); | 705 | mutex_unlock(&dev_priv->av_mutex); |
706 | i915_audio_component_put_power(dev); | ||
701 | return err; | 707 | return err; |
702 | } | 708 | } |
703 | 709 | ||
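
Each audio component entry point that touches display registers is now bracketed by a power get/put pair, released on every exit path including the error path through the unlock label. The shape, with a placeholder entry point and an assumed dev_to_i915() helper (the real entry points derive dev_priv the way they already did):

static int audio_component_op(struct device *dev)	/* hypothetical name */
{
	struct drm_i915_private *dev_priv = dev_to_i915(dev);	/* assumed helper */
	int err = 0;

	i915_audio_component_get_power(dev);	/* wake the display first */
	mutex_lock(&dev_priv->av_mutex);

	/* ... register access that requires display power ... */

	mutex_unlock(&dev_priv->av_mutex);
	i915_audio_component_put_power(dev);	/* paired on all paths */
	return err;
}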
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index 3edb9580928e..c3b33a10c15c 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c | |||
@@ -41,15 +41,15 @@ | |||
41 | * be moved to FW_FAILED. | 41 | * be moved to FW_FAILED. |
42 | */ | 42 | */ |
43 | 43 | ||
44 | #define I915_CSR_KBL "i915/kbl_dmc_ver1.bin" | 44 | #define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin" |
45 | MODULE_FIRMWARE(I915_CSR_KBL); | 45 | MODULE_FIRMWARE(I915_CSR_KBL); |
46 | #define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 1) | 46 | #define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 1) |
47 | 47 | ||
48 | #define I915_CSR_SKL "i915/skl_dmc_ver1.bin" | 48 | #define I915_CSR_SKL "i915/skl_dmc_ver1_26.bin" |
49 | MODULE_FIRMWARE(I915_CSR_SKL); | 49 | MODULE_FIRMWARE(I915_CSR_SKL); |
50 | #define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23) | 50 | #define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 26) |
51 | 51 | ||
52 | #define I915_CSR_BXT "i915/bxt_dmc_ver1.bin" | 52 | #define I915_CSR_BXT "i915/bxt_dmc_ver1_07.bin" |
53 | MODULE_FIRMWARE(I915_CSR_BXT); | 53 | MODULE_FIRMWARE(I915_CSR_BXT); |
54 | #define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7) | 54 | #define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7) |
55 | 55 | ||
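
The firmware filenames now encode the exact version, which has to match the *_CSR_VERSION_REQUIRED check at load time. Assuming CSR_VERSION packs major/minor into the high/low halves of a u32 (a sketch of the convention, not the verified i915 definition):

#define CSR_VERSION(major, minor)	((major) << 16 | (minor))
#define CSR_VERSION_MAJOR(version)	((version) >> 16)
#define CSR_VERSION_MINOR(version)	((version) & 0xffff)

/* e.g. SKL now requires 1.26, matching i915/skl_dmc_ver1_26.bin */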
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index dd1d6fe12297..1a7efac65fd5 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
@@ -145,7 +145,7 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = { | |||
145 | static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = { | 145 | static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = { |
146 | { 0x0000201B, 0x000000A2, 0x0 }, | 146 | { 0x0000201B, 0x000000A2, 0x0 }, |
147 | { 0x00005012, 0x00000088, 0x0 }, | 147 | { 0x00005012, 0x00000088, 0x0 }, |
148 | { 0x80007011, 0x000000CD, 0x0 }, | 148 | { 0x80007011, 0x000000CD, 0x1 }, |
149 | { 0x80009010, 0x000000C0, 0x1 }, | 149 | { 0x80009010, 0x000000C0, 0x1 }, |
150 | { 0x0000201B, 0x0000009D, 0x0 }, | 150 | { 0x0000201B, 0x0000009D, 0x0 }, |
151 | { 0x80005012, 0x000000C0, 0x1 }, | 151 | { 0x80005012, 0x000000C0, 0x1 }, |
@@ -158,7 +158,7 @@ static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = { | |||
158 | static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = { | 158 | static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = { |
159 | { 0x00000018, 0x000000A2, 0x0 }, | 159 | { 0x00000018, 0x000000A2, 0x0 }, |
160 | { 0x00005012, 0x00000088, 0x0 }, | 160 | { 0x00005012, 0x00000088, 0x0 }, |
161 | { 0x80007011, 0x000000CD, 0x0 }, | 161 | { 0x80007011, 0x000000CD, 0x3 }, |
162 | { 0x80009010, 0x000000C0, 0x3 }, | 162 | { 0x80009010, 0x000000C0, 0x3 }, |
163 | { 0x00000018, 0x0000009D, 0x0 }, | 163 | { 0x00000018, 0x0000009D, 0x0 }, |
164 | { 0x80005012, 0x000000C0, 0x3 }, | 164 | { 0x80005012, 0x000000C0, 0x3 }, |
@@ -388,6 +388,40 @@ skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries) | |||
388 | } | 388 | } |
389 | } | 389 | } |
390 | 390 | ||
391 | static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port) | ||
392 | { | ||
393 | int n_hdmi_entries; | ||
394 | int hdmi_level; | ||
395 | int hdmi_default_entry; | ||
396 | |||
397 | hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; | ||
398 | |||
399 | if (IS_BROXTON(dev_priv)) | ||
400 | return hdmi_level; | ||
401 | |||
402 | if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { | ||
403 | skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries); | ||
404 | hdmi_default_entry = 8; | ||
405 | } else if (IS_BROADWELL(dev_priv)) { | ||
406 | n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); | ||
407 | hdmi_default_entry = 7; | ||
408 | } else if (IS_HASWELL(dev_priv)) { | ||
409 | n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi); | ||
410 | hdmi_default_entry = 6; | ||
411 | } else { | ||
412 | WARN(1, "ddi translation table missing\n"); | ||
413 | n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); | ||
414 | hdmi_default_entry = 7; | ||
415 | } | ||
416 | |||
417 | /* Choose a good default if VBT is badly populated */ | ||
418 | if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN || | ||
419 | hdmi_level >= n_hdmi_entries) | ||
420 | hdmi_level = hdmi_default_entry; | ||
421 | |||
422 | return hdmi_level; | ||
423 | } | ||
424 | |||
391 | /* | 425 | /* |
392 | * Starting with Haswell, DDI port buffers must be programmed with correct | 426 | * Starting with Haswell, DDI port buffers must be programmed with correct |
393 | * values in advance. The buffer values are different for FDI and DP modes, | 427 | * values in advance. The buffer values are different for FDI and DP modes, |
@@ -399,7 +433,7 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder) | |||
399 | { | 433 | { |
400 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 434 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
401 | u32 iboost_bit = 0; | 435 | u32 iboost_bit = 0; |
402 | int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry, | 436 | int i, n_hdmi_entries, n_dp_entries, n_edp_entries, |
403 | size; | 437 | size; |
404 | int hdmi_level; | 438 | int hdmi_level; |
405 | enum port port; | 439 | enum port port; |
@@ -410,7 +444,7 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder) | |||
410 | const struct ddi_buf_trans *ddi_translations; | 444 | const struct ddi_buf_trans *ddi_translations; |
411 | 445 | ||
412 | port = intel_ddi_get_encoder_port(encoder); | 446 | port = intel_ddi_get_encoder_port(encoder); |
413 | hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; | 447 | hdmi_level = intel_ddi_hdmi_level(dev_priv, port); |
414 | 448 | ||
415 | if (IS_BROXTON(dev_priv)) { | 449 | if (IS_BROXTON(dev_priv)) { |
416 | if (encoder->type != INTEL_OUTPUT_HDMI) | 450 | if (encoder->type != INTEL_OUTPUT_HDMI) |
@@ -430,7 +464,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder) | |||
430 | skl_get_buf_trans_edp(dev_priv, &n_edp_entries); | 464 | skl_get_buf_trans_edp(dev_priv, &n_edp_entries); |
431 | ddi_translations_hdmi = | 465 | ddi_translations_hdmi = |
432 | skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries); | 466 | skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries); |
433 | hdmi_default_entry = 8; | ||
434 | /* If we're boosting the current, set bit 31 of trans1 */ | 467 | /* If we're boosting the current, set bit 31 of trans1 */ |
435 | if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level || | 468 | if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level || |
436 | dev_priv->vbt.ddi_port_info[port].dp_boost_level) | 469 | dev_priv->vbt.ddi_port_info[port].dp_boost_level) |
@@ -456,7 +489,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder) | |||
456 | 489 | ||
457 | n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); | 490 | n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); |
458 | n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); | 491 | n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); |
459 | hdmi_default_entry = 7; | ||
460 | } else if (IS_HASWELL(dev_priv)) { | 492 | } else if (IS_HASWELL(dev_priv)) { |
461 | ddi_translations_fdi = hsw_ddi_translations_fdi; | 493 | ddi_translations_fdi = hsw_ddi_translations_fdi; |
462 | ddi_translations_dp = hsw_ddi_translations_dp; | 494 | ddi_translations_dp = hsw_ddi_translations_dp; |
@@ -464,7 +496,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder) | |||
464 | ddi_translations_hdmi = hsw_ddi_translations_hdmi; | 496 | ddi_translations_hdmi = hsw_ddi_translations_hdmi; |
465 | n_dp_entries = n_edp_entries = ARRAY_SIZE(hsw_ddi_translations_dp); | 497 | n_dp_entries = n_edp_entries = ARRAY_SIZE(hsw_ddi_translations_dp); |
466 | n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi); | 498 | n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi); |
467 | hdmi_default_entry = 6; | ||
468 | } else { | 499 | } else { |
469 | WARN(1, "ddi translation table missing\n"); | 500 | WARN(1, "ddi translation table missing\n"); |
470 | ddi_translations_edp = bdw_ddi_translations_dp; | 501 | ddi_translations_edp = bdw_ddi_translations_dp; |
@@ -474,7 +505,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder) | |||
474 | n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp); | 505 | n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp); |
475 | n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); | 506 | n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); |
476 | n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); | 507 | n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); |
477 | hdmi_default_entry = 7; | ||
478 | } | 508 | } |
479 | 509 | ||
480 | switch (encoder->type) { | 510 | switch (encoder->type) { |
@@ -505,11 +535,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder) | |||
505 | if (encoder->type != INTEL_OUTPUT_HDMI) | 535 | if (encoder->type != INTEL_OUTPUT_HDMI) |
506 | return; | 536 | return; |
507 | 537 | ||
508 | /* Choose a good default if VBT is badly populated */ | ||
509 | if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN || | ||
510 | hdmi_level >= n_hdmi_entries) | ||
511 | hdmi_level = hdmi_default_entry; | ||
512 | |||
513 | /* Entry 9 is for HDMI: */ | 538 | /* Entry 9 is for HDMI: */ |
514 | I915_WRITE(DDI_BUF_TRANS_LO(port, i), | 539 | I915_WRITE(DDI_BUF_TRANS_LO(port, i), |
515 | ddi_translations_hdmi[hdmi_level].trans1 | iboost_bit); | 540 | ddi_translations_hdmi[hdmi_level].trans1 | iboost_bit); |
@@ -1379,14 +1404,30 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc) | |||
1379 | TRANS_CLK_SEL_DISABLED); | 1404 | TRANS_CLK_SEL_DISABLED); |
1380 | } | 1405 | } |
1381 | 1406 | ||
1382 | static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv, | 1407 | static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv, |
1383 | u32 level, enum port port, int type) | 1408 | enum port port, uint8_t iboost) |
1384 | { | 1409 | { |
1410 | u32 tmp; | ||
1411 | |||
1412 | tmp = I915_READ(DISPIO_CR_TX_BMU_CR0); | ||
1413 | tmp &= ~(BALANCE_LEG_MASK(port) | BALANCE_LEG_DISABLE(port)); | ||
1414 | if (iboost) | ||
1415 | tmp |= iboost << BALANCE_LEG_SHIFT(port); | ||
1416 | else | ||
1417 | tmp |= BALANCE_LEG_DISABLE(port); | ||
1418 | I915_WRITE(DISPIO_CR_TX_BMU_CR0, tmp); | ||
1419 | } | ||
1420 | |||
1421 | static void skl_ddi_set_iboost(struct intel_encoder *encoder, u32 level) | ||
1422 | { | ||
1423 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); | ||
1424 | struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); | ||
1425 | enum port port = intel_dig_port->port; | ||
1426 | int type = encoder->type; | ||
1385 | const struct ddi_buf_trans *ddi_translations; | 1427 | const struct ddi_buf_trans *ddi_translations; |
1386 | uint8_t iboost; | 1428 | uint8_t iboost; |
1387 | uint8_t dp_iboost, hdmi_iboost; | 1429 | uint8_t dp_iboost, hdmi_iboost; |
1388 | int n_entries; | 1430 | int n_entries; |
1389 | u32 reg; | ||
1390 | 1431 | ||
1391 | /* VBT may override standard boost values */ | 1432 | /* VBT may override standard boost values */ |
1392 | dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level; | 1433 | dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level; |
@@ -1428,16 +1469,10 @@ static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv, | |||
1428 | return; | 1469 | return; |
1429 | } | 1470 | } |
1430 | 1471 | ||
1431 | reg = I915_READ(DISPIO_CR_TX_BMU_CR0); | 1472 | _skl_ddi_set_iboost(dev_priv, port, iboost); |
1432 | reg &= ~BALANCE_LEG_MASK(port); | ||
1433 | reg &= ~(1 << (BALANCE_LEG_DISABLE_SHIFT + port)); | ||
1434 | |||
1435 | if (iboost) | ||
1436 | reg |= iboost << BALANCE_LEG_SHIFT(port); | ||
1437 | else | ||
1438 | reg |= 1 << (BALANCE_LEG_DISABLE_SHIFT + port); | ||
1439 | 1473 | ||
1440 | I915_WRITE(DISPIO_CR_TX_BMU_CR0, reg); | 1474 | if (port == PORT_A && intel_dig_port->max_lanes == 4) |
1475 | _skl_ddi_set_iboost(dev_priv, PORT_E, iboost); | ||
1441 | } | 1476 | } |
1442 | 1477 | ||
1443 | static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv, | 1478 | static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv, |
@@ -1568,7 +1603,7 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp) | |||
1568 | level = translate_signal_level(signal_levels); | 1603 | level = translate_signal_level(signal_levels); |
1569 | 1604 | ||
1570 | if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) | 1605 | if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) |
1571 | skl_ddi_set_iboost(dev_priv, level, port, encoder->type); | 1606 | skl_ddi_set_iboost(encoder, level); |
1572 | else if (IS_BROXTON(dev_priv)) | 1607 | else if (IS_BROXTON(dev_priv)) |
1573 | bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type); | 1608 | bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type); |
1574 | 1609 | ||
@@ -1637,6 +1672,10 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) | |||
1637 | intel_dp_stop_link_train(intel_dp); | 1672 | intel_dp_stop_link_train(intel_dp); |
1638 | } else if (type == INTEL_OUTPUT_HDMI) { | 1673 | } else if (type == INTEL_OUTPUT_HDMI) { |
1639 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | 1674 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); |
1675 | int level = intel_ddi_hdmi_level(dev_priv, port); | ||
1676 | |||
1677 | if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) | ||
1678 | skl_ddi_set_iboost(intel_encoder, level); | ||
1640 | 1679 | ||
1641 | intel_hdmi->set_infoframes(encoder, | 1680 | intel_hdmi->set_infoframes(encoder, |
1642 | crtc->config->has_hdmi_sink, | 1681 | crtc->config->has_hdmi_sink, |
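
_skl_ddi_set_iboost() is a plain read-modify-write of a per-port field in DISPIO_CR_TX_BMU_CR0: clear both the value bits and the disable bit, then set exactly one of them, so the two can never be active together. The bit layout the macros imply, written out as a sketch (treat the exact positions as illustrative):

#define BALANCE_LEG_SHIFT(port)		(8 + 3 * (port))	/* 3 bits per port */
#define BALANCE_LEG_MASK(port)		(7 << BALANCE_LEG_SHIFT(port))
#define BALANCE_LEG_DISABLE_SHIFT	23
#define BALANCE_LEG_DISABLE(port)	(1 << (BALANCE_LEG_DISABLE_SHIFT + (port)))

Folding the sequence into a helper is also what makes the port A/E mirroring for 4-lane eDP a single extra call at the end of skl_ddi_set_iboost().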
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index dcf93b3d4fb6..175595fc3e45 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -3093,40 +3093,110 @@ static void intel_update_primary_planes(struct drm_device *dev) | |||
3093 | 3093 | ||
3094 | for_each_crtc(dev, crtc) { | 3094 | for_each_crtc(dev, crtc) { |
3095 | struct intel_plane *plane = to_intel_plane(crtc->primary); | 3095 | struct intel_plane *plane = to_intel_plane(crtc->primary); |
3096 | struct intel_plane_state *plane_state; | 3096 | struct intel_plane_state *plane_state = |
3097 | 3097 | to_intel_plane_state(plane->base.state); | |
3098 | drm_modeset_lock_crtc(crtc, &plane->base); | ||
3099 | plane_state = to_intel_plane_state(plane->base.state); | ||
3100 | 3098 | ||
3101 | if (plane_state->visible) | 3099 | if (plane_state->visible) |
3102 | plane->update_plane(&plane->base, | 3100 | plane->update_plane(&plane->base, |
3103 | to_intel_crtc_state(crtc->state), | 3101 | to_intel_crtc_state(crtc->state), |
3104 | plane_state); | 3102 | plane_state); |
3103 | } | ||
3104 | } | ||
3105 | |||
3106 | static int | ||
3107 | __intel_display_resume(struct drm_device *dev, | ||
3108 | struct drm_atomic_state *state) | ||
3109 | { | ||
3110 | struct drm_crtc_state *crtc_state; | ||
3111 | struct drm_crtc *crtc; | ||
3112 | int i, ret; | ||
3113 | |||
3114 | intel_modeset_setup_hw_state(dev); | ||
3115 | i915_redisable_vga(dev); | ||
3105 | 3116 | ||
3106 | drm_modeset_unlock_crtc(crtc); | 3117 | if (!state) |
3118 | return 0; | ||
3119 | |||
3120 | for_each_crtc_in_state(state, crtc, crtc_state, i) { | ||
3121 | /* | ||
3122 | * Force recalculation even if we restore | ||
3123 | * current state. With fast modeset this may not result | ||
3124 | * in a modeset when the state is compatible. | ||
3125 | */ | ||
3126 | crtc_state->mode_changed = true; | ||
3107 | } | 3127 | } |
3128 | |||
3129 | /* ignore any reset values/BIOS leftovers in the WM registers */ | ||
3130 | to_intel_atomic_state(state)->skip_intermediate_wm = true; | ||
3131 | |||
3132 | ret = drm_atomic_commit(state); | ||
3133 | |||
3134 | WARN_ON(ret == -EDEADLK); | ||
3135 | return ret; | ||
3108 | } | 3136 | } |
3109 | 3137 | ||
3110 | void intel_prepare_reset(struct drm_i915_private *dev_priv) | 3138 | void intel_prepare_reset(struct drm_i915_private *dev_priv) |
3111 | { | 3139 | { |
3140 | struct drm_device *dev = &dev_priv->drm; | ||
3141 | struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; | ||
3142 | struct drm_atomic_state *state; | ||
3143 | int ret; | ||
3144 | |||
3112 | /* no reset support for gen2 */ | 3145 | /* no reset support for gen2 */ |
3113 | if (IS_GEN2(dev_priv)) | 3146 | if (IS_GEN2(dev_priv)) |
3114 | return; | 3147 | return; |
3115 | 3148 | ||
3116 | /* reset doesn't touch the display */ | 3149 | /* |
3150 | * Need mode_config.mutex so that we don't | ||
3151 | * trample ongoing ->detect() and whatnot. | ||
3152 | */ | ||
3153 | mutex_lock(&dev->mode_config.mutex); | ||
3154 | drm_modeset_acquire_init(ctx, 0); | ||
3155 | while (1) { | ||
3156 | ret = drm_modeset_lock_all_ctx(dev, ctx); | ||
3157 | if (ret != -EDEADLK) | ||
3158 | break; | ||
3159 | |||
3160 | drm_modeset_backoff(ctx); | ||
3161 | } | ||
3162 | |||
3163 | /* reset doesn't touch the display, but flips might get nuked anyway */ | ||
3117 | if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) | 3164 | if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) |
3118 | return; | 3165 | return; |
3119 | 3166 | ||
3120 | drm_modeset_lock_all(&dev_priv->drm); | ||
3121 | /* | 3167 | /* |
3122 | * Disabling the crtcs gracefully seems nicer. Also the | 3168 | * Disabling the crtcs gracefully seems nicer. Also the |
3123 | * g33 docs say we should at least disable all the planes. | 3169 | * g33 docs say we should at least disable all the planes. |
3124 | */ | 3170 | */ |
3125 | intel_display_suspend(&dev_priv->drm); | 3171 | state = drm_atomic_helper_duplicate_state(dev, ctx); |
3172 | if (IS_ERR(state)) { | ||
3173 | ret = PTR_ERR(state); | ||
3174 | state = NULL; | ||
3175 | DRM_ERROR("Duplicating state failed with %i\n", ret); | ||
3176 | goto err; | ||
3177 | } | ||
3178 | |||
3179 | ret = drm_atomic_helper_disable_all(dev, ctx); | ||
3180 | if (ret) { | ||
3181 | DRM_ERROR("Suspending crtc's failed with %i\n", ret); | ||
3182 | goto err; | ||
3183 | } | ||
3184 | |||
3185 | dev_priv->modeset_restore_state = state; | ||
3186 | state->acquire_ctx = ctx; | ||
3187 | return; | ||
3188 | |||
3189 | err: | ||
3190 | drm_atomic_state_free(state); | ||
3126 | } | 3191 | } |
3127 | 3192 | ||
3128 | void intel_finish_reset(struct drm_i915_private *dev_priv) | 3193 | void intel_finish_reset(struct drm_i915_private *dev_priv) |
3129 | { | 3194 | { |
3195 | struct drm_device *dev = &dev_priv->drm; | ||
3196 | struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; | ||
3197 | struct drm_atomic_state *state = dev_priv->modeset_restore_state; | ||
3198 | int ret; | ||
3199 | |||
3130 | /* | 3200 | /* |
3131 | * Flips in the rings will be nuked by the reset, | 3201 | * Flips in the rings will be nuked by the reset, |
3132 | * so complete all pending flips so that user space | 3202 | * so complete all pending flips so that user space |
@@ -3138,6 +3208,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) | |||
3138 | if (IS_GEN2(dev_priv)) | 3208 | if (IS_GEN2(dev_priv)) |
3139 | return; | 3209 | return; |
3140 | 3210 | ||
3211 | dev_priv->modeset_restore_state = NULL; | ||
3212 | |||
3141 | /* reset doesn't touch the display */ | 3213 | /* reset doesn't touch the display */ |
3142 | if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) { | 3214 | if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) { |
3143 | /* | 3215 | /* |
@@ -3149,29 +3221,32 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) | |||
3149 | * FIXME: Atomic will make this obsolete since we won't schedule | 3221 | * FIXME: Atomic will make this obsolete since we won't schedule |
3150 | * CS-based flips (which might get lost in gpu resets) any more. | 3222 | * CS-based flips (which might get lost in gpu resets) any more. |
3151 | */ | 3223 | */ |
3152 | intel_update_primary_planes(&dev_priv->drm); | 3224 | intel_update_primary_planes(dev); |
3153 | return; | 3225 | } else { |
3154 | } | 3226 | /* |
3155 | 3227 | * The display has been reset as well, | |
3156 | /* | 3228 | * so need a full re-initialization. |
3157 | * The display has been reset as well, | 3229 | */ |
3158 | * so need a full re-initialization. | 3230 | intel_runtime_pm_disable_interrupts(dev_priv); |
3159 | */ | 3231 | intel_runtime_pm_enable_interrupts(dev_priv); |
3160 | intel_runtime_pm_disable_interrupts(dev_priv); | ||
3161 | intel_runtime_pm_enable_interrupts(dev_priv); | ||
3162 | 3232 | ||
3163 | intel_modeset_init_hw(&dev_priv->drm); | 3233 | intel_modeset_init_hw(dev); |
3164 | 3234 | ||
3165 | spin_lock_irq(&dev_priv->irq_lock); | 3235 | spin_lock_irq(&dev_priv->irq_lock); |
3166 | if (dev_priv->display.hpd_irq_setup) | 3236 | if (dev_priv->display.hpd_irq_setup) |
3167 | dev_priv->display.hpd_irq_setup(dev_priv); | 3237 | dev_priv->display.hpd_irq_setup(dev_priv); |
3168 | spin_unlock_irq(&dev_priv->irq_lock); | 3238 | spin_unlock_irq(&dev_priv->irq_lock); |
3169 | 3239 | ||
3170 | intel_display_resume(&dev_priv->drm); | 3240 | ret = __intel_display_resume(dev, state); |
3241 | if (ret) | ||
3242 | DRM_ERROR("Restoring old state failed with %i\n", ret); | ||
3171 | 3243 | ||
3172 | intel_hpd_init(dev_priv); | 3244 | intel_hpd_init(dev_priv); |
3245 | } | ||
3173 | 3246 | ||
3174 | drm_modeset_unlock_all(&dev_priv->drm); | 3247 | drm_modeset_drop_locks(ctx); |
3248 | drm_modeset_acquire_fini(ctx); | ||
3249 | mutex_unlock(&dev->mode_config.mutex); | ||
3175 | } | 3250 | } |
3176 | 3251 | ||
3177 | static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) | 3252 | static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) |
@@ -13684,6 +13759,13 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) | |||
13684 | intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco)) | 13759 | intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco)) |
13685 | dev_priv->display.modeset_commit_cdclk(state); | 13760 | dev_priv->display.modeset_commit_cdclk(state); |
13686 | 13761 | ||
13762 | /* | ||
13763 | * SKL workaround: bspec recommends we disable the SAGV when we | ||
13764 | * have more than one pipe enabled | ||
13765 | */ | ||
13766 | if (IS_SKYLAKE(dev_priv) && !skl_can_enable_sagv(state)) | ||
13767 | skl_disable_sagv(dev_priv); | ||
13768 | |||
13687 | intel_modeset_verify_disabled(dev); | 13769 | intel_modeset_verify_disabled(dev); |
13688 | } | 13770 | } |
13689 | 13771 | ||
@@ -13757,6 +13839,10 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) | |||
13757 | intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state); | 13839 | intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state); |
13758 | } | 13840 | } |
13759 | 13841 | ||
13842 | if (IS_SKYLAKE(dev_priv) && intel_state->modeset && | ||
13843 | skl_can_enable_sagv(state)) | ||
13844 | skl_enable_sagv(dev_priv); | ||
13845 | |||
13760 | drm_atomic_helper_commit_hw_done(state); | 13846 | drm_atomic_helper_commit_hw_done(state); |
13761 | 13847 | ||
13762 | if (intel_state->modeset) | 13848 | if (intel_state->modeset) |
@@ -16156,9 +16242,10 @@ void intel_display_resume(struct drm_device *dev) | |||
16156 | struct drm_atomic_state *state = dev_priv->modeset_restore_state; | 16242 | struct drm_atomic_state *state = dev_priv->modeset_restore_state; |
16157 | struct drm_modeset_acquire_ctx ctx; | 16243 | struct drm_modeset_acquire_ctx ctx; |
16158 | int ret; | 16244 | int ret; |
16159 | bool setup = false; | ||
16160 | 16245 | ||
16161 | dev_priv->modeset_restore_state = NULL; | 16246 | dev_priv->modeset_restore_state = NULL; |
16247 | if (state) | ||
16248 | state->acquire_ctx = &ctx; | ||
16162 | 16249 | ||
16163 | /* | 16250 | /* |
16164 | * This is a kludge because with real atomic modeset mode_config.mutex | 16251 |
@@ -16169,43 +16256,17 @@ void intel_display_resume(struct drm_device *dev) | |||
16169 | mutex_lock(&dev->mode_config.mutex); | 16256 | mutex_lock(&dev->mode_config.mutex); |
16170 | drm_modeset_acquire_init(&ctx, 0); | 16257 | drm_modeset_acquire_init(&ctx, 0); |
16171 | 16258 | ||
16172 | retry: | 16259 | while (1) { |
16173 | ret = drm_modeset_lock_all_ctx(dev, &ctx); | 16260 | ret = drm_modeset_lock_all_ctx(dev, &ctx); |
16174 | 16261 | if (ret != -EDEADLK) | |
16175 | if (ret == 0 && !setup) { | 16262 | break; |
16176 | setup = true; | ||
16177 | |||
16178 | intel_modeset_setup_hw_state(dev); | ||
16179 | i915_redisable_vga(dev); | ||
16180 | } | ||
16181 | |||
16182 | if (ret == 0 && state) { | ||
16183 | struct drm_crtc_state *crtc_state; | ||
16184 | struct drm_crtc *crtc; | ||
16185 | int i; | ||
16186 | |||
16187 | state->acquire_ctx = &ctx; | ||
16188 | |||
16189 | /* ignore any reset values/BIOS leftovers in the WM registers */ | ||
16190 | to_intel_atomic_state(state)->skip_intermediate_wm = true; | ||
16191 | |||
16192 | for_each_crtc_in_state(state, crtc, crtc_state, i) { | ||
16193 | /* | ||
16194 | * Force recalculation even if we restore | ||
16195 | * current state. With fast modeset this may not result | ||
16196 | * in a modeset when the state is compatible. | ||
16197 | */ | ||
16198 | crtc_state->mode_changed = true; | ||
16199 | } | ||
16200 | |||
16201 | ret = drm_atomic_commit(state); | ||
16202 | } | ||
16203 | 16263 | ||
16204 | if (ret == -EDEADLK) { | ||
16205 | drm_modeset_backoff(&ctx); | 16264 | drm_modeset_backoff(&ctx); |
16206 | goto retry; | ||
16207 | } | 16265 | } |
16208 | 16266 | ||
16267 | if (!ret) | ||
16268 | ret = __intel_display_resume(dev, state); | ||
16269 | |||
16209 | drm_modeset_drop_locks(&ctx); | 16270 | drm_modeset_drop_locks(&ctx); |
16210 | drm_modeset_acquire_fini(&ctx); | 16271 | drm_modeset_acquire_fini(&ctx); |
16211 | mutex_unlock(&dev->mode_config.mutex); | 16272 | mutex_unlock(&dev->mode_config.mutex); |
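
Both intel_prepare_reset() and the rewritten intel_display_resume() use the same deadlock-avoidance idiom from the atomic modeset locking API: retry drm_modeset_lock_all_ctx() and back off whenever the acquire context loses a lock race. The skeleton, isolated:

struct drm_modeset_acquire_ctx ctx;
int ret;

drm_modeset_acquire_init(&ctx, 0);
while (1) {
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret != -EDEADLK)
		break;			/* 0 on success, or a hard error */
	drm_modeset_backoff(&ctx);	/* drop everything, wait, retry */
}

/* ... duplicate/commit state under the locks ... */

drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);

This is also why __intel_display_resume() can simply WARN_ON(ret == -EDEADLK): the caller owns the backoff loop, so a deadlock return from the commit would indicate a locking bug.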
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index cc937a19b1ba..ff399b9a5c1f 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -1716,6 +1716,9 @@ void ilk_wm_get_hw_state(struct drm_device *dev); | |||
1716 | void skl_wm_get_hw_state(struct drm_device *dev); | 1716 | void skl_wm_get_hw_state(struct drm_device *dev); |
1717 | void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, | 1717 | void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, |
1718 | struct skl_ddb_allocation *ddb /* out */); | 1718 | struct skl_ddb_allocation *ddb /* out */); |
1719 | bool skl_can_enable_sagv(struct drm_atomic_state *state); | ||
1720 | int skl_enable_sagv(struct drm_i915_private *dev_priv); | ||
1721 | int skl_disable_sagv(struct drm_i915_private *dev_priv); | ||
1719 | uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config); | 1722 | uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config); |
1720 | bool ilk_disable_lp_wm(struct drm_device *dev); | 1723 | bool ilk_disable_lp_wm(struct drm_device *dev); |
1721 | int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6); | 1724 | int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6); |
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index 6a7ad3ed1463..3836a1c79714 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c | |||
@@ -1230,12 +1230,29 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv) | |||
1230 | if (i915.enable_fbc >= 0) | 1230 | if (i915.enable_fbc >= 0) |
1231 | return !!i915.enable_fbc; | 1231 | return !!i915.enable_fbc; |
1232 | 1232 | ||
1233 | if (!HAS_FBC(dev_priv)) | ||
1234 | return 0; | ||
1235 | |||
1233 | if (IS_BROADWELL(dev_priv)) | 1236 | if (IS_BROADWELL(dev_priv)) |
1234 | return 1; | 1237 | return 1; |
1235 | 1238 | ||
1236 | return 0; | 1239 | return 0; |
1237 | } | 1240 | } |
1238 | 1241 | ||
1242 | static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv) | ||
1243 | { | ||
1244 | #ifdef CONFIG_INTEL_IOMMU | ||
1245 | /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */ | ||
1246 | if (intel_iommu_gfx_mapped && | ||
1247 | (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) { | ||
1248 | DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n"); | ||
1249 | return true; | ||
1250 | } | ||
1251 | #endif | ||
1252 | |||
1253 | return false; | ||
1254 | } | ||
1255 | |||
1239 | /** | 1256 | /** |
1240 | * intel_fbc_init - Initialize FBC | 1257 | * intel_fbc_init - Initialize FBC |
1241 | * @dev_priv: the i915 device | 1258 | * @dev_priv: the i915 device |
@@ -1253,6 +1270,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv) | |||
1253 | fbc->active = false; | 1270 | fbc->active = false; |
1254 | fbc->work.scheduled = false; | 1271 | fbc->work.scheduled = false; |
1255 | 1272 | ||
1273 | if (need_fbc_vtd_wa(dev_priv)) | ||
1274 | mkwrite_device_info(dev_priv)->has_fbc = false; | ||
1275 | |||
1256 | i915.enable_fbc = intel_sanitize_fbc_option(dev_priv); | 1276 | i915.enable_fbc = intel_sanitize_fbc_option(dev_priv); |
1257 | DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc); | 1277 | DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc); |
1258 | 1278 | ||
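
Ordering matters in intel_fbc_init(): the VT-d quirk has to demote the capability before the module option is sanitized, so the new !HAS_FBC() bail-out sees it. The init-time gating pattern in isolation:

/* quirk first: a runtime condition clears the capability bit */
if (need_fbc_vtd_wa(dev_priv))
	mkwrite_device_info(dev_priv)->has_fbc = false;

/* only then derive the user-visible default from the capability */
i915.enable_fbc = intel_sanitize_fbc_option(dev_priv);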
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 97ba6c8cf907..53e13c10e4ea 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -2852,6 +2852,7 @@ bool ilk_disable_lp_wm(struct drm_device *dev) | |||
2852 | 2852 | ||
2853 | #define SKL_DDB_SIZE 896 /* in blocks */ | 2853 | #define SKL_DDB_SIZE 896 /* in blocks */ |
2854 | #define BXT_DDB_SIZE 512 | 2854 | #define BXT_DDB_SIZE 512 |
2855 | #define SKL_SAGV_BLOCK_TIME 30 /* µs */ | ||
2855 | 2856 | ||
2856 | /* | 2857 | /* |
2857 | * Return the index of a plane in the SKL DDB and wm result arrays. Primary | 2858 | * Return the index of a plane in the SKL DDB and wm result arrays. Primary |
@@ -2875,6 +2876,153 @@ skl_wm_plane_id(const struct intel_plane *plane) | |||
2875 | } | 2876 | } |
2876 | } | 2877 | } |
2877 | 2878 | ||
2879 | /* | ||
2880 | * SAGV dynamically adjusts the system agent voltage and clock frequencies | ||
2881 | * depending on power and performance requirements. The display engine's access | ||
2882 | * to system memory is blocked during the adjustment time. Because of the | ||
2883 | * blocking time, having this enabled can cause full system hangs and/or pipe | ||
2884 | * underruns if we don't meet all of the following requirements: | ||
2885 | * | ||
2886 | * - <= 1 pipe enabled | ||
2887 | * - All planes can enable watermarks for latencies >= SAGV engine block time | ||
2888 | * - We're not using an interlaced display configuration | ||
2889 | */ | ||
2890 | int | ||
2891 | skl_enable_sagv(struct drm_i915_private *dev_priv) | ||
2892 | { | ||
2893 | int ret; | ||
2894 | |||
2895 | if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED || | ||
2896 | dev_priv->skl_sagv_status == I915_SKL_SAGV_ENABLED) | ||
2897 | return 0; | ||
2898 | |||
2899 | DRM_DEBUG_KMS("Enabling the SAGV\n"); | ||
2900 | mutex_lock(&dev_priv->rps.hw_lock); | ||
2901 | |||
2902 | ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL, | ||
2903 | GEN9_SAGV_ENABLE); | ||
2904 | |||
2905 | /* We don't need to wait for the SAGV when enabling */ | ||
2906 | mutex_unlock(&dev_priv->rps.hw_lock); | ||
2907 | |||
2908 | /* | ||
2909 | * Some skl systems, pre-release machines in particular, | ||
2910 | * don't actually have an SAGV. | ||
2911 | */ | ||
2912 | if (ret == -ENXIO) { | ||
2913 | DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n"); | ||
2914 | dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED; | ||
2915 | return 0; | ||
2916 | } else if (ret < 0) { | ||
2917 | DRM_ERROR("Failed to enable the SAGV\n"); | ||
2918 | return ret; | ||
2919 | } | ||
2920 | |||
2921 | dev_priv->skl_sagv_status = I915_SKL_SAGV_ENABLED; | ||
2922 | return 0; | ||
2923 | } | ||
2924 | |||
2925 | static int | ||
2926 | skl_do_sagv_disable(struct drm_i915_private *dev_priv) | ||
2927 | { | ||
2928 | int ret; | ||
2929 | uint32_t temp = GEN9_SAGV_DISABLE; | ||
2930 | |||
2931 | ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_SAGV_CONTROL, | ||
2932 | &temp); | ||
2933 | if (ret) | ||
2934 | return ret; | ||
2935 | else | ||
2936 | return temp & GEN9_SAGV_IS_DISABLED; | ||
2937 | } | ||
2938 | |||
2939 | int | ||
2940 | skl_disable_sagv(struct drm_i915_private *dev_priv) | ||
2941 | { | ||
2942 | int ret, result; | ||
2943 | |||
2944 | if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED || | ||
2945 | dev_priv->skl_sagv_status == I915_SKL_SAGV_DISABLED) | ||
2946 | return 0; | ||
2947 | |||
2948 | DRM_DEBUG_KMS("Disabling the SAGV\n"); | ||
2949 | mutex_lock(&dev_priv->rps.hw_lock); | ||
2950 | |||
2951 | /* bspec says to keep retrying for at least 1 ms */ | ||
2952 | ret = wait_for(result = skl_do_sagv_disable(dev_priv), 1); | ||
2953 | mutex_unlock(&dev_priv->rps.hw_lock); | ||
2954 | |||
2955 | if (ret == -ETIMEDOUT) { | ||
2956 | DRM_ERROR("Request to disable SAGV timed out\n"); | ||
2957 | return -ETIMEDOUT; | ||
2958 | } | ||
2959 | |||
2960 | /* | ||
2961 | * Some skl systems, pre-release machines in particular, | ||
2962 | * don't actually have an SAGV. | ||
2963 | */ | ||
2964 | if (result == -ENXIO) { | ||
2965 | DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n"); | ||
2966 | dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED; | ||
2967 | return 0; | ||
2968 | } else if (result < 0) { | ||
2969 | DRM_ERROR("Failed to disable the SAGV\n"); | ||
2970 | return result; | ||
2971 | } | ||
2972 | |||
2973 | dev_priv->skl_sagv_status = I915_SKL_SAGV_DISABLED; | ||
2974 | return 0; | ||
2975 | } | ||
2976 | |||
2977 | bool skl_can_enable_sagv(struct drm_atomic_state *state) | ||
2978 | { | ||
2979 | struct drm_device *dev = state->dev; | ||
2980 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
2981 | struct intel_atomic_state *intel_state = to_intel_atomic_state(state); | ||
2982 | struct drm_crtc *crtc; | ||
2983 | enum pipe pipe; | ||
2984 | int level, plane; | ||
2985 | |||
2986 | /* | ||
2987 | * SKL workaround: bspec recommends we disable the SAGV when we have | ||
2988 | * more than one pipe enabled | ||
2989 | * | ||
2990 | * If there are no active CRTCs, no additional checks need be performed | ||
2991 | */ | ||
2992 | if (hweight32(intel_state->active_crtcs) == 0) | ||
2993 | return true; | ||
2994 | else if (hweight32(intel_state->active_crtcs) > 1) | ||
2995 | return false; | ||
2996 | |||
2997 | /* Since we're now guaranteed to only have one active CRTC... */ | ||
2998 | pipe = ffs(intel_state->active_crtcs) - 1; | ||
2999 | crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | ||
3000 | |||
3001 | if (crtc->state->mode.flags & DRM_MODE_FLAG_INTERLACE) | ||
3002 | return false; | ||
3003 | |||
3004 | for_each_plane(dev_priv, pipe, plane) { | ||
3005 | /* Skip this plane if it's not enabled */ | ||
3006 | if (intel_state->wm_results.plane[pipe][plane][0] == 0) | ||
3007 | continue; | ||
3008 | |||
3009 | /* Find the highest enabled wm level for this plane */ | ||
3010 | for (level = ilk_wm_max_level(dev); | ||
3011 | intel_state->wm_results.plane[pipe][plane][level] == 0; --level) | ||
3012 | { } | ||
3013 | |||
3014 | /* | ||
3015 | * If any of the planes on this pipe don't enable wm levels | ||
3016 | * that incur memory latencies higher than 30µs, we can't enable | ||
3017 | * the SAGV | ||
3018 | */ | ||
3019 | if (dev_priv->wm.skl_latency[level] < SKL_SAGV_BLOCK_TIME) | ||
3020 | return false; | ||
3021 | } | ||
3022 | |||
3023 | return true; | ||
3024 | } | ||
3025 | |||
2878 | static void | 3026 | static void |
2879 | skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, | 3027 | skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, |
2880 | const struct intel_crtc_state *cstate, | 3028 | const struct intel_crtc_state *cstate, |
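
skl_disable_sagv() leans on the driver's wait_for() helper: keep re-evaluating the disable request until pcode reports GEN9_SAGV_IS_DISABLED (or returns an error) or the 1 ms budget expires. Roughly equivalent open-coded logic, as a sketch of the contract rather than the actual macro:

static int poll_sagv_disabled(struct drm_i915_private *dev_priv)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1) + 1;
	int result;

	for (;;) {
		result = skl_do_sagv_disable(dev_priv);
		if (result)	/* 1 = disabled, negative = error */
			return result;
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		cpu_relax();
	}
}

A -ENXIO result falls out of the loop immediately, which is how the caller distinguishes "no SAGV on this machine" from a genuine timeout.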
@@ -3107,8 +3255,6 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate) | |||
3107 | total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id]; | 3255 | total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id]; |
3108 | } | 3256 | } |
3109 | 3257 | ||
3110 | WARN_ON(cstate->plane_mask && total_data_rate == 0); | ||
3111 | |||
3112 | return total_data_rate; | 3258 | return total_data_rate; |
3113 | } | 3259 | } |
3114 | 3260 | ||
@@ -3344,6 +3490,8 @@ static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, | |||
3344 | plane_bytes_per_line *= 4; | 3490 | plane_bytes_per_line *= 4; |
3345 | plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); | 3491 | plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); |
3346 | plane_blocks_per_line /= 4; | 3492 | plane_blocks_per_line /= 4; |
3493 | } else if (tiling == DRM_FORMAT_MOD_NONE) { | ||
3494 | plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1; | ||
3347 | } else { | 3495 | } else { |
3348 | plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); | 3496 | plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); |
3349 | } | 3497 | } |
@@ -3910,9 +4058,24 @@ skl_compute_ddb(struct drm_atomic_state *state) | |||
3910 | * pretend that all pipes switched active status so that we'll | 4058 | * pretend that all pipes switched active status so that we'll |
3911 | * ensure a full DDB recompute. | 4059 | * ensure a full DDB recompute. |
3912 | */ | 4060 | */ |
3913 | if (dev_priv->wm.distrust_bios_wm) | 4061 | if (dev_priv->wm.distrust_bios_wm) { |
4062 | ret = drm_modeset_lock(&dev->mode_config.connection_mutex, | ||
4063 | state->acquire_ctx); | ||
4064 | if (ret) | ||
4065 | return ret; | ||
4066 | |||
3914 | intel_state->active_pipe_changes = ~0; | 4067 | intel_state->active_pipe_changes = ~0; |
3915 | 4068 | ||
4069 | /* | ||
4070 | * We usually only initialize intel_state->active_crtcs if | ||
4071 | * we're doing a modeset; make sure this field is always | ||
4072 | * initialized during the sanitization process that happens | ||
4073 | * on the first commit too. | ||
4074 | */ | ||
4075 | if (!intel_state->modeset) | ||
4076 | intel_state->active_crtcs = dev_priv->active_crtcs; | ||
4077 | } | ||
4078 | |||
3916 | /* | 4079 | /* |
3917 | * If the modeset changes which CRTC's are active, we need to | 4080 | * If the modeset changes which CRTC's are active, we need to |
3918 | * recompute the DDB allocation for *all* active pipes, even | 4081 | * recompute the DDB allocation for *all* active pipes, even |
@@ -3941,11 +4104,33 @@ skl_compute_ddb(struct drm_atomic_state *state) | |||
3941 | ret = skl_allocate_pipe_ddb(cstate, ddb); | 4104 | ret = skl_allocate_pipe_ddb(cstate, ddb); |
3942 | if (ret) | 4105 | if (ret) |
3943 | return ret; | 4106 | return ret; |
4107 | |||
4108 | ret = drm_atomic_add_affected_planes(state, &intel_crtc->base); | ||
4109 | if (ret) | ||
4110 | return ret; | ||
3944 | } | 4111 | } |
3945 | 4112 | ||
3946 | return 0; | 4113 | return 0; |
3947 | } | 4114 | } |
3948 | 4115 | ||
4116 | static void | ||
4117 | skl_copy_wm_for_pipe(struct skl_wm_values *dst, | ||
4118 | struct skl_wm_values *src, | ||
4119 | enum pipe pipe) | ||
4120 | { | ||
4121 | dst->wm_linetime[pipe] = src->wm_linetime[pipe]; | ||
4122 | memcpy(dst->plane[pipe], src->plane[pipe], | ||
4123 | sizeof(dst->plane[pipe])); | ||
4124 | memcpy(dst->plane_trans[pipe], src->plane_trans[pipe], | ||
4125 | sizeof(dst->plane_trans[pipe])); | ||
4126 | |||
4127 | dst->ddb.pipe[pipe] = src->ddb.pipe[pipe]; | ||
4128 | memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe], | ||
4129 | sizeof(dst->ddb.y_plane[pipe])); | ||
4130 | memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe], | ||
4131 | sizeof(dst->ddb.plane[pipe])); | ||
4132 | } | ||
4133 | |||
3949 | static int | 4134 | static int |
3950 | skl_compute_wm(struct drm_atomic_state *state) | 4135 | skl_compute_wm(struct drm_atomic_state *state) |
3951 | { | 4136 | { |
@@ -4018,8 +4203,10 @@ static void skl_update_wm(struct drm_crtc *crtc) | |||
4018 | struct drm_device *dev = crtc->dev; | 4203 | struct drm_device *dev = crtc->dev; |
4019 | struct drm_i915_private *dev_priv = to_i915(dev); | 4204 | struct drm_i915_private *dev_priv = to_i915(dev); |
4020 | struct skl_wm_values *results = &dev_priv->wm.skl_results; | 4205 | struct skl_wm_values *results = &dev_priv->wm.skl_results; |
4206 | struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw; | ||
4021 | struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); | 4207 | struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); |
4022 | struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal; | 4208 | struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal; |
4209 | int pipe; | ||
4023 | 4210 | ||
4024 | if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0) | 4211 | if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0) |
4025 | return; | 4212 | return; |
@@ -4031,8 +4218,12 @@ static void skl_update_wm(struct drm_crtc *crtc) | |||
4031 | skl_write_wm_values(dev_priv, results); | 4218 | skl_write_wm_values(dev_priv, results); |
4032 | skl_flush_wm_values(dev_priv, results); | 4219 | skl_flush_wm_values(dev_priv, results); |
4033 | 4220 | ||
4034 | /* store the new configuration */ | 4221 | /* |
4035 | dev_priv->wm.skl_hw = *results; | 4222 | * Store the new configuration (but only for the pipes that have |
4223 | * changed; the other values weren't recomputed). | ||
4224 | */ | ||
4225 | for_each_pipe_masked(dev_priv, pipe, results->dirty_pipes) | ||
4226 | skl_copy_wm_for_pipe(hw_vals, results, pipe); | ||
4036 | 4227 | ||
4037 | mutex_unlock(&dev_priv->wm.wm_mutex); | 4228 | mutex_unlock(&dev_priv->wm.wm_mutex); |
4038 | } | 4229 | } |
@@ -6574,9 +6765,7 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv) | |||
6574 | 6765 | ||
6575 | void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv) | 6766 | void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv) |
6576 | { | 6767 | { |
6577 | if (IS_CHERRYVIEW(dev_priv)) | 6768 | if (IS_VALLEYVIEW(dev_priv)) |
6578 | return; | ||
6579 | else if (IS_VALLEYVIEW(dev_priv)) | ||
6580 | valleyview_cleanup_gt_powersave(dev_priv); | 6769 | valleyview_cleanup_gt_powersave(dev_priv); |
6581 | 6770 | ||
6582 | if (!i915.enable_rc6) | 6771 | if (!i915.enable_rc6) |
@@ -7658,8 +7847,53 @@ void intel_init_pm(struct drm_device *dev) | |||
7658 | } | 7847 | } |
7659 | } | 7848 | } |
7660 | 7849 | ||
7850 | static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv) | ||
7851 | { | ||
7852 | uint32_t flags = | ||
7853 | I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK; | ||
7854 | |||
7855 | switch (flags) { | ||
7856 | case GEN6_PCODE_SUCCESS: | ||
7857 | return 0; | ||
7858 | case GEN6_PCODE_UNIMPLEMENTED_CMD: | ||
7859 | case GEN6_PCODE_ILLEGAL_CMD: | ||
7860 | return -ENXIO; | ||
7861 | case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE: | ||
7862 | return -EOVERFLOW; | ||
7863 | case GEN6_PCODE_TIMEOUT: | ||
7864 | return -ETIMEDOUT; | ||
7865 | default: | ||
7866 | MISSING_CASE(flags); | ||
7867 | return 0; | ||
7868 | } | ||
7869 | } | ||
7870 | |||
7871 | static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv) | ||
7872 | { | ||
7873 | uint32_t flags = | ||
7874 | I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK; | ||
7875 | |||
7876 | switch (flags) { | ||
7877 | case GEN6_PCODE_SUCCESS: | ||
7878 | return 0; | ||
7879 | case GEN6_PCODE_ILLEGAL_CMD: | ||
7880 | return -ENXIO; | ||
7881 | case GEN7_PCODE_TIMEOUT: | ||
7882 | return -ETIMEDOUT; | ||
7883 | case GEN7_PCODE_ILLEGAL_DATA: | ||
7884 | return -EINVAL; | ||
7885 | case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE: | ||
7886 | return -EOVERFLOW; | ||
7887 | default: | ||
7888 | MISSING_CASE(flags); | ||
7889 | return 0; | ||
7890 | } | ||
7891 | } | ||
7892 | |||
7661 | int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val) | 7893 | int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val) |
7662 | { | 7894 | { |
7895 | int status; | ||
7896 | |||
7663 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | 7897 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); |
7664 | 7898 | ||
7665 | /* GEN6_PCODE_* are outside of the forcewake domain, we can | 7899 | /* GEN6_PCODE_* are outside of the forcewake domain, we can |
@@ -7686,12 +7920,25 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val | |||
7686 | *val = I915_READ_FW(GEN6_PCODE_DATA); | 7920 | *val = I915_READ_FW(GEN6_PCODE_DATA); |
7687 | I915_WRITE_FW(GEN6_PCODE_DATA, 0); | 7921 | I915_WRITE_FW(GEN6_PCODE_DATA, 0); |
7688 | 7922 | ||
7923 | if (INTEL_GEN(dev_priv) > 6) | ||
7924 | status = gen7_check_mailbox_status(dev_priv); | ||
7925 | else | ||
7926 | status = gen6_check_mailbox_status(dev_priv); | ||
7927 | |||
7928 | if (status) { | ||
7929 | DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed: %d\n", | ||
7930 | status); | ||
7931 | return status; | ||
7932 | } | ||
7933 | |||
7689 | return 0; | 7934 | return 0; |
7690 | } | 7935 | } |
7691 | 7936 | ||
7692 | int sandybridge_pcode_write(struct drm_i915_private *dev_priv, | 7937 | int sandybridge_pcode_write(struct drm_i915_private *dev_priv, |
7693 | u32 mbox, u32 val) | 7938 | u32 mbox, u32 val) |
7694 | { | 7939 | { |
7940 | int status; | ||
7941 | |||
7695 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | 7942 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); |
7696 | 7943 | ||
7697 | /* GEN6_PCODE_* are outside of the forcewake domain, we can | 7944 | /* GEN6_PCODE_* are outside of the forcewake domain, we can |
@@ -7716,6 +7963,17 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, | |||
7716 | 7963 | ||
7717 | I915_WRITE_FW(GEN6_PCODE_DATA, 0); | 7964 | I915_WRITE_FW(GEN6_PCODE_DATA, 0); |
7718 | 7965 | ||
7966 | if (INTEL_GEN(dev_priv) > 6) | ||
7967 | status = gen7_check_mailbox_status(dev_priv); | ||
7968 | else | ||
7969 | status = gen6_check_mailbox_status(dev_priv); | ||
7970 | |||
7971 | if (status) { | ||
7972 | DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed: %d\n", | ||
7973 | status); | ||
7974 | return status; | ||
7975 | } | ||
7976 | |||
7719 | return 0; | 7977 | return 0; |
7720 | } | 7978 | } |
7721 | 7979 | ||
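
With the mailbox status decoded into errnos, callers can finally tell "command not supported" apart from a transient failure, which is exactly what the SAGV code above relies on. Usage shape (sketch, mirroring skl_enable_sagv()):

ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
			      GEN9_SAGV_ENABLE);
if (ret == -ENXIO) {
	/* pcode rejected the command: no SAGV on this machine */
	dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
	return 0;
} else if (ret < 0) {
	/* mailbox timeout or other failure: propagate */
	return ret;
}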
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index cca7792f26d5..1d3161bbea24 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -1178,8 +1178,8 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) | |||
1178 | I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) | | 1178 | I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) | |
1179 | L3_HIGH_PRIO_CREDITS(2)); | 1179 | L3_HIGH_PRIO_CREDITS(2)); |
1180 | 1180 | ||
1181 | /* WaInsertDummyPushConstPs:bxt */ | 1181 | /* WaToEnableHwFixForPushConstHWBug:bxt */ |
1182 | if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) | 1182 | if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER)) |
1183 | WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, | 1183 | WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, |
1184 | GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); | 1184 | GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); |
1185 | 1185 | ||
@@ -1222,8 +1222,8 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine) | |||
1222 | I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | | 1222 | I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | |
1223 | GEN8_LQSC_RO_PERF_DIS); | 1223 | GEN8_LQSC_RO_PERF_DIS); |
1224 | 1224 | ||
1225 | /* WaInsertDummyPushConstPs:kbl */ | 1225 | /* WaToEnableHwFixForPushConstHWBug:kbl */ |
1226 | if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) | 1226 | if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER)) |
1227 | WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, | 1227 | WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, |
1228 | GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); | 1228 | GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); |
1229 | 1229 | ||
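
Flipping the revision range inverts who gets the write: steppings up to B0 previously got the dummy push-constant workaround, while C0 and later now enable the hardware fix instead. The gates compare PCI revision IDs; assuming the usual i915 helpers (illustrative sketch):

#define REVID_FOREVER	0xff	/* open-ended upper bound */

/* true when the device's PCI revision falls within [since, until] */
#define IS_REVID(p, since, until) \
	((p)->drm.pdev->revision >= (since) && \
	 (p)->drm.pdev->revision <= (until))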
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig index 23ac8041c562..294de4549922 100644 --- a/drivers/gpu/drm/mediatek/Kconfig +++ b/drivers/gpu/drm/mediatek/Kconfig | |||
@@ -2,6 +2,9 @@ config DRM_MEDIATEK | |||
2 | tristate "DRM Support for Mediatek SoCs" | 2 | tristate "DRM Support for Mediatek SoCs" |
3 | depends on DRM | 3 | depends on DRM |
4 | depends on ARCH_MEDIATEK || (ARM && COMPILE_TEST) | 4 | depends on ARCH_MEDIATEK || (ARM && COMPILE_TEST) |
5 | depends on COMMON_CLK | ||
6 | depends on HAVE_ARM_SMCCC | ||
7 | depends on OF | ||
5 | select DRM_GEM_CMA_HELPER | 8 | select DRM_GEM_CMA_HELPER |
6 | select DRM_KMS_HELPER | 9 | select DRM_KMS_HELPER |
7 | select DRM_MIPI_DSI | 10 | select DRM_MIPI_DSI |
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c index df2657051afd..28c1423049c5 100644 --- a/drivers/gpu/drm/qxl/qxl_fb.c +++ b/drivers/gpu/drm/qxl/qxl_fb.c | |||
@@ -73,10 +73,12 @@ static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image, | |||
73 | } | 73 | } |
74 | } | 74 | } |
75 | 75 | ||
76 | #ifdef CONFIG_DRM_FBDEV_EMULATION | ||
76 | static struct fb_deferred_io qxl_defio = { | 77 | static struct fb_deferred_io qxl_defio = { |
77 | .delay = QXL_DIRTY_DELAY, | 78 | .delay = QXL_DIRTY_DELAY, |
78 | .deferred_io = drm_fb_helper_deferred_io, | 79 | .deferred_io = drm_fb_helper_deferred_io, |
79 | }; | 80 | }; |
81 | #endif | ||
80 | 82 | ||
81 | static struct fb_ops qxlfb_ops = { | 83 | static struct fb_ops qxlfb_ops = { |
82 | .owner = THIS_MODULE, | 84 | .owner = THIS_MODULE, |
@@ -313,8 +315,10 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev, | |||
313 | goto out_destroy_fbi; | 315 | goto out_destroy_fbi; |
314 | } | 316 | } |
315 | 317 | ||
318 | #ifdef CONFIG_DRM_FBDEV_EMULATION | ||
316 | info->fbdefio = &qxl_defio; | 319 | info->fbdefio = &qxl_defio; |
317 | fb_deferred_io_init(info); | 320 | fb_deferred_io_init(info); |
321 | #endif | ||
318 | 322 | ||
319 | qdev->fbdev_info = info; | 323 | qdev->fbdev_info = info; |
320 | qdev->fbdev_qfb = &qfbdev->qfb; | 324 | qdev->fbdev_qfb = &qfbdev->qfb; |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index a97abc8af657..1dcf39084555 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -627,7 +627,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
627 | if (radeon_crtc->ss.refdiv) { | 627 | if (radeon_crtc->ss.refdiv) { |
628 | radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV; | 628 | radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV; |
629 | radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv; | 629 | radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv; |
630 | if (rdev->family >= CHIP_RV770) | 630 | if (ASIC_IS_AVIVO(rdev) && |
631 | rdev->family != CHIP_RS780 && | ||
632 | rdev->family != CHIP_RS880) | ||
631 | radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; | 633 | radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; |
632 | } | 634 | } |
633 | } | 635 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index 6de342861202..ddef0d494084 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c | |||
@@ -198,16 +198,7 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx) | |||
198 | atpx->is_hybrid = false; | 198 | atpx->is_hybrid = false; |
199 | if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { | 199 | if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { |
200 | printk("ATPX Hybrid Graphics\n"); | 200 | printk("ATPX Hybrid Graphics\n"); |
201 | #if 1 | ||
202 | /* This is a temporary hack until the D3 cold support | ||
203 | * makes it upstream. The ATPX power_control method seems | ||
204 | * to still work even if the system should be using | ||
205 | * the new standardized hybrid D3 cold ACPI interface. | ||
206 | */ | ||
207 | atpx->functions.power_cntl = true; | ||
208 | #else | ||
209 | atpx->functions.power_cntl = false; | 201 | atpx->functions.power_cntl = false; |
210 | #endif | ||
211 | atpx->is_hybrid = true; | 202 | atpx->is_hybrid = true; |
212 | } | 203 | } |
213 | 204 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 0c00e192c845..c2e0a1ccdfbc 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -263,8 +263,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, | |||
263 | 263 | ||
264 | rdev = radeon_get_rdev(bo->bdev); | 264 | rdev = radeon_get_rdev(bo->bdev); |
265 | ridx = radeon_copy_ring_index(rdev); | 265 | ridx = radeon_copy_ring_index(rdev); |
266 | old_start = old_mem->start << PAGE_SHIFT; | 266 | old_start = (u64)old_mem->start << PAGE_SHIFT; |
267 | new_start = new_mem->start << PAGE_SHIFT; | 267 | new_start = (u64)new_mem->start << PAGE_SHIFT; |
268 | 268 | ||
269 | switch (old_mem->mem_type) { | 269 | switch (old_mem->mem_type) { |
270 | case TTM_PL_VRAM: | 270 | case TTM_PL_VRAM: |
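
The radeon_move_blit() change fixes a classic integer-promotion bug: mem->start is a page offset held in an unsigned long, so on a 32-bit build the shift happens in 32 bits and the top bits are lost for buffers placed above 4 GiB; the widening cast must precede the shift. Minimal illustration (PAGE_SHIFT assumed to be 12):

unsigned long page = 0x100001;		/* a page just above the 4 GiB mark */

u64 bad  = page << PAGE_SHIFT;		/* 32-bit build: shifts in 32 bits,
					 * wraps to 0x1000 before widening */
u64 good = (u64)page << PAGE_SHIFT;	/* widen first: 0x100001000 */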
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c index 3d228ad90e0f..3dea1216bafd 100644 --- a/drivers/gpu/drm/tegra/dsi.c +++ b/drivers/gpu/drm/tegra/dsi.c | |||
@@ -840,6 +840,21 @@ static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = { | |||
840 | .destroy = tegra_output_encoder_destroy, | 840 | .destroy = tegra_output_encoder_destroy, |
841 | }; | 841 | }; |
842 | 842 | ||
843 | static void tegra_dsi_unprepare(struct tegra_dsi *dsi) | ||
844 | { | ||
845 | int err; | ||
846 | |||
847 | if (dsi->slave) | ||
848 | tegra_dsi_unprepare(dsi->slave); | ||
849 | |||
850 | err = tegra_mipi_disable(dsi->mipi); | ||
851 | if (err < 0) | ||
852 | dev_err(dsi->dev, "failed to disable MIPI calibration: %d\n", | ||
853 | err); | ||
854 | |||
855 | pm_runtime_put(dsi->dev); | ||
856 | } | ||
857 | |||
843 | static void tegra_dsi_encoder_disable(struct drm_encoder *encoder) | 858 | static void tegra_dsi_encoder_disable(struct drm_encoder *encoder) |
844 | { | 859 | { |
845 | struct tegra_output *output = encoder_to_output(encoder); | 860 | struct tegra_output *output = encoder_to_output(encoder); |
@@ -876,7 +891,26 @@ static void tegra_dsi_encoder_disable(struct drm_encoder *encoder) | |||
876 | 891 | ||
877 | tegra_dsi_disable(dsi); | 892 | tegra_dsi_disable(dsi); |
878 | 893 | ||
879 | pm_runtime_put(dsi->dev); | 894 | tegra_dsi_unprepare(dsi); |
895 | } | ||
896 | |||
897 | static void tegra_dsi_prepare(struct tegra_dsi *dsi) | ||
898 | { | ||
899 | int err; | ||
900 | |||
901 | pm_runtime_get_sync(dsi->dev); | ||
902 | |||
903 | err = tegra_mipi_enable(dsi->mipi); | ||
904 | if (err < 0) | ||
905 | dev_err(dsi->dev, "failed to enable MIPI calibration: %d\n", | ||
906 | err); | ||
907 | |||
908 | err = tegra_dsi_pad_calibrate(dsi); | ||
909 | if (err < 0) | ||
910 | dev_err(dsi->dev, "MIPI calibration failed: %d\n", err); | ||
911 | |||
912 | if (dsi->slave) | ||
913 | tegra_dsi_prepare(dsi->slave); | ||
880 | } | 914 | } |
881 | 915 | ||
882 | static void tegra_dsi_encoder_enable(struct drm_encoder *encoder) | 916 | static void tegra_dsi_encoder_enable(struct drm_encoder *encoder) |
@@ -887,13 +921,8 @@ static void tegra_dsi_encoder_enable(struct drm_encoder *encoder) | |||
887 | struct tegra_dsi *dsi = to_dsi(output); | 921 | struct tegra_dsi *dsi = to_dsi(output); |
888 | struct tegra_dsi_state *state; | 922 | struct tegra_dsi_state *state; |
889 | u32 value; | 923 | u32 value; |
890 | int err; | ||
891 | |||
892 | pm_runtime_get_sync(dsi->dev); | ||
893 | 924 | ||
894 | err = tegra_dsi_pad_calibrate(dsi); | 925 | tegra_dsi_prepare(dsi); |
895 | if (err < 0) | ||
896 | dev_err(dsi->dev, "MIPI calibration failed: %d\n", err); | ||
897 | 926 | ||
898 | state = tegra_dsi_get_state(dsi); | 927 | state = tegra_dsi_get_state(dsi); |
899 | 928 | ||
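
tegra_dsi_prepare()/tegra_dsi_unprepare() move runtime PM and MIPI calibration out of the encoder enable/disable paths and recurse into the ganged-mode slave; note the mirrored order, so teardown unwinds setup like a stack. Condensed view (pad calibration and error handling elided):

static void tegra_dsi_prepare(struct tegra_dsi *dsi)
{
	pm_runtime_get_sync(dsi->dev);		/* master powers up first */
	tegra_mipi_enable(dsi->mipi);
	if (dsi->slave)
		tegra_dsi_prepare(dsi->slave);	/* slave last */
}

static void tegra_dsi_unprepare(struct tegra_dsi *dsi)
{
	if (dsi->slave)
		tegra_dsi_unprepare(dsi->slave); /* slave first on teardown */
	tegra_mipi_disable(dsi->mipi);
	pm_runtime_put(dsi->dev);		/* master powers down last */
}

This pairs with the host1x/mipi.c change below, which turns the calibration power-up into explicit tegra_mipi_enable()/tegra_mipi_disable() entry points instead of doing it inside tegra_mipi_request().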
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index d5df555aeba0..9688bfa92ccd 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c | |||
@@ -203,6 +203,7 @@ static int udl_fb_open(struct fb_info *info, int user) | |||
203 | 203 | ||
204 | ufbdev->fb_count++; | 204 | ufbdev->fb_count++; |
205 | 205 | ||
206 | #ifdef CONFIG_DRM_FBDEV_EMULATION | ||
206 | if (fb_defio && (info->fbdefio == NULL)) { | 207 | if (fb_defio && (info->fbdefio == NULL)) { |
207 | /* enable defio at last moment if not disabled by client */ | 208 | /* enable defio at last moment if not disabled by client */ |
208 | 209 | ||
@@ -218,6 +219,7 @@ static int udl_fb_open(struct fb_info *info, int user) | |||
218 | info->fbdefio = fbdefio; | 219 | info->fbdefio = fbdefio; |
219 | fb_deferred_io_init(info); | 220 | fb_deferred_io_init(info); |
220 | } | 221 | } |
222 | #endif | ||
221 | 223 | ||
222 | pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n", | 224 | pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n", |
223 | info->node, user, info, ufbdev->fb_count); | 225 | info->node, user, info, ufbdev->fb_count); |
@@ -235,12 +237,14 @@ static int udl_fb_release(struct fb_info *info, int user) | |||
235 | 237 | ||
236 | ufbdev->fb_count--; | 238 | ufbdev->fb_count--; |
237 | 239 | ||
240 | #ifdef CONFIG_DRM_FBDEV_EMULATION | ||
238 | if ((ufbdev->fb_count == 0) && (info->fbdefio)) { | 241 | if ((ufbdev->fb_count == 0) && (info->fbdefio)) { |
239 | fb_deferred_io_cleanup(info); | 242 | fb_deferred_io_cleanup(info); |
240 | kfree(info->fbdefio); | 243 | kfree(info->fbdefio); |
241 | info->fbdefio = NULL; | 244 | info->fbdefio = NULL; |
242 | info->fbops->fb_mmap = udl_fb_mmap; | 245 | info->fbops->fb_mmap = udl_fb_mmap; |
243 | } | 246 | } |
247 | #endif | ||
244 | 248 | ||
245 | pr_warn("released /dev/fb%d user=%d count=%d\n", | 249 | pr_warn("released /dev/fb%d user=%d count=%d\n", |
246 | info->node, user, ufbdev->fb_count); | 250 | info->node, user, ufbdev->fb_count); |
diff --git a/drivers/gpu/host1x/mipi.c b/drivers/gpu/host1x/mipi.c index 52a6fd224127..e00809d996a2 100644 --- a/drivers/gpu/host1x/mipi.c +++ b/drivers/gpu/host1x/mipi.c | |||
@@ -242,20 +242,6 @@ struct tegra_mipi_device *tegra_mipi_request(struct device *device) | |||
242 | dev->pads = args.args[0]; | 242 | dev->pads = args.args[0]; |
243 | dev->device = device; | 243 | dev->device = device; |
244 | 244 | ||
245 | mutex_lock(&dev->mipi->lock); | ||
246 | |||
247 | if (dev->mipi->usage_count++ == 0) { | ||
248 | err = tegra_mipi_power_up(dev->mipi); | ||
249 | if (err < 0) { | ||
250 | dev_err(dev->mipi->dev, | ||
251 | "failed to power up MIPI bricks: %d\n", | ||
252 | err); | ||
253 | return ERR_PTR(err); | ||
254 | } | ||
255 | } | ||
256 | |||
257 | mutex_unlock(&dev->mipi->lock); | ||
258 | |||
259 | return dev; | 245 | return dev; |
260 | 246 | ||
261 | put: | 247 | put: |
@@ -270,29 +256,42 @@ EXPORT_SYMBOL(tegra_mipi_request); | |||
270 | 256 | ||
271 | void tegra_mipi_free(struct tegra_mipi_device *device) | 257 | void tegra_mipi_free(struct tegra_mipi_device *device) |
272 | { | 258 | { |
273 | int err; | 259 | platform_device_put(device->pdev); |
260 | kfree(device); | ||
261 | } | ||
262 | EXPORT_SYMBOL(tegra_mipi_free); | ||
274 | 263 | ||
275 | mutex_lock(&device->mipi->lock); | 264 | int tegra_mipi_enable(struct tegra_mipi_device *dev) |
265 | { | ||
266 | int err = 0; | ||
276 | 267 | ||
277 | if (--device->mipi->usage_count == 0) { | 268 | mutex_lock(&dev->mipi->lock); |
278 | err = tegra_mipi_power_down(device->mipi); | ||
279 | if (err < 0) { | ||
280 | /* | ||
281 | * Not much that can be done here, so an error message | ||
282 | * will have to do. | ||
283 | */ | ||
284 | dev_err(device->mipi->dev, | ||
285 | "failed to power down MIPI bricks: %d\n", | ||
286 | err); | ||
287 | } | ||
288 | } | ||
289 | 269 | ||
290 | mutex_unlock(&device->mipi->lock); | 270 | if (dev->mipi->usage_count++ == 0) |
271 | err = tegra_mipi_power_up(dev->mipi); | ||
272 | |||
273 | mutex_unlock(&dev->mipi->lock); | ||
274 | |||
275 | return err; | ||
291 | 276 | ||
292 | platform_device_put(device->pdev); | ||
293 | kfree(device); | ||
294 | } | 277 | } |
295 | EXPORT_SYMBOL(tegra_mipi_free); | 278 | EXPORT_SYMBOL(tegra_mipi_enable); |
279 | |||
280 | int tegra_mipi_disable(struct tegra_mipi_device *dev) | ||
281 | { | ||
282 | int err = 0; | ||
283 | |||
284 | mutex_lock(&dev->mipi->lock); | ||
285 | |||
286 | if (--dev->mipi->usage_count == 0) | ||
287 | err = tegra_mipi_power_down(dev->mipi); | ||
288 | |||
289 | mutex_unlock(&dev->mipi->lock); | ||
290 | |||
291 | return err; | ||
292 | |||
293 | } | ||
294 | EXPORT_SYMBOL(tegra_mipi_disable); | ||
296 | 295 | ||
297 | static int tegra_mipi_wait(struct tegra_mipi *mipi) | 296 | static int tegra_mipi_wait(struct tegra_mipi *mipi) |
298 | { | 297 | { |
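
[Editor's note on the host1x/mipi.c hunks above] The rework moves the power_up/power_down calls from request/free time into new tegra_mipi_enable()/tegra_mipi_disable() entry points built around a usage count held under the mutex: the first enabler powers the bricks up, the last disabler powers them down. The dsi.c prepare/unprepare pair above is the caller that keeps the count balanced. A compilable user-space sketch of the pattern (illustrative names):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int usage_count;

static int power_up(void)   { puts("power up");   return 0; }
static int power_down(void) { puts("power down"); return 0; }

int resource_enable(void)
{
        int err = 0;

        pthread_mutex_lock(&lock);
        if (usage_count++ == 0)
                err = power_up();       /* first user powers up */
        pthread_mutex_unlock(&lock);
        return err;
}

int resource_disable(void)
{
        int err = 0;

        pthread_mutex_lock(&lock);
        if (--usage_count == 0)
                err = power_down();     /* last user powers down */
        pthread_mutex_unlock(&lock);
        return err;
}

int main(void)
{
        resource_enable();      /* powers up */
        resource_enable();      /* count only */
        resource_disable();     /* count only */
        resource_disable();     /* powers down */
        return 0;
}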
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c index 730d84028260..d0203a115eff 100644 --- a/drivers/hwmon/it87.c +++ b/drivers/hwmon/it87.c | |||
@@ -491,7 +491,7 @@ struct it87_sio_data { | |||
491 | struct it87_data { | 491 | struct it87_data { |
492 | const struct attribute_group *groups[7]; | 492 | const struct attribute_group *groups[7]; |
493 | enum chips type; | 493 | enum chips type; |
494 | u16 features; | 494 | u32 features; |
495 | u8 peci_mask; | 495 | u8 peci_mask; |
496 | u8 old_peci_mask; | 496 | u8 old_peci_mask; |
497 | 497 | ||
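
[Editor's note on the it87.c hunk above] Widening the features bitmap from u16 to u32 lets flags past bit 15 survive: with BIT()-style masks, a 17th feature flag truncates to zero in a 16-bit field. A tiny demonstration (user space, hypothetical flag value):

#include <assert.h>
#include <stdint.h>

#define BIT(n) (1U << (n))

int main(void)
{
        uint16_t narrow = (uint16_t)BIT(16);    /* truncates: stores 0 */
        uint32_t wide   = BIT(16);              /* keeps the flag */

        assert(narrow == 0);
        assert(wide == 0x10000);
        return 0;
}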
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c index f23372669f77..1bb97f658b47 100644 --- a/drivers/i2c/busses/i2c-at91.c +++ b/drivers/i2c/busses/i2c-at91.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #define AT91_I2C_TIMEOUT msecs_to_jiffies(100) /* transfer timeout */ | 38 | #define AT91_I2C_TIMEOUT msecs_to_jiffies(100) /* transfer timeout */ |
39 | #define AT91_I2C_DMA_THRESHOLD 8 /* enable DMA if transfer size is bigger than this threshold */ | 39 | #define AT91_I2C_DMA_THRESHOLD 8 /* enable DMA if transfer size is bigger than this threshold */ |
40 | #define AUTOSUSPEND_TIMEOUT 2000 | 40 | #define AUTOSUSPEND_TIMEOUT 2000 |
41 | #define AT91_I2C_MAX_ALT_CMD_DATA_SIZE 256 | ||
41 | 42 | ||
42 | /* AT91 TWI register definitions */ | 43 | /* AT91 TWI register definitions */ |
43 | #define AT91_TWI_CR 0x0000 /* Control Register */ | 44 | #define AT91_TWI_CR 0x0000 /* Control Register */ |
@@ -141,6 +142,7 @@ struct at91_twi_dev { | |||
141 | unsigned twi_cwgr_reg; | 142 | unsigned twi_cwgr_reg; |
142 | struct at91_twi_pdata *pdata; | 143 | struct at91_twi_pdata *pdata; |
143 | bool use_dma; | 144 | bool use_dma; |
145 | bool use_alt_cmd; | ||
144 | bool recv_len_abort; | 146 | bool recv_len_abort; |
145 | u32 fifo_size; | 147 | u32 fifo_size; |
146 | struct at91_twi_dma dma; | 148 | struct at91_twi_dma dma; |
@@ -269,7 +271,7 @@ static void at91_twi_write_next_byte(struct at91_twi_dev *dev) | |||
269 | 271 | ||
270 | /* send stop when last byte has been written */ | 272 | /* send stop when last byte has been written */ |
271 | if (--dev->buf_len == 0) | 273 | if (--dev->buf_len == 0) |
272 | if (!dev->pdata->has_alt_cmd) | 274 | if (!dev->use_alt_cmd) |
273 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); | 275 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); |
274 | 276 | ||
275 | dev_dbg(dev->dev, "wrote 0x%x, to go %d\n", *dev->buf, dev->buf_len); | 277 | dev_dbg(dev->dev, "wrote 0x%x, to go %d\n", *dev->buf, dev->buf_len); |
@@ -292,7 +294,7 @@ static void at91_twi_write_data_dma_callback(void *data) | |||
292 | * we just have to enable TXCOMP one. | 294 | * we just have to enable TXCOMP one. |
293 | */ | 295 | */ |
294 | at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP); | 296 | at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP); |
295 | if (!dev->pdata->has_alt_cmd) | 297 | if (!dev->use_alt_cmd) |
296 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); | 298 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); |
297 | } | 299 | } |
298 | 300 | ||
@@ -410,7 +412,7 @@ static void at91_twi_read_next_byte(struct at91_twi_dev *dev) | |||
410 | } | 412 | } |
411 | 413 | ||
412 | /* send stop if second but last byte has been read */ | 414 | /* send stop if second but last byte has been read */ |
413 | if (!dev->pdata->has_alt_cmd && dev->buf_len == 1) | 415 | if (!dev->use_alt_cmd && dev->buf_len == 1) |
414 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); | 416 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); |
415 | 417 | ||
416 | dev_dbg(dev->dev, "read 0x%x, to go %d\n", *dev->buf, dev->buf_len); | 418 | dev_dbg(dev->dev, "read 0x%x, to go %d\n", *dev->buf, dev->buf_len); |
@@ -426,7 +428,7 @@ static void at91_twi_read_data_dma_callback(void *data) | |||
426 | dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]), | 428 | dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]), |
427 | dev->buf_len, DMA_FROM_DEVICE); | 429 | dev->buf_len, DMA_FROM_DEVICE); |
428 | 430 | ||
429 | if (!dev->pdata->has_alt_cmd) { | 431 | if (!dev->use_alt_cmd) { |
430 | /* The last two bytes have to be read without using dma */ | 432 | /* The last two bytes have to be read without using dma */ |
431 | dev->buf += dev->buf_len - 2; | 433 | dev->buf += dev->buf_len - 2; |
432 | dev->buf_len = 2; | 434 | dev->buf_len = 2; |
@@ -443,7 +445,7 @@ static void at91_twi_read_data_dma(struct at91_twi_dev *dev) | |||
443 | struct dma_chan *chan_rx = dma->chan_rx; | 445 | struct dma_chan *chan_rx = dma->chan_rx; |
444 | size_t buf_len; | 446 | size_t buf_len; |
445 | 447 | ||
446 | buf_len = (dev->pdata->has_alt_cmd) ? dev->buf_len : dev->buf_len - 2; | 448 | buf_len = (dev->use_alt_cmd) ? dev->buf_len : dev->buf_len - 2; |
447 | dma->direction = DMA_FROM_DEVICE; | 449 | dma->direction = DMA_FROM_DEVICE; |
448 | 450 | ||
449 | /* Keep in mind that we won't use dma to read the last two bytes */ | 451 | /* Keep in mind that we won't use dma to read the last two bytes */ |
@@ -651,7 +653,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) | |||
651 | unsigned start_flags = AT91_TWI_START; | 653 | unsigned start_flags = AT91_TWI_START; |
652 | 654 | ||
653 | /* if only one byte is to be read, immediately stop transfer */ | 655 | /* if only one byte is to be read, immediately stop transfer */ |
654 | if (!has_alt_cmd && dev->buf_len <= 1 && | 656 | if (!dev->use_alt_cmd && dev->buf_len <= 1 && |
655 | !(dev->msg->flags & I2C_M_RECV_LEN)) | 657 | !(dev->msg->flags & I2C_M_RECV_LEN)) |
656 | start_flags |= AT91_TWI_STOP; | 658 | start_flags |= AT91_TWI_STOP; |
657 | at91_twi_write(dev, AT91_TWI_CR, start_flags); | 659 | at91_twi_write(dev, AT91_TWI_CR, start_flags); |
@@ -745,7 +747,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) | |||
745 | int ret; | 747 | int ret; |
746 | unsigned int_addr_flag = 0; | 748 | unsigned int_addr_flag = 0; |
747 | struct i2c_msg *m_start = msg; | 749 | struct i2c_msg *m_start = msg; |
748 | bool is_read, use_alt_cmd = false; | 750 | bool is_read; |
749 | 751 | ||
750 | dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num); | 752 | dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num); |
751 | 753 | ||
@@ -768,14 +770,16 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) | |||
768 | at91_twi_write(dev, AT91_TWI_IADR, internal_address); | 770 | at91_twi_write(dev, AT91_TWI_IADR, internal_address); |
769 | } | 771 | } |
770 | 772 | ||
773 | dev->use_alt_cmd = false; | ||
771 | is_read = (m_start->flags & I2C_M_RD); | 774 | is_read = (m_start->flags & I2C_M_RD); |
772 | if (dev->pdata->has_alt_cmd) { | 775 | if (dev->pdata->has_alt_cmd) { |
773 | if (m_start->len > 0) { | 776 | if (m_start->len > 0 && |
777 | m_start->len < AT91_I2C_MAX_ALT_CMD_DATA_SIZE) { | ||
774 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMEN); | 778 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMEN); |
775 | at91_twi_write(dev, AT91_TWI_ACR, | 779 | at91_twi_write(dev, AT91_TWI_ACR, |
776 | AT91_TWI_ACR_DATAL(m_start->len) | | 780 | AT91_TWI_ACR_DATAL(m_start->len) | |
777 | ((is_read) ? AT91_TWI_ACR_DIR : 0)); | 781 | ((is_read) ? AT91_TWI_ACR_DIR : 0)); |
778 | use_alt_cmd = true; | 782 | dev->use_alt_cmd = true; |
779 | } else { | 783 | } else { |
780 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMDIS); | 784 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMDIS); |
781 | } | 785 | } |
@@ -784,7 +788,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) | |||
784 | at91_twi_write(dev, AT91_TWI_MMR, | 788 | at91_twi_write(dev, AT91_TWI_MMR, |
785 | (m_start->addr << 16) | | 789 | (m_start->addr << 16) | |
786 | int_addr_flag | | 790 | int_addr_flag | |
787 | ((!use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0)); | 791 | ((!dev->use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0)); |
788 | 792 | ||
789 | dev->buf_len = m_start->len; | 793 | dev->buf_len = m_start->len; |
790 | dev->buf = m_start->buf; | 794 | dev->buf = m_start->buf; |
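
[Editor's note on the i2c-at91.c hunks above] The fix records the alternative-command decision per transfer in dev->use_alt_cmd: previously the byte-handling and DMA callbacks keyed off the static pdata->has_alt_cmd capability, so a transfer that had fallen back to manual STOP generation was still handled as if the hardware would auto-stop. It also bounds the message length with AT91_I2C_MAX_ALT_CMD_DATA_SIZE — reading the new check, the ACR DATAL field can only encode lengths below 256, i.e. it behaves as an 8-bit field (an assumption inferred from the bound, not quoted from the datasheet). A sketch of the eligibility test:

#include <assert.h>
#include <stdbool.h>

#define MAX_ALT_CMD_DATA_SIZE 256

static bool can_use_alt_cmd(unsigned int len)
{
        return len > 0 && len < MAX_ALT_CMD_DATA_SIZE;
}

int main(void)
{
        assert(can_use_alt_cmd(1));
        assert(can_use_alt_cmd(255));
        assert(!can_use_alt_cmd(0));    /* nothing to transfer */
        assert(!can_use_alt_cmd(256));  /* length won't fit the DATAL field */
        return 0;
}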
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c index 19c843828fe2..95f7cac76f89 100644 --- a/drivers/i2c/busses/i2c-bcm-iproc.c +++ b/drivers/i2c/busses/i2c-bcm-iproc.c | |||
@@ -158,7 +158,7 @@ static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data) | |||
158 | 158 | ||
159 | if (status & BIT(IS_M_START_BUSY_SHIFT)) { | 159 | if (status & BIT(IS_M_START_BUSY_SHIFT)) { |
160 | iproc_i2c->xfer_is_done = 1; | 160 | iproc_i2c->xfer_is_done = 1; |
161 | complete_all(&iproc_i2c->done); | 161 | complete(&iproc_i2c->done); |
162 | } | 162 | } |
163 | 163 | ||
164 | writel(status, iproc_i2c->base + IS_OFFSET); | 164 | writel(status, iproc_i2c->base + IS_OFFSET); |
diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c index ac9f47679c3a..f98743277e3c 100644 --- a/drivers/i2c/busses/i2c-bcm-kona.c +++ b/drivers/i2c/busses/i2c-bcm-kona.c | |||
@@ -229,7 +229,7 @@ static irqreturn_t bcm_kona_i2c_isr(int irq, void *devid) | |||
229 | dev->base + TXFCR_OFFSET); | 229 | dev->base + TXFCR_OFFSET); |
230 | 230 | ||
231 | writel(status & ~ISR_RESERVED_MASK, dev->base + ISR_OFFSET); | 231 | writel(status & ~ISR_RESERVED_MASK, dev->base + ISR_OFFSET); |
232 | complete_all(&dev->done); | 232 | complete(&dev->done); |
233 | 233 | ||
234 | return IRQ_HANDLED; | 234 | return IRQ_HANDLED; |
235 | } | 235 | } |
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c index 3f5a4d71d3bf..385b57bfcb38 100644 --- a/drivers/i2c/busses/i2c-brcmstb.c +++ b/drivers/i2c/busses/i2c-brcmstb.c | |||
@@ -228,7 +228,7 @@ static irqreturn_t brcmstb_i2c_isr(int irq, void *devid) | |||
228 | return IRQ_NONE; | 228 | return IRQ_NONE; |
229 | 229 | ||
230 | brcmstb_i2c_enable_disable_irq(dev, INT_DISABLE); | 230 | brcmstb_i2c_enable_disable_irq(dev, INT_DISABLE); |
231 | complete_all(&dev->done); | 231 | complete(&dev->done); |
232 | 232 | ||
233 | dev_dbg(dev->device, "isr handled"); | 233 | dev_dbg(dev->device, "isr handled"); |
234 | return IRQ_HANDLED; | 234 | return IRQ_HANDLED; |
diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c index a0d95ff682ae..2d5ff86398d0 100644 --- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c +++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c | |||
@@ -215,7 +215,7 @@ static int ec_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg i2c_msgs[], | |||
215 | msg->outsize = request_len; | 215 | msg->outsize = request_len; |
216 | msg->insize = response_len; | 216 | msg->insize = response_len; |
217 | 217 | ||
218 | result = cros_ec_cmd_xfer(bus->ec, msg); | 218 | result = cros_ec_cmd_xfer_status(bus->ec, msg); |
219 | if (result < 0) { | 219 | if (result < 0) { |
220 | dev_err(dev, "Error transferring EC i2c message %d\n", result); | 220 | dev_err(dev, "Error transferring EC i2c message %d\n", result); |
221 | goto exit; | 221 | goto exit; |
diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c index 71d3929adf54..76e28980904f 100644 --- a/drivers/i2c/busses/i2c-meson.c +++ b/drivers/i2c/busses/i2c-meson.c | |||
@@ -211,7 +211,7 @@ static void meson_i2c_stop(struct meson_i2c *i2c) | |||
211 | meson_i2c_add_token(i2c, TOKEN_STOP); | 211 | meson_i2c_add_token(i2c, TOKEN_STOP); |
212 | } else { | 212 | } else { |
213 | i2c->state = STATE_IDLE; | 213 | i2c->state = STATE_IDLE; |
214 | complete_all(&i2c->done); | 214 | complete(&i2c->done); |
215 | } | 215 | } |
216 | } | 216 | } |
217 | 217 | ||
@@ -238,7 +238,7 @@ static irqreturn_t meson_i2c_irq(int irqno, void *dev_id) | |||
238 | dev_dbg(i2c->dev, "error bit set\n"); | 238 | dev_dbg(i2c->dev, "error bit set\n"); |
239 | i2c->error = -ENXIO; | 239 | i2c->error = -ENXIO; |
240 | i2c->state = STATE_IDLE; | 240 | i2c->state = STATE_IDLE; |
241 | complete_all(&i2c->done); | 241 | complete(&i2c->done); |
242 | goto out; | 242 | goto out; |
243 | } | 243 | } |
244 | 244 | ||
@@ -269,7 +269,7 @@ static irqreturn_t meson_i2c_irq(int irqno, void *dev_id) | |||
269 | break; | 269 | break; |
270 | case STATE_STOP: | 270 | case STATE_STOP: |
271 | i2c->state = STATE_IDLE; | 271 | i2c->state = STATE_IDLE; |
272 | complete_all(&i2c->done); | 272 | complete(&i2c->done); |
273 | break; | 273 | break; |
274 | case STATE_IDLE: | 274 | case STATE_IDLE: |
275 | break; | 275 | break; |
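
[Editor's note on the complete_all() -> complete() conversions in the iproc, kona, brcmstb, and meson hunks above] These share one rationale: the completions have a single waiter and are reused across transfers, and complete_all() saturates the internal done counter, so a later wait would fall straight through unless reinit_completion() were called each time. A toy user-space model of that counter (mirroring, to the best of my reading, the kernel's semantics; saturation guard in complete() omitted):

#include <assert.h>
#include <limits.h>

struct completion { unsigned int done; };

static void complete(struct completion *x)     { x->done++; }
static void complete_all(struct completion *x) { x->done = UINT_MAX; }

static int try_wait(struct completion *x)
{
        if (!x->done)
                return 0;               /* would block */
        if (x->done != UINT_MAX)
                x->done--;              /* consume one wake-up */
        return 1;
}

int main(void)
{
        struct completion c = { 0 };

        complete_all(&c);
        try_wait(&c);
        assert(try_wait(&c) == 1);      /* saturated: next wait falls through */

        c.done = 0;                     /* what reinit_completion() would do */
        complete(&c);
        try_wait(&c);
        assert(try_wait(&c) == 0);      /* correctly blocks again */
        return 0;
}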
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c index dfa7a4b4a91d..ac88a524143e 100644 --- a/drivers/i2c/busses/i2c-ocores.c +++ b/drivers/i2c/busses/i2c-ocores.c | |||
@@ -379,6 +379,7 @@ static int ocores_i2c_of_probe(struct platform_device *pdev, | |||
379 | if (!clock_frequency_present) { | 379 | if (!clock_frequency_present) { |
380 | dev_err(&pdev->dev, | 380 | dev_err(&pdev->dev, |
381 | "Missing required parameter 'opencores,ip-clock-frequency'\n"); | 381 | "Missing required parameter 'opencores,ip-clock-frequency'\n"); |
382 | clk_disable_unprepare(i2c->clk); | ||
382 | return -ENODEV; | 383 | return -ENODEV; |
383 | } | 384 | } |
384 | i2c->ip_clock_khz = clock_frequency / 1000; | 385 | i2c->ip_clock_khz = clock_frequency / 1000; |
@@ -467,20 +468,21 @@ static int ocores_i2c_probe(struct platform_device *pdev) | |||
467 | default: | 468 | default: |
468 | dev_err(&pdev->dev, "Unsupported I/O width (%d)\n", | 469 | dev_err(&pdev->dev, "Unsupported I/O width (%d)\n", |
469 | i2c->reg_io_width); | 470 | i2c->reg_io_width); |
470 | return -EINVAL; | 471 | ret = -EINVAL; |
472 | goto err_clk; | ||
471 | } | 473 | } |
472 | } | 474 | } |
473 | 475 | ||
474 | ret = ocores_init(&pdev->dev, i2c); | 476 | ret = ocores_init(&pdev->dev, i2c); |
475 | if (ret) | 477 | if (ret) |
476 | return ret; | 478 | goto err_clk; |
477 | 479 | ||
478 | init_waitqueue_head(&i2c->wait); | 480 | init_waitqueue_head(&i2c->wait); |
479 | ret = devm_request_irq(&pdev->dev, irq, ocores_isr, 0, | 481 | ret = devm_request_irq(&pdev->dev, irq, ocores_isr, 0, |
480 | pdev->name, i2c); | 482 | pdev->name, i2c); |
481 | if (ret) { | 483 | if (ret) { |
482 | dev_err(&pdev->dev, "Cannot claim IRQ\n"); | 484 | dev_err(&pdev->dev, "Cannot claim IRQ\n"); |
483 | return ret; | 485 | goto err_clk; |
484 | } | 486 | } |
485 | 487 | ||
486 | /* hook up driver to tree */ | 488 | /* hook up driver to tree */ |
@@ -494,7 +496,7 @@ static int ocores_i2c_probe(struct platform_device *pdev) | |||
494 | ret = i2c_add_adapter(&i2c->adap); | 496 | ret = i2c_add_adapter(&i2c->adap); |
495 | if (ret) { | 497 | if (ret) { |
496 | dev_err(&pdev->dev, "Failed to add adapter\n"); | 498 | dev_err(&pdev->dev, "Failed to add adapter\n"); |
497 | return ret; | 499 | goto err_clk; |
498 | } | 500 | } |
499 | 501 | ||
500 | /* add in known devices to the bus */ | 502 | /* add in known devices to the bus */ |
@@ -504,6 +506,10 @@ static int ocores_i2c_probe(struct platform_device *pdev) | |||
504 | } | 506 | } |
505 | 507 | ||
506 | return 0; | 508 | return 0; |
509 | |||
510 | err_clk: | ||
511 | clk_disable_unprepare(i2c->clk); | ||
512 | return ret; | ||
507 | } | 513 | } |
508 | 514 | ||
509 | static int ocores_i2c_remove(struct platform_device *pdev) | 515 | static int ocores_i2c_remove(struct platform_device *pdev) |
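
[Editor's note on the i2c-ocores.c hunks above] The probe fix is the standard goto-unwind shape: once clk_prepare_enable() has succeeded, every later failure path must release the clock before returning. The i2c-demux-pinctrl and cma.c hunks below apply the same discipline, converting early returns into jumps to ordered error labels. A runnable miniature of the pattern (illustrative names):

#include <stdio.h>

static int acquire(void)  { puts("clk on");  return 0; }
static void release(void) { puts("clk off"); }
static int step(int fail) { return fail ? -1 : 0; }

static int probe(int fail_late)
{
        int ret = acquire();

        if (ret)
                return ret;

        ret = step(fail_late);
        if (ret)
                goto err_clk;           /* single unwind point */

        return 0;

err_clk:
        release();                      /* undo acquire() on every error path */
        return ret;
}

int main(void)
{
        probe(1);
        return 0;
}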
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c index 8de073aed001..215ac87f606d 100644 --- a/drivers/i2c/muxes/i2c-demux-pinctrl.c +++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c | |||
@@ -68,7 +68,7 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne | |||
68 | adap = of_find_i2c_adapter_by_node(priv->chan[new_chan].parent_np); | 68 | adap = of_find_i2c_adapter_by_node(priv->chan[new_chan].parent_np); |
69 | if (!adap) { | 69 | if (!adap) { |
70 | ret = -ENODEV; | 70 | ret = -ENODEV; |
71 | goto err; | 71 | goto err_with_revert; |
72 | } | 72 | } |
73 | 73 | ||
74 | p = devm_pinctrl_get_select(adap->dev.parent, priv->bus_name); | 74 | p = devm_pinctrl_get_select(adap->dev.parent, priv->bus_name); |
@@ -103,6 +103,8 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne | |||
103 | 103 | ||
104 | err_with_put: | 104 | err_with_put: |
105 | i2c_put_adapter(adap); | 105 | i2c_put_adapter(adap); |
106 | err_with_revert: | ||
107 | of_changeset_revert(&priv->chan[new_chan].chgset); | ||
106 | err: | 108 | err: |
107 | dev_err(priv->dev, "failed to setup demux-adapter %d (%d)\n", new_chan, ret); | 109 | dev_err(priv->dev, "failed to setup demux-adapter %d (%d)\n", new_chan, ret); |
108 | return ret; | 110 | return ret; |
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index e6dfa1bd3def..5f65a78b27c9 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
@@ -2462,18 +2462,24 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) | |||
2462 | 2462 | ||
2463 | if (addr->dev_addr.bound_dev_if) { | 2463 | if (addr->dev_addr.bound_dev_if) { |
2464 | ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if); | 2464 | ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if); |
2465 | if (!ndev) | 2465 | if (!ndev) { |
2466 | return -ENODEV; | 2466 | ret = -ENODEV; |
2467 | goto err2; | ||
2468 | } | ||
2467 | 2469 | ||
2468 | if (ndev->flags & IFF_LOOPBACK) { | 2470 | if (ndev->flags & IFF_LOOPBACK) { |
2469 | dev_put(ndev); | 2471 | dev_put(ndev); |
2470 | if (!id_priv->id.device->get_netdev) | 2472 | if (!id_priv->id.device->get_netdev) { |
2471 | return -EOPNOTSUPP; | 2473 | ret = -EOPNOTSUPP; |
2474 | goto err2; | ||
2475 | } | ||
2472 | 2476 | ||
2473 | ndev = id_priv->id.device->get_netdev(id_priv->id.device, | 2477 | ndev = id_priv->id.device->get_netdev(id_priv->id.device, |
2474 | id_priv->id.port_num); | 2478 | id_priv->id.port_num); |
2475 | if (!ndev) | 2479 | if (!ndev) { |
2476 | return -ENODEV; | 2480 | ret = -ENODEV; |
2481 | goto err2; | ||
2482 | } | ||
2477 | } | 2483 | } |
2478 | 2484 | ||
2479 | route->path_rec->net = &init_net; | 2485 | route->path_rec->net = &init_net; |
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 3aca7f6171b4..b6a953aed7e8 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c | |||
@@ -1827,8 +1827,12 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) | |||
1827 | (ep->mpa_pkt + sizeof(*mpa)); | 1827 | (ep->mpa_pkt + sizeof(*mpa)); |
1828 | ep->ird = ntohs(mpa_v2_params->ird) & | 1828 | ep->ird = ntohs(mpa_v2_params->ird) & |
1829 | MPA_V2_IRD_ORD_MASK; | 1829 | MPA_V2_IRD_ORD_MASK; |
1830 | ep->ird = min_t(u32, ep->ird, | ||
1831 | cur_max_read_depth(ep->com.dev)); | ||
1830 | ep->ord = ntohs(mpa_v2_params->ord) & | 1832 | ep->ord = ntohs(mpa_v2_params->ord) & |
1831 | MPA_V2_IRD_ORD_MASK; | 1833 | MPA_V2_IRD_ORD_MASK; |
1834 | ep->ord = min_t(u32, ep->ord, | ||
1835 | cur_max_read_depth(ep->com.dev)); | ||
1832 | PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird, | 1836 | PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird, |
1833 | ep->ord); | 1837 | ep->ord); |
1834 | if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL) | 1838 | if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL) |
@@ -3136,7 +3140,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
3136 | if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { | 3140 | if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { |
3137 | if (conn_param->ord > ep->ird) { | 3141 | if (conn_param->ord > ep->ird) { |
3138 | if (RELAXED_IRD_NEGOTIATION) { | 3142 | if (RELAXED_IRD_NEGOTIATION) { |
3139 | ep->ord = ep->ird; | 3143 | conn_param->ord = ep->ird; |
3140 | } else { | 3144 | } else { |
3141 | ep->ird = conn_param->ird; | 3145 | ep->ird = conn_param->ird; |
3142 | ep->ord = conn_param->ord; | 3146 | ep->ord = conn_param->ord; |
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index 812ab7278b8e..ac926c942fee 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c | |||
@@ -1016,15 +1016,15 @@ int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata) | |||
1016 | int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) | 1016 | int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) |
1017 | { | 1017 | { |
1018 | struct c4iw_cq *chp; | 1018 | struct c4iw_cq *chp; |
1019 | int ret; | 1019 | int ret = 0; |
1020 | unsigned long flag; | 1020 | unsigned long flag; |
1021 | 1021 | ||
1022 | chp = to_c4iw_cq(ibcq); | 1022 | chp = to_c4iw_cq(ibcq); |
1023 | spin_lock_irqsave(&chp->lock, flag); | 1023 | spin_lock_irqsave(&chp->lock, flag); |
1024 | ret = t4_arm_cq(&chp->cq, | 1024 | t4_arm_cq(&chp->cq, |
1025 | (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED); | 1025 | (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED); |
1026 | if (flags & IB_CQ_REPORT_MISSED_EVENTS) | ||
1027 | ret = t4_cq_notempty(&chp->cq); | ||
1026 | spin_unlock_irqrestore(&chp->lock, flag); | 1028 | spin_unlock_irqrestore(&chp->lock, flag); |
1027 | if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS)) | ||
1028 | ret = 0; | ||
1029 | return ret; | 1029 | return ret; |
1030 | } | 1030 | } |
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h index 6126bbe36095..02173f4315fa 100644 --- a/drivers/infiniband/hw/cxgb4/t4.h +++ b/drivers/infiniband/hw/cxgb4/t4.h | |||
@@ -634,6 +634,11 @@ static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe) | |||
634 | return (CQE_GENBIT(cqe) == cq->gen); | 634 | return (CQE_GENBIT(cqe) == cq->gen); |
635 | } | 635 | } |
636 | 636 | ||
637 | static inline int t4_cq_notempty(struct t4_cq *cq) | ||
638 | { | ||
639 | return cq->sw_in_use || t4_valid_cqe(cq, &cq->queue[cq->cidx]); | ||
640 | } | ||
641 | |||
637 | static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe) | 642 | static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe) |
638 | { | 643 | { |
639 | int ret; | 644 | int ret; |
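
[Editor's note on the cxgb4 hunks above] The cq.c/t4.h change implements the IB_CQ_REPORT_MISSED_EVENTS side of the request-notify contract: after arming, the verb should return a positive value if completions are already queued — t4_cq_notempty() checks for software-queued entries or a valid hardware CQE — telling the caller to poll again rather than wait for an interrupt that may never fire. A toy model of that contract:

#include <assert.h>
#include <stdbool.h>

#define REPORT_MISSED_EVENTS 0x1

struct cq { int queued; bool armed; };

static int arm_cq(struct cq *cq, int flags)
{
        cq->armed = true;               /* request an interrupt on next CQE */
        if (flags & REPORT_MISSED_EVENTS)
                return cq->queued > 0;  /* > 0: events pending, poll again */
        return 0;
}

int main(void)
{
        struct cq cq = { .queued = 2, .armed = false };

        /* Caller must drain the CQ when arming reports missed events: */
        assert(arm_cq(&cq, REPORT_MISSED_EVENTS) == 1);
        return 0;
}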
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c index 79575ee873f2..0566393e5aba 100644 --- a/drivers/infiniband/hw/hfi1/affinity.c +++ b/drivers/infiniband/hw/hfi1/affinity.c | |||
@@ -47,7 +47,6 @@ | |||
47 | #include <linux/topology.h> | 47 | #include <linux/topology.h> |
48 | #include <linux/cpumask.h> | 48 | #include <linux/cpumask.h> |
49 | #include <linux/module.h> | 49 | #include <linux/module.h> |
50 | #include <linux/cpumask.h> | ||
51 | 50 | ||
52 | #include "hfi.h" | 51 | #include "hfi.h" |
53 | #include "affinity.h" | 52 | #include "affinity.h" |
@@ -682,7 +681,7 @@ int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf, | |||
682 | size_t count) | 681 | size_t count) |
683 | { | 682 | { |
684 | struct hfi1_affinity_node *entry; | 683 | struct hfi1_affinity_node *entry; |
685 | struct cpumask mask; | 684 | cpumask_var_t mask; |
686 | int ret, i; | 685 | int ret, i; |
687 | 686 | ||
688 | spin_lock(&node_affinity.lock); | 687 | spin_lock(&node_affinity.lock); |
@@ -692,19 +691,24 @@ int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf, | |||
692 | if (!entry) | 691 | if (!entry) |
693 | return -EINVAL; | 692 | return -EINVAL; |
694 | 693 | ||
695 | ret = cpulist_parse(buf, &mask); | 694 | ret = zalloc_cpumask_var(&mask, GFP_KERNEL); |
695 | if (!ret) | ||
696 | return -ENOMEM; | ||
697 | |||
698 | ret = cpulist_parse(buf, mask); | ||
696 | if (ret) | 699 | if (ret) |
697 | return ret; | 700 | goto out; |
698 | 701 | ||
699 | if (!cpumask_subset(&mask, cpu_online_mask) || cpumask_empty(&mask)) { | 702 | if (!cpumask_subset(mask, cpu_online_mask) || cpumask_empty(mask)) { |
700 | dd_dev_warn(dd, "Invalid CPU mask\n"); | 703 | dd_dev_warn(dd, "Invalid CPU mask\n"); |
701 | return -EINVAL; | 704 | ret = -EINVAL; |
705 | goto out; | ||
702 | } | 706 | } |
703 | 707 | ||
704 | mutex_lock(&sdma_affinity_mutex); | 708 | mutex_lock(&sdma_affinity_mutex); |
705 | /* reset the SDMA interrupt affinity details */ | 709 | /* reset the SDMA interrupt affinity details */ |
706 | init_cpu_mask_set(&entry->def_intr); | 710 | init_cpu_mask_set(&entry->def_intr); |
707 | cpumask_copy(&entry->def_intr.mask, &mask); | 711 | cpumask_copy(&entry->def_intr.mask, mask); |
708 | /* | 712 | /* |
709 | * Reassign the affinity for each SDMA interrupt. | 713 | * Reassign the affinity for each SDMA interrupt. |
710 | */ | 714 | */ |
@@ -720,8 +724,9 @@ int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf, | |||
720 | if (ret) | 724 | if (ret) |
721 | break; | 725 | break; |
722 | } | 726 | } |
723 | |||
724 | mutex_unlock(&sdma_affinity_mutex); | 727 | mutex_unlock(&sdma_affinity_mutex); |
728 | out: | ||
729 | free_cpumask_var(mask); | ||
725 | return ret ? ret : strnlen(buf, PAGE_SIZE); | 730 | return ret ? ret : strnlen(buf, PAGE_SIZE); |
726 | } | 731 | } |
727 | 732 | ||
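
[Editor's note on the hfi1/affinity.c hunks above] The fix replaces an on-stack struct cpumask with cpumask_var_t: with a large CONFIG_NR_CPUS (8192 makes the mask 1 KiB) the stack frame is too big, so the mask is heap-allocated and freed on every exit path via the new out: label. A user-space analogue of the shape — the kernel-style names below are re-implemented here purely for illustration:

#include <errno.h>
#include <stdlib.h>

#define NR_CPUS 8192                    /* 1 KiB of mask: too big for stack */

typedef unsigned long *cpumask_var_t;

static int zalloc_cpumask_var(cpumask_var_t *mask)
{
        *mask = calloc(NR_CPUS / 8, 1); /* heap allocation instead */
        return *mask != NULL;
}

static void free_cpumask_var(cpumask_var_t mask)
{
        free(mask);
}

int set_affinity(const char *buf)
{
        cpumask_var_t mask;
        int ret = 0;

        if (!zalloc_cpumask_var(&mask))
                return -ENOMEM;

        /* ... parse buf into mask, validate, apply ... */
        (void)buf;

        free_cpumask_var(mask);         /* released on every exit path */
        return ret;
}

int main(void)
{
        return set_affinity("0-3") ? 1 : 0;
}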
diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c index dbab9d9cc288..a49cc88f08a2 100644 --- a/drivers/infiniband/hw/hfi1/debugfs.c +++ b/drivers/infiniband/hw/hfi1/debugfs.c | |||
@@ -223,28 +223,32 @@ DEBUGFS_SEQ_FILE_OPEN(ctx_stats) | |||
223 | DEBUGFS_FILE_OPS(ctx_stats); | 223 | DEBUGFS_FILE_OPS(ctx_stats); |
224 | 224 | ||
225 | static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos) | 225 | static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos) |
226 | __acquires(RCU) | 226 | __acquires(RCU) |
227 | { | 227 | { |
228 | struct qp_iter *iter; | 228 | struct qp_iter *iter; |
229 | loff_t n = *pos; | 229 | loff_t n = *pos; |
230 | 230 | ||
231 | rcu_read_lock(); | ||
232 | iter = qp_iter_init(s->private); | 231 | iter = qp_iter_init(s->private); |
232 | |||
233 | /* stop calls rcu_read_unlock */ | ||
234 | rcu_read_lock(); | ||
235 | |||
233 | if (!iter) | 236 | if (!iter) |
234 | return NULL; | 237 | return NULL; |
235 | 238 | ||
236 | while (n--) { | 239 | do { |
237 | if (qp_iter_next(iter)) { | 240 | if (qp_iter_next(iter)) { |
238 | kfree(iter); | 241 | kfree(iter); |
239 | return NULL; | 242 | return NULL; |
240 | } | 243 | } |
241 | } | 244 | } while (n--); |
242 | 245 | ||
243 | return iter; | 246 | return iter; |
244 | } | 247 | } |
245 | 248 | ||
246 | static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr, | 249 | static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr, |
247 | loff_t *pos) | 250 | loff_t *pos) |
251 | __must_hold(RCU) | ||
248 | { | 252 | { |
249 | struct qp_iter *iter = iter_ptr; | 253 | struct qp_iter *iter = iter_ptr; |
250 | 254 | ||
@@ -259,7 +263,7 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr, | |||
259 | } | 263 | } |
260 | 264 | ||
261 | static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr) | 265 | static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr) |
262 | __releases(RCU) | 266 | __releases(RCU) |
263 | { | 267 | { |
264 | rcu_read_unlock(); | 268 | rcu_read_unlock(); |
265 | } | 269 | } |
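
[Editor's note on the hfi1/debugfs.c hunks above] This change pairs with the qp.c hunk further down: qp_iter_init() no longer pre-advances to the first QP, so _qp_stats_seq_start() must advance the iterator at least once — hence while (n--) becoming do/while, which performs exactly n + 1 advances for position n. A counting check:

#include <assert.h>

static int advances_for_pos(int n)
{
        int steps = 0;

        do {
                steps++;        /* one qp_iter_next() call */
        } while (n--);

        return steps;
}

int main(void)
{
        assert(advances_for_pos(0) == 1);       /* first element */
        assert(advances_for_pos(2) == 3);
        return 0;
}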
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c index 8246dc7d0573..303f10555729 100644 --- a/drivers/infiniband/hw/hfi1/driver.c +++ b/drivers/infiniband/hw/hfi1/driver.c | |||
@@ -888,14 +888,15 @@ void set_all_slowpath(struct hfi1_devdata *dd) | |||
888 | } | 888 | } |
889 | 889 | ||
890 | static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd, | 890 | static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd, |
891 | struct hfi1_packet packet, | 891 | struct hfi1_packet *packet, |
892 | struct hfi1_devdata *dd) | 892 | struct hfi1_devdata *dd) |
893 | { | 893 | { |
894 | struct work_struct *lsaw = &rcd->ppd->linkstate_active_work; | 894 | struct work_struct *lsaw = &rcd->ppd->linkstate_active_work; |
895 | struct hfi1_message_header *hdr = hfi1_get_msgheader(packet.rcd->dd, | 895 | struct hfi1_message_header *hdr = hfi1_get_msgheader(packet->rcd->dd, |
896 | packet.rhf_addr); | 896 | packet->rhf_addr); |
897 | u8 etype = rhf_rcv_type(packet->rhf); | ||
897 | 898 | ||
898 | if (hdr2sc(hdr, packet.rhf) != 0xf) { | 899 | if (etype == RHF_RCV_TYPE_IB && hdr2sc(hdr, packet->rhf) != 0xf) { |
899 | int hwstate = read_logical_state(dd); | 900 | int hwstate = read_logical_state(dd); |
900 | 901 | ||
901 | if (hwstate != LSTATE_ACTIVE) { | 902 | if (hwstate != LSTATE_ACTIVE) { |
@@ -979,7 +980,7 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread) | |||
979 | /* Auto activate link on non-SC15 packet receive */ | 980 | /* Auto activate link on non-SC15 packet receive */ |
980 | if (unlikely(rcd->ppd->host_link_state == | 981 | if (unlikely(rcd->ppd->host_link_state == |
981 | HLS_UP_ARMED) && | 982 | HLS_UP_ARMED) && |
982 | set_armed_to_active(rcd, packet, dd)) | 983 | set_armed_to_active(rcd, &packet, dd)) |
983 | goto bail; | 984 | goto bail; |
984 | last = process_rcv_packet(&packet, thread); | 985 | last = process_rcv_packet(&packet, thread); |
985 | } | 986 | } |
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index 1ecbec192358..7e03ccd2554d 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c | |||
@@ -183,6 +183,7 @@ static int hfi1_file_open(struct inode *inode, struct file *fp) | |||
183 | if (fd) { | 183 | if (fd) { |
184 | fd->rec_cpu_num = -1; /* no cpu affinity by default */ | 184 | fd->rec_cpu_num = -1; /* no cpu affinity by default */ |
185 | fd->mm = current->mm; | 185 | fd->mm = current->mm; |
186 | atomic_inc(&fd->mm->mm_count); | ||
186 | } | 187 | } |
187 | 188 | ||
188 | fp->private_data = fd; | 189 | fp->private_data = fd; |
@@ -222,7 +223,7 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd, | |||
222 | ret = assign_ctxt(fp, &uinfo); | 223 | ret = assign_ctxt(fp, &uinfo); |
223 | if (ret < 0) | 224 | if (ret < 0) |
224 | return ret; | 225 | return ret; |
225 | setup_ctxt(fp); | 226 | ret = setup_ctxt(fp); |
226 | if (ret) | 227 | if (ret) |
227 | return ret; | 228 | return ret; |
228 | ret = user_init(fp); | 229 | ret = user_init(fp); |
@@ -779,6 +780,7 @@ static int hfi1_file_close(struct inode *inode, struct file *fp) | |||
779 | mutex_unlock(&hfi1_mutex); | 780 | mutex_unlock(&hfi1_mutex); |
780 | hfi1_free_ctxtdata(dd, uctxt); | 781 | hfi1_free_ctxtdata(dd, uctxt); |
781 | done: | 782 | done: |
783 | mmdrop(fdata->mm); | ||
782 | kobject_put(&dd->kobj); | 784 | kobject_put(&dd->kobj); |
783 | kfree(fdata); | 785 | kfree(fdata); |
784 | return 0; | 786 | return 0; |
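
[Editor's note on the hfi1/file_ops.c hunks above] The change pins the mm_struct the driver caches: one atomic_inc(&fd->mm->mm_count) at open, one mmdrop() at close, so fd->mm cannot dangle if the task exits while the file stays open (the same hunk also stops ignoring setup_ctxt()'s return value). A generic refcount sketch of that open/close pairing:

#include <assert.h>
#include <stddef.h>

struct obj { int refcount; };

static void get_obj(struct obj *o) { o->refcount++; }
static int  put_obj(struct obj *o) { return --o->refcount == 0; /* 1: may free */ }

struct fd { struct obj *mm; };

static void fd_open(struct fd *fd, struct obj *mm)
{
        fd->mm = mm;
        get_obj(mm);            /* pin for as long as fd->mm is stored */
}

static void fd_close(struct fd *fd)
{
        put_obj(fd->mm);        /* paired drop */
        fd->mm = NULL;
}

int main(void)
{
        struct obj mm = { .refcount = 1 };
        struct fd fd;

        fd_open(&fd, &mm);
        assert(mm.refcount == 2);
        fd_close(&fd);
        assert(mm.refcount == 1);
        return 0;
}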
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index 1000e0fd96d9..a021e660d482 100644 --- a/drivers/infiniband/hw/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h | |||
@@ -1272,9 +1272,26 @@ static inline int hdr2sc(struct hfi1_message_header *hdr, u64 rhf) | |||
1272 | ((!!(rhf_dc_info(rhf))) << 4); | 1272 | ((!!(rhf_dc_info(rhf))) << 4); |
1273 | } | 1273 | } |
1274 | 1274 | ||
1275 | #define HFI1_JKEY_WIDTH 16 | ||
1276 | #define HFI1_JKEY_MASK (BIT(16) - 1) | ||
1277 | #define HFI1_ADMIN_JKEY_RANGE 32 | ||
1278 | |||
1279 | /* | ||
1280 | * J_KEYs are split and allocated in the following groups: | ||
1281 | * 0 - 31 - users with administrator privileges | ||
1282 | * 32 - 63 - kernel protocols using KDETH packets | ||
1283 | * 64 - 65535 - all other users using KDETH packets | ||
1284 | */ | ||
1275 | static inline u16 generate_jkey(kuid_t uid) | 1285 | static inline u16 generate_jkey(kuid_t uid) |
1276 | { | 1286 | { |
1277 | return from_kuid(current_user_ns(), uid) & 0xffff; | 1287 | u16 jkey = from_kuid(current_user_ns(), uid) & HFI1_JKEY_MASK; |
1288 | |||
1289 | if (capable(CAP_SYS_ADMIN)) | ||
1290 | jkey &= HFI1_ADMIN_JKEY_RANGE - 1; | ||
1291 | else if (jkey < 64) | ||
1292 | jkey |= BIT(HFI1_JKEY_WIDTH - 1); | ||
1293 | |||
1294 | return jkey; | ||
1278 | } | 1295 | } |
1279 | 1296 | ||
1280 | /* | 1297 | /* |
@@ -1656,7 +1673,6 @@ struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd) | |||
1656 | struct hfi1_devdata *hfi1_init_dd(struct pci_dev *, | 1673 | struct hfi1_devdata *hfi1_init_dd(struct pci_dev *, |
1657 | const struct pci_device_id *); | 1674 | const struct pci_device_id *); |
1658 | void hfi1_free_devdata(struct hfi1_devdata *); | 1675 | void hfi1_free_devdata(struct hfi1_devdata *); |
1659 | void cc_state_reclaim(struct rcu_head *rcu); | ||
1660 | struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra); | 1676 | struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra); |
1661 | 1677 | ||
1662 | /* LED beaconing functions */ | 1678 | /* LED beaconing functions */ |
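
[Editor's note on the hfi1/hfi.h hunks above] The generate_jkey() change partitions the 16-bit J_KEY space as the new comment documents: administrators are folded into 0-31, kernel protocols own 32-63, and any other user whose masked key would land in the reserved 0-63 window gets bit 15 set to push it out. A user-space re-implementation with worked values:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define JKEY_WIDTH       16
#define JKEY_MASK        ((1U << JKEY_WIDTH) - 1)
#define ADMIN_JKEY_RANGE 32

static uint16_t generate_jkey(uint32_t uid, bool admin)
{
        uint16_t jkey = uid & JKEY_MASK;

        if (admin)
                jkey &= ADMIN_JKEY_RANGE - 1;           /* 0..31 */
        else if (jkey < 64)
                jkey |= 1U << (JKEY_WIDTH - 1);         /* leave reserved 0..63 */

        return jkey;
}

int main(void)
{
        assert(generate_jkey(7, true) == 7);            /* admin range */
        assert(generate_jkey(7, false) == 0x8007);      /* 32768 + 7 */
        assert(generate_jkey(100, false) == 100);       /* already outside 0..63 */
        return 0;
}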
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index a358d23ecd54..b7935451093c 100644 --- a/drivers/infiniband/hw/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c | |||
@@ -1333,7 +1333,7 @@ static void cleanup_device_data(struct hfi1_devdata *dd) | |||
1333 | spin_unlock(&ppd->cc_state_lock); | 1333 | spin_unlock(&ppd->cc_state_lock); |
1334 | 1334 | ||
1335 | if (cc_state) | 1335 | if (cc_state) |
1336 | call_rcu(&cc_state->rcu, cc_state_reclaim); | 1336 | kfree_rcu(cc_state, rcu); |
1337 | } | 1337 | } |
1338 | 1338 | ||
1339 | free_credit_return(dd); | 1339 | free_credit_return(dd); |
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c index 1263abe01999..39e42c373a01 100644 --- a/drivers/infiniband/hw/hfi1/mad.c +++ b/drivers/infiniband/hw/hfi1/mad.c | |||
@@ -1819,6 +1819,11 @@ static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data, | |||
1819 | u32 len = OPA_AM_CI_LEN(am) + 1; | 1819 | u32 len = OPA_AM_CI_LEN(am) + 1; |
1820 | int ret; | 1820 | int ret; |
1821 | 1821 | ||
1822 | if (dd->pport->port_type != PORT_TYPE_QSFP) { | ||
1823 | smp->status |= IB_SMP_INVALID_FIELD; | ||
1824 | return reply((struct ib_mad_hdr *)smp); | ||
1825 | } | ||
1826 | |||
1822 | #define __CI_PAGE_SIZE BIT(7) /* 128 bytes */ | 1827 | #define __CI_PAGE_SIZE BIT(7) /* 128 bytes */ |
1823 | #define __CI_PAGE_MASK ~(__CI_PAGE_SIZE - 1) | 1828 | #define __CI_PAGE_MASK ~(__CI_PAGE_SIZE - 1) |
1824 | #define __CI_PAGE_NUM(a) ((a) & __CI_PAGE_MASK) | 1829 | #define __CI_PAGE_NUM(a) ((a) & __CI_PAGE_MASK) |
@@ -3398,7 +3403,7 @@ static void apply_cc_state(struct hfi1_pportdata *ppd) | |||
3398 | 3403 | ||
3399 | spin_unlock(&ppd->cc_state_lock); | 3404 | spin_unlock(&ppd->cc_state_lock); |
3400 | 3405 | ||
3401 | call_rcu(&old_cc_state->rcu, cc_state_reclaim); | 3406 | kfree_rcu(old_cc_state, rcu); |
3402 | } | 3407 | } |
3403 | 3408 | ||
3404 | static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data, | 3409 | static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data, |
@@ -3553,13 +3558,6 @@ static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data, | |||
3553 | return reply((struct ib_mad_hdr *)smp); | 3558 | return reply((struct ib_mad_hdr *)smp); |
3554 | } | 3559 | } |
3555 | 3560 | ||
3556 | void cc_state_reclaim(struct rcu_head *rcu) | ||
3557 | { | ||
3558 | struct cc_state *cc_state = container_of(rcu, struct cc_state, rcu); | ||
3559 | |||
3560 | kfree(cc_state); | ||
3561 | } | ||
3562 | |||
3563 | static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data, | 3561 | static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data, |
3564 | struct ib_device *ibdev, u8 port, | 3562 | struct ib_device *ibdev, u8 port, |
3565 | u32 *resp_len) | 3563 | u32 *resp_len) |
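
[Editor's note on the init.c and mad.c hunks above] Both apply the standard kfree_rcu() conversion: when an RCU callback does nothing but kfree() its container — as the deleted cc_state_reclaim() did — the helper replaces call_rcu() plus the hand-written callback, with the same grace-period guarantee. Kernel-style sketch, illustrative rather than a standalone program:

#include <linux/slab.h>
#include <linux/rcupdate.h>

struct cc_state_like {
        int payload;
        struct rcu_head rcu;    /* must be embedded in the freed object */
};

static void retire(struct cc_state_like *obj)
{
        /* Before: call_rcu(&obj->rcu, reclaim) plus a reclaim callback
         * doing container_of() + kfree(). After: one macro, naming the
         * rcu_head field, frees the object after a grace period. */
        kfree_rcu(obj, rcu);
}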
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c index a5aa3517e7d5..4e4d8317c281 100644 --- a/drivers/infiniband/hw/hfi1/qp.c +++ b/drivers/infiniband/hw/hfi1/qp.c | |||
@@ -656,10 +656,6 @@ struct qp_iter *qp_iter_init(struct hfi1_ibdev *dev) | |||
656 | 656 | ||
657 | iter->dev = dev; | 657 | iter->dev = dev; |
658 | iter->specials = dev->rdi.ibdev.phys_port_cnt * 2; | 658 | iter->specials = dev->rdi.ibdev.phys_port_cnt * 2; |
659 | if (qp_iter_next(iter)) { | ||
660 | kfree(iter); | ||
661 | return NULL; | ||
662 | } | ||
663 | 659 | ||
664 | return iter; | 660 | return iter; |
665 | } | 661 | } |
diff --git a/drivers/infiniband/hw/hfi1/qsfp.c b/drivers/infiniband/hw/hfi1/qsfp.c index a207717ade2a..4e95ad810847 100644 --- a/drivers/infiniband/hw/hfi1/qsfp.c +++ b/drivers/infiniband/hw/hfi1/qsfp.c | |||
@@ -706,8 +706,8 @@ int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr, u32 len, | |||
706 | u8 *data) | 706 | u8 *data) |
707 | { | 707 | { |
708 | struct hfi1_pportdata *ppd; | 708 | struct hfi1_pportdata *ppd; |
709 | u32 excess_len = 0; | 709 | u32 excess_len = len; |
710 | int ret = 0; | 710 | int ret = 0, offset = 0; |
711 | 711 | ||
712 | if (port_num > dd->num_pports || port_num < 1) { | 712 | if (port_num > dd->num_pports || port_num < 1) { |
713 | dd_dev_info(dd, "%s: Invalid port number %d\n", | 713 | dd_dev_info(dd, "%s: Invalid port number %d\n", |
@@ -740,6 +740,34 @@ int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr, u32 len, | |||
740 | } | 740 | } |
741 | 741 | ||
742 | memcpy(data, &ppd->qsfp_info.cache[addr], len); | 742 | memcpy(data, &ppd->qsfp_info.cache[addr], len); |
743 | |||
744 | if (addr <= QSFP_MONITOR_VAL_END && | ||
745 | (addr + len) >= QSFP_MONITOR_VAL_START) { | ||
746 | /* Overlap with the dynamic channel monitor range */ | ||
747 | if (addr < QSFP_MONITOR_VAL_START) { | ||
748 | if (addr + len <= QSFP_MONITOR_VAL_END) | ||
749 | len = addr + len - QSFP_MONITOR_VAL_START; | ||
750 | else | ||
751 | len = QSFP_MONITOR_RANGE; | ||
752 | offset = QSFP_MONITOR_VAL_START - addr; | ||
753 | addr = QSFP_MONITOR_VAL_START; | ||
754 | } else if (addr == QSFP_MONITOR_VAL_START) { | ||
755 | offset = 0; | ||
756 | if (addr + len > QSFP_MONITOR_VAL_END) | ||
757 | len = QSFP_MONITOR_RANGE; | ||
758 | } else { | ||
759 | offset = 0; | ||
760 | if (addr + len > QSFP_MONITOR_VAL_END) | ||
761 | len = QSFP_MONITOR_VAL_END - addr + 1; | ||
762 | } | ||
763 | /* Refresh the values of the dynamic monitors from the cable */ | ||
764 | ret = one_qsfp_read(ppd, dd->hfi1_id, addr, data + offset, len); | ||
765 | if (ret != len) { | ||
766 | ret = -EAGAIN; | ||
767 | goto set_zeroes; | ||
768 | } | ||
769 | } | ||
770 | |||
743 | return 0; | 771 | return 0; |
744 | 772 | ||
745 | set_zeroes: | 773 | set_zeroes: |
diff --git a/drivers/infiniband/hw/hfi1/qsfp.h b/drivers/infiniband/hw/hfi1/qsfp.h index 69275ebd9597..36cf52359848 100644 --- a/drivers/infiniband/hw/hfi1/qsfp.h +++ b/drivers/infiniband/hw/hfi1/qsfp.h | |||
@@ -74,6 +74,9 @@ | |||
74 | /* Defined fields that Intel requires of qualified cables */ | 74 | /* Defined fields that Intel requires of qualified cables */ |
75 | /* Byte 0 is Identifier, not checked */ | 75 | /* Byte 0 is Identifier, not checked */ |
76 | /* Byte 1 is reserved "status MSB" */ | 76 | /* Byte 1 is reserved "status MSB" */ |
77 | #define QSFP_MONITOR_VAL_START 22 | ||
78 | #define QSFP_MONITOR_VAL_END 81 | ||
79 | #define QSFP_MONITOR_RANGE (QSFP_MONITOR_VAL_END - QSFP_MONITOR_VAL_START + 1) | ||
77 | #define QSFP_TX_CTRL_BYTE_OFFS 86 | 80 | #define QSFP_TX_CTRL_BYTE_OFFS 86 |
78 | #define QSFP_PWR_CTRL_BYTE_OFFS 93 | 81 | #define QSFP_PWR_CTRL_BYTE_OFFS 93 |
79 | #define QSFP_CDR_CTRL_BYTE_OFFS 98 | 82 | #define QSFP_CDR_CTRL_BYTE_OFFS 98 |
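
[Editor's note on the qsfp.c/qsfp.h hunks above] get_cable_info() now refreshes the dynamic monitor bytes straight from the cable whenever a request overlaps the window [QSFP_MONITOR_VAL_START, QSFP_MONITOR_VAL_END] = [22, 81], clamping the read to the overlap and landing it at the right offset in the caller's buffer. A user-space re-implementation of the clamp (the hunk's two tail cases collapse into one branch; callers only enter on overlap):

#include <assert.h>

#define VAL_START 22
#define VAL_END   81
#define VAL_RANGE (VAL_END - VAL_START + 1)

struct win { unsigned int addr, len, offset; };

static struct win clamp_to_window(unsigned int addr, unsigned int len)
{
        struct win w = { addr, len, 0 };

        if (addr < VAL_START) {
                w.len = (addr + len <= VAL_END) ? addr + len - VAL_START
                                                : VAL_RANGE;
                w.offset = VAL_START - addr;    /* skip bytes before window */
                w.addr = VAL_START;
        } else if (addr + len > VAL_END) {
                w.len = VAL_END - addr + 1;     /* clamp the tail */
        }
        return w;
}

int main(void)
{
        struct win w = clamp_to_window(20, 10); /* bytes 20..29 overlap 22..29 */

        assert(w.addr == 22 && w.offset == 2 && w.len == 8);

        w = clamp_to_window(50, 100);           /* tail runs past byte 81 */
        assert(w.addr == 50 && w.offset == 0 && w.len == 32);
        return 0;
}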
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h index b738acdb9b02..8ec09e470f84 100644 --- a/drivers/infiniband/hw/i40iw/i40iw.h +++ b/drivers/infiniband/hw/i40iw/i40iw.h | |||
@@ -232,7 +232,7 @@ struct i40iw_device { | |||
232 | struct i40e_client *client; | 232 | struct i40e_client *client; |
233 | struct i40iw_hw hw; | 233 | struct i40iw_hw hw; |
234 | struct i40iw_cm_core cm_core; | 234 | struct i40iw_cm_core cm_core; |
235 | unsigned long *mem_resources; | 235 | u8 *mem_resources; |
236 | unsigned long *allocated_qps; | 236 | unsigned long *allocated_qps; |
237 | unsigned long *allocated_cqs; | 237 | unsigned long *allocated_cqs; |
238 | unsigned long *allocated_mrs; | 238 | unsigned long *allocated_mrs; |
@@ -435,8 +435,8 @@ static inline int i40iw_alloc_resource(struct i40iw_device *iwdev, | |||
435 | *next = resource_num + 1; | 435 | *next = resource_num + 1; |
436 | if (*next == max_resources) | 436 | if (*next == max_resources) |
437 | *next = 0; | 437 | *next = 0; |
438 | spin_unlock_irqrestore(&iwdev->resource_lock, flags); | ||
439 | *req_resource_num = resource_num; | 438 | *req_resource_num = resource_num; |
439 | spin_unlock_irqrestore(&iwdev->resource_lock, flags); | ||
440 | 440 | ||
441 | return 0; | 441 | return 0; |
442 | } | 442 | } |
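
[Editor's note on the i40iw.h hunk above] The tweak moves the *req_resource_num store inside the spinlock so the allocated slot is published before the lock drops; writing it after the unlock leaves a window in which a racing allocator can run between the bitmap update and the caller seeing its number. A minimal sketch of the corrected ordering (whether that window is exploitable depends on the callers; moving the store is cheap either way):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int next_free;

int alloc_slot(unsigned int *out)
{
        unsigned int slot;

        pthread_mutex_lock(&lock);
        slot = next_free++;
        *out = slot;            /* publish while the lock is still held */
        pthread_mutex_unlock(&lock);
        return 0;
}

int main(void)
{
        unsigned int a, b;

        alloc_slot(&a);
        alloc_slot(&b);
        return (a == 0 && b == 1) ? 0 : 1;
}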
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c index 5026dc79978a..7ca0638579c0 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.c +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c | |||
@@ -535,8 +535,8 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node, | |||
535 | buf += hdr_len; | 535 | buf += hdr_len; |
536 | } | 536 | } |
537 | 537 | ||
538 | if (pd_len) | 538 | if (pdata && pdata->addr) |
539 | memcpy(buf, pdata->addr, pd_len); | 539 | memcpy(buf, pdata->addr, pdata->size); |
540 | 540 | ||
541 | atomic_set(&sqbuf->refcount, 1); | 541 | atomic_set(&sqbuf->refcount, 1); |
542 | 542 | ||
@@ -3347,26 +3347,6 @@ int i40iw_cm_disconn(struct i40iw_qp *iwqp) | |||
3347 | } | 3347 | } |
3348 | 3348 | ||
3349 | /** | 3349 | /** |
3350 | * i40iw_loopback_nop - Send a nop | ||
3351 | * @qp: associated hw qp | ||
3352 | */ | ||
3353 | static void i40iw_loopback_nop(struct i40iw_sc_qp *qp) | ||
3354 | { | ||
3355 | u64 *wqe; | ||
3356 | u64 header; | ||
3357 | |||
3358 | wqe = qp->qp_uk.sq_base->elem; | ||
3359 | set_64bit_val(wqe, 0, 0); | ||
3360 | set_64bit_val(wqe, 8, 0); | ||
3361 | set_64bit_val(wqe, 16, 0); | ||
3362 | |||
3363 | header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) | | ||
3364 | LS_64(0, I40IWQPSQ_SIGCOMPL) | | ||
3365 | LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID); | ||
3366 | set_64bit_val(wqe, 24, header); | ||
3367 | } | ||
3368 | |||
3369 | /** | ||
3370 | * i40iw_qp_disconnect - free qp and close cm | 3350 | * i40iw_qp_disconnect - free qp and close cm |
3371 | * @iwqp: associate qp for the connection | 3351 | * @iwqp: associate qp for the connection |
3372 | */ | 3352 | */ |
@@ -3638,7 +3618,7 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
3638 | } else { | 3618 | } else { |
3639 | if (iwqp->page) | 3619 | if (iwqp->page) |
3640 | iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page); | 3620 | iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page); |
3641 | i40iw_loopback_nop(&iwqp->sc_qp); | 3621 | dev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp, NULL, 0, 0); |
3642 | } | 3622 | } |
3643 | 3623 | ||
3644 | if (iwqp->page) | 3624 | if (iwqp->page) |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c index 6e9081380a27..0cbbe4038298 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_main.c +++ b/drivers/infiniband/hw/i40iw/i40iw_main.c | |||
@@ -1558,6 +1558,10 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client) | |||
1558 | enum i40iw_status_code status; | 1558 | enum i40iw_status_code status; |
1559 | struct i40iw_handler *hdl; | 1559 | struct i40iw_handler *hdl; |
1560 | 1560 | ||
1561 | hdl = i40iw_find_netdev(ldev->netdev); | ||
1562 | if (hdl) | ||
1563 | return 0; | ||
1564 | |||
1561 | hdl = kzalloc(sizeof(*hdl), GFP_KERNEL); | 1565 | hdl = kzalloc(sizeof(*hdl), GFP_KERNEL); |
1562 | if (!hdl) | 1566 | if (!hdl) |
1563 | return -ENOMEM; | 1567 | return -ENOMEM; |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c index 0e8db0a35141..6fd043b1d714 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_utils.c +++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c | |||
@@ -673,8 +673,11 @@ enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw, | |||
673 | { | 673 | { |
674 | if (!mem) | 674 | if (!mem) |
675 | return I40IW_ERR_PARAM; | 675 | return I40IW_ERR_PARAM; |
676 | /* | ||
677 | * mem->va points to the parent of mem, so both mem and mem->va | ||
678 | * can not be touched once mem->va is freed | ||
679 | */ | ||
676 | kfree(mem->va); | 680 | kfree(mem->va); |
677 | mem->va = NULL; | ||
678 | return 0; | 681 | return 0; |
679 | } | 682 | } |
680 | 683 | ||
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 2360338877bf..6329c971c22f 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c | |||
@@ -794,7 +794,6 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd, | |||
794 | return &iwqp->ibqp; | 794 | return &iwqp->ibqp; |
795 | error: | 795 | error: |
796 | i40iw_free_qp_resources(iwdev, iwqp, qp_num); | 796 | i40iw_free_qp_resources(iwdev, iwqp, qp_num); |
797 | kfree(mem); | ||
798 | return ERR_PTR(err_code); | 797 | return ERR_PTR(err_code); |
799 | } | 798 | } |
800 | 799 | ||
@@ -1926,8 +1925,7 @@ static int i40iw_dereg_mr(struct ib_mr *ib_mr) | |||
1926 | } | 1925 | } |
1927 | if (iwpbl->pbl_allocated) | 1926 | if (iwpbl->pbl_allocated) |
1928 | i40iw_free_pble(iwdev->pble_rsrc, palloc); | 1927 | i40iw_free_pble(iwdev->pble_rsrc, palloc); |
1929 | kfree(iwpbl->iwmr); | 1928 | kfree(iwmr); |
1930 | iwpbl->iwmr = NULL; | ||
1931 | return 0; | 1929 | return 0; |
1932 | } | 1930 | } |
1933 | 1931 | ||
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index d6fc8a6e8c33..006db6436e3b 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c | |||
@@ -576,8 +576,8 @@ static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum) | |||
576 | checksum == cpu_to_be16(0xffff); | 576 | checksum == cpu_to_be16(0xffff); |
577 | } | 577 | } |
578 | 578 | ||
579 | static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc, | 579 | static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc, |
580 | unsigned tail, struct mlx4_cqe *cqe, int is_eth) | 580 | unsigned tail, struct mlx4_cqe *cqe, int is_eth) |
581 | { | 581 | { |
582 | struct mlx4_ib_proxy_sqp_hdr *hdr; | 582 | struct mlx4_ib_proxy_sqp_hdr *hdr; |
583 | 583 | ||
@@ -600,8 +600,6 @@ static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct | |||
600 | wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32); | 600 | wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32); |
601 | wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12); | 601 | wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12); |
602 | } | 602 | } |
603 | |||
604 | return 0; | ||
605 | } | 603 | } |
606 | 604 | ||
607 | static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries, | 605 | static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries, |
@@ -692,7 +690,7 @@ repoll: | |||
692 | if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP && | 690 | if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP && |
693 | is_send)) { | 691 | is_send)) { |
694 | pr_warn("Completion for NOP opcode detected!\n"); | 692 | pr_warn("Completion for NOP opcode detected!\n"); |
695 | return -EINVAL; | 693 | return -EAGAIN; |
696 | } | 694 | } |
697 | 695 | ||
698 | /* Resize CQ in progress */ | 696 | /* Resize CQ in progress */ |
@@ -723,7 +721,7 @@ repoll: | |||
723 | if (unlikely(!mqp)) { | 721 | if (unlikely(!mqp)) { |
724 | pr_warn("CQ %06x with entry for unknown QPN %06x\n", | 722 | pr_warn("CQ %06x with entry for unknown QPN %06x\n", |
725 | cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK); | 723 | cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK); |
726 | return -EINVAL; | 724 | return -EAGAIN; |
727 | } | 725 | } |
728 | 726 | ||
729 | *cur_qp = to_mibqp(mqp); | 727 | *cur_qp = to_mibqp(mqp); |
@@ -741,7 +739,7 @@ repoll: | |||
741 | if (unlikely(!msrq)) { | 739 | if (unlikely(!msrq)) { |
742 | pr_warn("CQ %06x with entry for unknown SRQN %06x\n", | 740 | pr_warn("CQ %06x with entry for unknown SRQN %06x\n", |
743 | cq->mcq.cqn, srq_num); | 741 | cq->mcq.cqn, srq_num); |
744 | return -EINVAL; | 742 | return -EAGAIN; |
745 | } | 743 | } |
746 | } | 744 | } |
747 | 745 | ||
@@ -852,9 +850,11 @@ repoll: | |||
852 | if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) { | 850 | if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) { |
853 | if ((*cur_qp)->mlx4_ib_qp_type & | 851 | if ((*cur_qp)->mlx4_ib_qp_type & |
854 | (MLX4_IB_QPT_PROXY_SMI_OWNER | | 852 | (MLX4_IB_QPT_PROXY_SMI_OWNER | |
855 | MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) | 853 | MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) { |
856 | return use_tunnel_data(*cur_qp, cq, wc, tail, | 854 | use_tunnel_data(*cur_qp, cq, wc, tail, cqe, |
857 | cqe, is_eth); | 855 | is_eth); |
856 | return 0; | ||
857 | } | ||
858 | } | 858 | } |
859 | 859 | ||
860 | wc->slid = be16_to_cpu(cqe->rlid); | 860 | wc->slid = be16_to_cpu(cqe->rlid); |
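
[Editor's note on the mlx4/cq.c hunks above] The change stops surfacing per-CQE problems as hard poll errors: use_tunnel_data() becomes void, and a NOP completion or an unknown QPN/SRQN now yields -EAGAIN, which — going by the surrounding poll loop — lets the caller report the completions harvested so far instead of failing the whole poll. A toy loop under that assumption (illustrative names):

#include <errno.h>
#include <stdio.h>

static int poll_one(int i)
{
        if (i == 1)
                return -EAGAIN;         /* bad entry: not a consumer error */
        printf("completion %d\n", i);
        return 0;
}

int poll_cq(int budget)
{
        int i, npolled = 0, err = 0;

        for (i = 0; i < budget; i++) {
                err = poll_one(i);
                if (err)
                        break;
                npolled++;
        }
        /* -EAGAIN is not surfaced: report what was harvested */
        return (err == 0 || err == -EAGAIN) ? npolled : err;
}

int main(void)
{
        return poll_cq(3) == 1 ? 0 : 1;
}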
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index a84bb766fc62..1b4094baa2de 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
@@ -37,7 +37,6 @@ | |||
37 | #include <linux/pci.h> | 37 | #include <linux/pci.h> |
38 | #include <linux/dma-mapping.h> | 38 | #include <linux/dma-mapping.h> |
39 | #include <linux/slab.h> | 39 | #include <linux/slab.h> |
40 | #include <linux/io-mapping.h> | ||
41 | #if defined(CONFIG_X86) | 40 | #if defined(CONFIG_X86) |
42 | #include <asm/pat.h> | 41 | #include <asm/pat.h> |
43 | #endif | 42 | #endif |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index 16740dcb876b..67fc0b6857e1 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c | |||
@@ -1156,18 +1156,18 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev, | |||
1156 | attr->max_srq = | 1156 | attr->max_srq = |
1157 | (rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >> | 1157 | (rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >> |
1158 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET; | 1158 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET; |
1159 | attr->max_send_sge = ((rsp->max_write_send_sge & | 1159 | attr->max_send_sge = ((rsp->max_recv_send_sge & |
1160 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >> | 1160 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >> |
1161 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT); | 1161 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT); |
1162 | attr->max_recv_sge = (rsp->max_write_send_sge & | 1162 | attr->max_recv_sge = (rsp->max_recv_send_sge & |
1163 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >> | 1163 | OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_MASK) >> |
1164 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT; | 1164 | OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT; |
1165 | attr->max_srq_sge = (rsp->max_srq_rqe_sge & | 1165 | attr->max_srq_sge = (rsp->max_srq_rqe_sge & |
1166 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >> | 1166 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >> |
1167 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET; | 1167 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET; |
1168 | attr->max_rdma_sge = (rsp->max_write_send_sge & | 1168 | attr->max_rdma_sge = (rsp->max_wr_rd_sge & |
1169 | OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK) >> | 1169 | OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_MASK) >> |
1170 | OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT; | 1170 | OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_SHIFT; |
1171 | attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp & | 1171 | attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp & |
1172 | OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >> | 1172 | OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >> |
1173 | OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT; | 1173 | OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT; |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h index 0efc9662c6d8..37df4481bb8f 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h | |||
@@ -554,9 +554,9 @@ enum { | |||
554 | OCRDMA_MBX_QUERY_CFG_L3_TYPE_MASK = 0x18, | 554 | OCRDMA_MBX_QUERY_CFG_L3_TYPE_MASK = 0x18, |
555 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT = 0, | 555 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT = 0, |
556 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK = 0xFFFF, | 556 | OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK = 0xFFFF, |
557 | OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT = 16, | 557 | OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT = 16, |
558 | OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK = 0xFFFF << | 558 | OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_MASK = 0xFFFF << |
559 | OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT, | 559 | OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT, |
560 | 560 | ||
561 | OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT = 0, | 561 | OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT = 0, |
562 | OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK = 0xFFFF, | 562 | OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK = 0xFFFF, |
@@ -612,6 +612,8 @@ enum { | |||
612 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET = 0, | 612 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET = 0, |
613 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK = 0xFFFF << | 613 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK = 0xFFFF << |
614 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET, | 614 | OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET, |
615 | OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_SHIFT = 0, | ||
616 | OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_MASK = 0xFFFF, | ||
615 | }; | 617 | }; |
616 | 618 | ||
617 | struct ocrdma_mbx_query_config { | 619 | struct ocrdma_mbx_query_config { |
@@ -619,7 +621,7 @@ struct ocrdma_mbx_query_config { | |||
619 | struct ocrdma_mbx_rsp rsp; | 621 | struct ocrdma_mbx_rsp rsp; |
620 | u32 qp_srq_cq_ird_ord; | 622 | u32 qp_srq_cq_ird_ord; |
621 | u32 max_pd_ca_ack_delay; | 623 | u32 max_pd_ca_ack_delay; |
622 | u32 max_write_send_sge; | 624 | u32 max_recv_send_sge; |
623 | u32 max_ird_ord_per_qp; | 625 | u32 max_ird_ord_per_qp; |
624 | u32 max_shared_ird_ord; | 626 | u32 max_shared_ird_ord; |
625 | u32 max_mr; | 627 | u32 max_mr; |
@@ -639,6 +641,8 @@ struct ocrdma_mbx_query_config { | |||
639 | u32 max_wqes_rqes_per_q; | 641 | u32 max_wqes_rqes_per_q; |
640 | u32 max_cq_cqes_per_cq; | 642 | u32 max_cq_cqes_per_cq; |
641 | u32 max_srq_rqe_sge; | 643 | u32 max_srq_rqe_sge; |
644 | u32 max_wr_rd_sge; | ||
645 | u32 ird_pgsz_num_pages; | ||
642 | }; | 646 | }; |
643 | 647 | ||
644 | struct ocrdma_fw_ver_rsp { | 648 | struct ocrdma_fw_ver_rsp { |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index b1a3d91fe8b9..0aa854737e74 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | |||
@@ -125,8 +125,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr, | |||
125 | IB_DEVICE_SYS_IMAGE_GUID | | 125 | IB_DEVICE_SYS_IMAGE_GUID | |
126 | IB_DEVICE_LOCAL_DMA_LKEY | | 126 | IB_DEVICE_LOCAL_DMA_LKEY | |
127 | IB_DEVICE_MEM_MGT_EXTENSIONS; | 127 | IB_DEVICE_MEM_MGT_EXTENSIONS; |
128 | attr->max_sge = dev->attr.max_send_sge; | 128 | attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_recv_sge); |
129 | attr->max_sge_rd = attr->max_sge; | 129 | attr->max_sge_rd = dev->attr.max_rdma_sge; |
130 | attr->max_cq = dev->attr.max_cq; | 130 | attr->max_cq = dev->attr.max_cq; |
131 | attr->max_cqe = dev->attr.max_cqe; | 131 | attr->max_cqe = dev->attr.max_cqe; |
132 | attr->max_mr = dev->attr.max_mr; | 132 | attr->max_mr = dev->attr.max_mr; |
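The ib_device_attr reported here has a single max_sge that consumers apply to both send and receive queues, so advertising more than the smaller of the two hardware limits would let a ULP request a QP the device cannot actually create; hence the min(), with the RDMA-read limit exposed separately through max_sge_rd. A sketch of how a consumer sizes a QP from that one value (illustrative only):

    struct ib_qp_init_attr init_attr = {
            .cap = {
                    /* both queues sized from the single advertised limit */
                    .max_send_sge = dev_attr.max_sge,
                    .max_recv_sge = dev_attr.max_sge,
            },
    };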
diff --git a/drivers/infiniband/hw/qib/qib_debugfs.c b/drivers/infiniband/hw/qib/qib_debugfs.c index 5e75b43c596b..5bad8e3b40bb 100644 --- a/drivers/infiniband/hw/qib/qib_debugfs.c +++ b/drivers/infiniband/hw/qib/qib_debugfs.c | |||
@@ -189,27 +189,32 @@ static int _ctx_stats_seq_show(struct seq_file *s, void *v) | |||
189 | DEBUGFS_FILE(ctx_stats) | 189 | DEBUGFS_FILE(ctx_stats) |
190 | 190 | ||
191 | static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos) | 191 | static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos) |
192 | __acquires(RCU) | ||
192 | { | 193 | { |
193 | struct qib_qp_iter *iter; | 194 | struct qib_qp_iter *iter; |
194 | loff_t n = *pos; | 195 | loff_t n = *pos; |
195 | 196 | ||
196 | rcu_read_lock(); | ||
197 | iter = qib_qp_iter_init(s->private); | 197 | iter = qib_qp_iter_init(s->private); |
198 | |||
199 | /* stop calls rcu_read_unlock */ | ||
200 | rcu_read_lock(); | ||
201 | |||
198 | if (!iter) | 202 | if (!iter) |
199 | return NULL; | 203 | return NULL; |
200 | 204 | ||
201 | while (n--) { | 205 | do { |
202 | if (qib_qp_iter_next(iter)) { | 206 | if (qib_qp_iter_next(iter)) { |
203 | kfree(iter); | 207 | kfree(iter); |
204 | return NULL; | 208 | return NULL; |
205 | } | 209 | } |
206 | } | 210 | } while (n--); |
207 | 211 | ||
208 | return iter; | 212 | return iter; |
209 | } | 213 | } |
210 | 214 | ||
211 | static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr, | 215 | static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr, |
212 | loff_t *pos) | 216 | loff_t *pos) |
217 | __must_hold(RCU) | ||
213 | { | 218 | { |
214 | struct qib_qp_iter *iter = iter_ptr; | 219 | struct qib_qp_iter *iter = iter_ptr; |
215 | 220 | ||
@@ -224,6 +229,7 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr, | |||
224 | } | 229 | } |
225 | 230 | ||
226 | static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr) | 231 | static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr) |
232 | __releases(RCU) | ||
227 | { | 233 | { |
228 | rcu_read_unlock(); | 234 | rcu_read_unlock(); |
229 | } | 235 | } |
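The qib_debugfs rework fixes a sleep-in-atomic bug: qib_qp_iter_init() allocates with GFP_KERNEL, which may sleep, so it must run before rcu_read_lock() rather than after, and the __acquires/__releases annotations tell sparse that the lock taken in ->start() is dropped in ->stop(). The shape of the pattern, assuming a hypothetical foo_iter (a sketch, not the driver's code):

    static void *foo_seq_start(struct seq_file *s, loff_t *pos)
            __acquires(RCU)
    {
            /* may sleep, so allocate before entering the RCU section */
            struct foo_iter *it = kzalloc(sizeof(*it), GFP_KERNEL);

            rcu_read_lock();        /* ->stop() always unlocks */
            return it;              /* NULL on allocation failure */
    }

    static void foo_seq_stop(struct seq_file *s, void *v)
            __releases(RCU)
    {
            rcu_read_unlock();
            kfree(v);               /* kfree(NULL) is a no-op */
    }

Note also the while loop becoming do/while: the initial advance that used to live in qib_qp_iter_init() (removed in the qib_qp.c hunk below) now happens on the first pass through the loop.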
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c index fcdf37913a26..c3edc033f7c4 100644 --- a/drivers/infiniband/hw/qib/qib_fs.c +++ b/drivers/infiniband/hw/qib/qib_fs.c | |||
@@ -328,26 +328,12 @@ static ssize_t flash_write(struct file *file, const char __user *buf, | |||
328 | 328 | ||
329 | pos = *ppos; | 329 | pos = *ppos; |
330 | 330 | ||
331 | if (pos != 0) { | 331 | if (pos != 0 || count != sizeof(struct qib_flash)) |
332 | ret = -EINVAL; | 332 | return -EINVAL; |
333 | goto bail; | ||
334 | } | ||
335 | |||
336 | if (count != sizeof(struct qib_flash)) { | ||
337 | ret = -EINVAL; | ||
338 | goto bail; | ||
339 | } | ||
340 | |||
341 | tmp = kmalloc(count, GFP_KERNEL); | ||
342 | if (!tmp) { | ||
343 | ret = -ENOMEM; | ||
344 | goto bail; | ||
345 | } | ||
346 | 333 | ||
347 | if (copy_from_user(tmp, buf, count)) { | 334 | tmp = memdup_user(buf, count); |
348 | ret = -EFAULT; | 335 | if (IS_ERR(tmp)) |
349 | goto bail_tmp; | 336 | return PTR_ERR(tmp); |
350 | } | ||
351 | 337 | ||
352 | dd = private2dd(file); | 338 | dd = private2dd(file); |
353 | if (qib_eeprom_write(dd, pos, tmp, count)) { | 339 | if (qib_eeprom_write(dd, pos, tmp, count)) { |
@@ -361,8 +347,6 @@ static ssize_t flash_write(struct file *file, const char __user *buf, | |||
361 | 347 | ||
362 | bail_tmp: | 348 | bail_tmp: |
363 | kfree(tmp); | 349 | kfree(tmp); |
364 | |||
365 | bail: | ||
366 | return ret; | 350 | return ret; |
367 | } | 351 | } |
368 | 352 | ||
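memdup_user() collapses the open-coded kmalloc()/copy_from_user() pair above, and because it is the first thing that needs unwinding, the early sanity checks can simply return, which is what lets the 'bail' label go away. The resulting shape, sketched with hypothetical names:

    static ssize_t example_write(struct file *file, const char __user *buf,
                                 size_t count, loff_t *ppos)
    {
            void *tmp;

            if (*ppos != 0 || count != EXPECTED_SIZE)
                    return -EINVAL;

            tmp = memdup_user(buf, count);  /* kmalloc + copy_from_user */
            if (IS_ERR(tmp))
                    return PTR_ERR(tmp);    /* -ENOMEM or -EFAULT */

            /* ... consume tmp ... */
            kfree(tmp);
            return count;
    }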
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c index 9cc0aae1d781..f9b8cd2354d1 100644 --- a/drivers/infiniband/hw/qib/qib_qp.c +++ b/drivers/infiniband/hw/qib/qib_qp.c | |||
@@ -573,10 +573,6 @@ struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev) | |||
573 | return NULL; | 573 | return NULL; |
574 | 574 | ||
575 | iter->dev = dev; | 575 | iter->dev = dev; |
576 | if (qib_qp_iter_next(iter)) { | ||
577 | kfree(iter); | ||
578 | return NULL; | ||
579 | } | ||
580 | 576 | ||
581 | return iter; | 577 | return iter; |
582 | } | 578 | } |
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c index c229b9f4a52d..0a89a955550b 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_main.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c | |||
@@ -664,7 +664,8 @@ static int __init usnic_ib_init(void) | |||
664 | return err; | 664 | return err; |
665 | } | 665 | } |
666 | 666 | ||
667 | if (pci_register_driver(&usnic_ib_pci_driver)) { | 667 | err = pci_register_driver(&usnic_ib_pci_driver); |
668 | if (err) { | ||
668 | usnic_err("Unable to register with PCI\n"); | 669 | usnic_err("Unable to register with PCI\n"); |
669 | goto out_umem_fini; | 670 | goto out_umem_fini; |
670 | } | 671 | } |
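The usnic fix captures the errno from pci_register_driver() instead of testing and discarding it; previously err still held the 0 left by the earlier successful step, so a failed registration would run the cleanup path and then report success to the module loader. The corrected init pattern, sketched with hypothetical names:

    static int __init example_init(void)
    {
            int err;

            err = pci_register_driver(&example_pci_driver);
            if (err) {              /* keep the errno for the unwind path */
                    pr_err("Unable to register with PCI\n");
                    goto out_cleanup;
            }
            return 0;

    out_cleanup:
            example_cleanup();
            return err;             /* propagates the real failure */
    }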
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index bdb540f25a88..870b4f212fbc 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c | |||
@@ -873,7 +873,8 @@ bail_qpn: | |||
873 | free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num); | 873 | free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num); |
874 | 874 | ||
875 | bail_rq_wq: | 875 | bail_rq_wq: |
876 | vfree(qp->r_rq.wq); | 876 | if (!qp->ip) |
877 | vfree(qp->r_rq.wq); | ||
877 | 878 | ||
878 | bail_driver_priv: | 879 | bail_driver_priv: |
879 | rdi->driver_f.qp_priv_free(rdi, qp); | 880 | rdi->driver_f.qp_priv_free(rdi, qp); |
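This rdmavt hunk closes a double free: once qp->ip (the mmap info object) has been created, it owns r_rq.wq and releases it during its own teardown, so the error path may only vfree() the buffer while it is still unowned. In general, once ownership of an allocation is handed to another object, every unwind path has to check who currently owns it; a sketch with hypothetical helpers:

    buf = vmalloc(size);
    if (!buf)
            return -ENOMEM;

    owner = wrap_buffer(buf);       /* on success, owner's release frees buf */

    err = later_setup_step();
    if (err) {
            if (!owner)             /* still ours: nobody else will free it */
                    vfree(buf);
            return err;
    }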
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index ba6be060a476..7914c14478cd 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c | |||
@@ -448,7 +448,7 @@ isert_alloc_login_buf(struct isert_conn *isert_conn, | |||
448 | 448 | ||
449 | isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL); | 449 | isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL); |
450 | if (!isert_conn->login_rsp_buf) { | 450 | if (!isert_conn->login_rsp_buf) { |
451 | isert_err("Unable to allocate isert_conn->login_rspbuf\n"); | 451 | ret = -ENOMEM; |
452 | goto out_unmap_login_req_buf; | 452 | goto out_unmap_login_req_buf; |
453 | } | 453 | } |
454 | 454 | ||
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index dfa23b075a88..883bbfe08e0e 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c | |||
@@ -522,6 +522,11 @@ static int srpt_refresh_port(struct srpt_port *sport) | |||
522 | if (ret) | 522 | if (ret) |
523 | goto err_query_port; | 523 | goto err_query_port; |
524 | 524 | ||
525 | snprintf(sport->port_guid, sizeof(sport->port_guid), | ||
526 | "0x%016llx%016llx", | ||
527 | be64_to_cpu(sport->gid.global.subnet_prefix), | ||
528 | be64_to_cpu(sport->gid.global.interface_id)); | ||
529 | |||
525 | if (!sport->mad_agent) { | 530 | if (!sport->mad_agent) { |
526 | memset(®_req, 0, sizeof(reg_req)); | 531 | memset(®_req, 0, sizeof(reg_req)); |
527 | reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT; | 532 | reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT; |
@@ -2548,10 +2553,6 @@ static void srpt_add_one(struct ib_device *device) | |||
2548 | sdev->device->name, i); | 2553 | sdev->device->name, i); |
2549 | goto err_ring; | 2554 | goto err_ring; |
2550 | } | 2555 | } |
2551 | snprintf(sport->port_guid, sizeof(sport->port_guid), | ||
2552 | "0x%016llx%016llx", | ||
2553 | be64_to_cpu(sport->gid.global.subnet_prefix), | ||
2554 | be64_to_cpu(sport->gid.global.interface_id)); | ||
2555 | } | 2556 | } |
2556 | 2557 | ||
2557 | spin_lock(&srpt_dev_lock); | 2558 | spin_lock(&srpt_dev_lock); |
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c index 7d61439be5f2..0c07e1023a46 100644 --- a/drivers/input/keyboard/tegra-kbc.c +++ b/drivers/input/keyboard/tegra-kbc.c | |||
@@ -376,7 +376,7 @@ static int tegra_kbc_start(struct tegra_kbc *kbc) | |||
376 | /* Reset the KBC controller to clear all previous status.*/ | 376 | /* Reset the KBC controller to clear all previous status.*/ |
377 | reset_control_assert(kbc->rst); | 377 | reset_control_assert(kbc->rst); |
378 | udelay(100); | 378 | udelay(100); |
379 | reset_control_assert(kbc->rst); | 379 | reset_control_deassert(kbc->rst); |
380 | udelay(100); | 380 | udelay(100); |
381 | 381 | ||
382 | tegra_kbc_config_pins(kbc); | 382 | tegra_kbc_config_pins(kbc); |
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c index faa295ec4f31..c83bce89028b 100644 --- a/drivers/input/rmi4/rmi_driver.c +++ b/drivers/input/rmi4/rmi_driver.c | |||
@@ -553,7 +553,6 @@ int rmi_read_register_desc(struct rmi_device *d, u16 addr, | |||
553 | goto free_struct_buff; | 553 | goto free_struct_buff; |
554 | 554 | ||
555 | reg = find_first_bit(rdesc->presense_map, RMI_REG_DESC_PRESENSE_BITS); | 555 | reg = find_first_bit(rdesc->presense_map, RMI_REG_DESC_PRESENSE_BITS); |
556 | map_offset = 0; | ||
557 | for (i = 0; i < rdesc->num_registers; i++) { | 556 | for (i = 0; i < rdesc->num_registers; i++) { |
558 | struct rmi_register_desc_item *item = &rdesc->registers[i]; | 557 | struct rmi_register_desc_item *item = &rdesc->registers[i]; |
559 | int reg_size = struct_buf[offset]; | 558 | int reg_size = struct_buf[offset]; |
@@ -576,6 +575,8 @@ int rmi_read_register_desc(struct rmi_device *d, u16 addr, | |||
576 | item->reg = reg; | 575 | item->reg = reg; |
577 | item->reg_size = reg_size; | 576 | item->reg_size = reg_size; |
578 | 577 | ||
578 | map_offset = 0; | ||
579 | |||
579 | do { | 580 | do { |
580 | for (b = 0; b < 7; b++) { | 581 | for (b = 0; b < 7; b++) { |
581 | if (struct_buf[offset] & (0x1 << b)) | 582 | if (struct_buf[offset] & (0x1 << b)) |
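The rmi4 change fixes classic loop-state leakage: map_offset was zeroed once before the register loop, so every register after the first resumed its presence-bit scan at the previous register's final offset. Moving the reset inside the loop gives each register description a fresh cursor. Distilled (the helper name is hypothetical):

    for (i = 0; i < rdesc->num_registers; i++) {
            map_offset = 0;         /* must restart for every register */

            do {
                    /* consume 7 presence bits per map byte */
                    map_offset += 7;
            } while (more_map_bytes(rdesc, i));
    }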
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index b4d34086e73f..405252a884dd 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c | |||
@@ -1305,6 +1305,7 @@ static int __init i8042_create_aux_port(int idx) | |||
1305 | serio->write = i8042_aux_write; | 1305 | serio->write = i8042_aux_write; |
1306 | serio->start = i8042_start; | 1306 | serio->start = i8042_start; |
1307 | serio->stop = i8042_stop; | 1307 | serio->stop = i8042_stop; |
1308 | serio->ps2_cmd_mutex = &i8042_mutex; | ||
1308 | serio->port_data = port; | 1309 | serio->port_data = port; |
1309 | serio->dev.parent = &i8042_platform_device->dev; | 1310 | serio->dev.parent = &i8042_platform_device->dev; |
1310 | if (idx < 0) { | 1311 | if (idx < 0) { |
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index a61b2153ab8c..1ce3ecbe37f8 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c | |||
@@ -1473,7 +1473,6 @@ static int ads7846_remove(struct spi_device *spi) | |||
1473 | 1473 | ||
1474 | ads784x_hwmon_unregister(spi, ts); | 1474 | ads784x_hwmon_unregister(spi, ts); |
1475 | 1475 | ||
1476 | regulator_disable(ts->reg); | ||
1477 | regulator_put(ts->reg); | 1476 | regulator_put(ts->reg); |
1478 | 1477 | ||
1479 | if (!ts->get_pendown_state) { | 1478 | if (!ts->get_pendown_state) { |
diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c index 7379fe153cf9..b2744a64e933 100644 --- a/drivers/input/touchscreen/silead.c +++ b/drivers/input/touchscreen/silead.c | |||
@@ -464,7 +464,7 @@ static int silead_ts_probe(struct i2c_client *client, | |||
464 | return -ENODEV; | 464 | return -ENODEV; |
465 | 465 | ||
466 | /* Power GPIO pin */ | 466 | /* Power GPIO pin */ |
467 | data->gpio_power = gpiod_get_optional(dev, "power", GPIOD_OUT_LOW); | 467 | data->gpio_power = devm_gpiod_get_optional(dev, "power", GPIOD_OUT_LOW); |
468 | if (IS_ERR(data->gpio_power)) { | 468 | if (IS_ERR(data->gpio_power)) { |
469 | if (PTR_ERR(data->gpio_power) != -EPROBE_DEFER) | 469 | if (PTR_ERR(data->gpio_power) != -EPROBE_DEFER) |
470 | dev_err(dev, "Shutdown GPIO request failed\n"); | 470 | dev_err(dev, "Shutdown GPIO request failed\n"); |
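Switching silead to devm_gpiod_get_optional() ties the descriptor's lifetime to the device: it is released automatically when probe fails or the driver detaches, whereas the plain gpiod_get_optional() call leaked the reference because no matching gpiod_put() existed on those paths. Typical probe-time usage, sketched:

    data->gpio_power = devm_gpiod_get_optional(dev, "power", GPIOD_OUT_LOW);
    if (IS_ERR(data->gpio_power))
            return PTR_ERR(data->gpio_power);       /* may be -EPROBE_DEFER */
    /* NULL when no "power" GPIO is described; gpiod_set_value(NULL, ...)
     * is a no-op, so callers need no special case.  No gpiod_put(): devres
     * drops the reference together with the device. */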
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index ce801170d5f2..641e88761319 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c | |||
@@ -879,7 +879,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu) | |||
879 | * We may have concurrent producers, so we need to be careful | 879 | * We may have concurrent producers, so we need to be careful |
880 | * not to touch any of the shadow cmdq state. | 880 | * not to touch any of the shadow cmdq state. |
881 | */ | 881 | */ |
882 | queue_read(cmd, Q_ENT(q, idx), q->ent_dwords); | 882 | queue_read(cmd, Q_ENT(q, cons), q->ent_dwords); |
883 | dev_err(smmu->dev, "skipping command in error state:\n"); | 883 | dev_err(smmu->dev, "skipping command in error state:\n"); |
884 | for (i = 0; i < ARRAY_SIZE(cmd); ++i) | 884 | for (i = 0; i < ARRAY_SIZE(cmd); ++i) |
885 | dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]); | 885 | dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]); |
@@ -890,7 +890,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu) | |||
890 | return; | 890 | return; |
891 | } | 891 | } |
892 | 892 | ||
893 | queue_write(cmd, Q_ENT(q, idx), q->ent_dwords); | 893 | queue_write(Q_ENT(q, cons), cmd, q->ent_dwords); |
894 | } | 894 | } |
895 | 895 | ||
896 | static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu, | 896 | static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu, |
@@ -1034,6 +1034,9 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid, | |||
1034 | case STRTAB_STE_0_CFG_S2_TRANS: | 1034 | case STRTAB_STE_0_CFG_S2_TRANS: |
1035 | ste_live = true; | 1035 | ste_live = true; |
1036 | break; | 1036 | break; |
1037 | case STRTAB_STE_0_CFG_ABORT: | ||
1038 | if (disable_bypass) | ||
1039 | break; | ||
1037 | default: | 1040 | default: |
1038 | BUG(); /* STE corruption */ | 1041 | BUG(); /* STE corruption */ |
1039 | } | 1042 | } |
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 4f49fe29f202..2db74ebc3240 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c | |||
@@ -686,8 +686,7 @@ static struct iommu_gather_ops arm_smmu_gather_ops = { | |||
686 | 686 | ||
687 | static irqreturn_t arm_smmu_context_fault(int irq, void *dev) | 687 | static irqreturn_t arm_smmu_context_fault(int irq, void *dev) |
688 | { | 688 | { |
689 | int flags, ret; | 689 | u32 fsr, fsynr; |
690 | u32 fsr, fsynr, resume; | ||
691 | unsigned long iova; | 690 | unsigned long iova; |
692 | struct iommu_domain *domain = dev; | 691 | struct iommu_domain *domain = dev; |
693 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); | 692 | struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); |
@@ -701,34 +700,15 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev) | |||
701 | if (!(fsr & FSR_FAULT)) | 700 | if (!(fsr & FSR_FAULT)) |
702 | return IRQ_NONE; | 701 | return IRQ_NONE; |
703 | 702 | ||
704 | if (fsr & FSR_IGN) | ||
705 | dev_err_ratelimited(smmu->dev, | ||
706 | "Unexpected context fault (fsr 0x%x)\n", | ||
707 | fsr); | ||
708 | |||
709 | fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0); | 703 | fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0); |
710 | flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ; | ||
711 | |||
712 | iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR); | 704 | iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR); |
713 | if (!report_iommu_fault(domain, smmu->dev, iova, flags)) { | ||
714 | ret = IRQ_HANDLED; | ||
715 | resume = RESUME_RETRY; | ||
716 | } else { | ||
717 | dev_err_ratelimited(smmu->dev, | ||
718 | "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n", | ||
719 | iova, fsynr, cfg->cbndx); | ||
720 | ret = IRQ_NONE; | ||
721 | resume = RESUME_TERMINATE; | ||
722 | } | ||
723 | |||
724 | /* Clear the faulting FSR */ | ||
725 | writel(fsr, cb_base + ARM_SMMU_CB_FSR); | ||
726 | 705 | ||
727 | /* Retry or terminate any stalled transactions */ | 706 | dev_err_ratelimited(smmu->dev, |
728 | if (fsr & FSR_SS) | 707 | "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n", |
729 | writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME); | 708 | fsr, iova, fsynr, cfg->cbndx); |
730 | 709 | ||
731 | return ret; | 710 | writel(fsr, cb_base + ARM_SMMU_CB_FSR); |
711 | return IRQ_HANDLED; | ||
732 | } | 712 | } |
733 | 713 | ||
734 | static irqreturn_t arm_smmu_global_fault(int irq, void *dev) | 714 | static irqreturn_t arm_smmu_global_fault(int irq, void *dev) |
@@ -837,7 +817,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, | |||
837 | } | 817 | } |
838 | 818 | ||
839 | /* SCTLR */ | 819 | /* SCTLR */ |
840 | reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP; | 820 | reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP; |
841 | if (stage1) | 821 | if (stage1) |
842 | reg |= SCTLR_S1_ASIDPNE; | 822 | reg |= SCTLR_S1_ASIDPNE; |
843 | #ifdef __BIG_ENDIAN | 823 | #ifdef __BIG_ENDIAN |
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 08a1e2f3690f..00c8a08d56e7 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c | |||
@@ -68,7 +68,8 @@ void iommu_put_dma_cookie(struct iommu_domain *domain) | |||
68 | if (!iovad) | 68 | if (!iovad) |
69 | return; | 69 | return; |
70 | 70 | ||
71 | put_iova_domain(iovad); | 71 | if (iovad->granule) |
72 | put_iova_domain(iovad); | ||
72 | kfree(iovad); | 73 | kfree(iovad); |
73 | domain->iova_cookie = NULL; | 74 | domain->iova_cookie = NULL; |
74 | } | 75 | } |
@@ -151,12 +152,15 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent) | |||
151 | } | 152 | } |
152 | } | 153 | } |
153 | 154 | ||
154 | static struct iova *__alloc_iova(struct iova_domain *iovad, size_t size, | 155 | static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size, |
155 | dma_addr_t dma_limit) | 156 | dma_addr_t dma_limit) |
156 | { | 157 | { |
158 | struct iova_domain *iovad = domain->iova_cookie; | ||
157 | unsigned long shift = iova_shift(iovad); | 159 | unsigned long shift = iova_shift(iovad); |
158 | unsigned long length = iova_align(iovad, size) >> shift; | 160 | unsigned long length = iova_align(iovad, size) >> shift; |
159 | 161 | ||
162 | if (domain->geometry.force_aperture) | ||
163 | dma_limit = min(dma_limit, domain->geometry.aperture_end); | ||
160 | /* | 164 | /* |
161 | * Enforce size-alignment to be safe - there could perhaps be an | 165 | * Enforce size-alignment to be safe - there could perhaps be an |
162 | * attribute to control this per-device, or at least per-domain... | 166 | * attribute to control this per-device, or at least per-domain... |
@@ -314,7 +318,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, | |||
314 | if (!pages) | 318 | if (!pages) |
315 | return NULL; | 319 | return NULL; |
316 | 320 | ||
317 | iova = __alloc_iova(iovad, size, dev->coherent_dma_mask); | 321 | iova = __alloc_iova(domain, size, dev->coherent_dma_mask); |
318 | if (!iova) | 322 | if (!iova) |
319 | goto out_free_pages; | 323 | goto out_free_pages; |
320 | 324 | ||
@@ -386,7 +390,7 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, | |||
386 | phys_addr_t phys = page_to_phys(page) + offset; | 390 | phys_addr_t phys = page_to_phys(page) + offset; |
387 | size_t iova_off = iova_offset(iovad, phys); | 391 | size_t iova_off = iova_offset(iovad, phys); |
388 | size_t len = iova_align(iovad, size + iova_off); | 392 | size_t len = iova_align(iovad, size + iova_off); |
389 | struct iova *iova = __alloc_iova(iovad, len, dma_get_mask(dev)); | 393 | struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev)); |
390 | 394 | ||
391 | if (!iova) | 395 | if (!iova) |
392 | return DMA_ERROR_CODE; | 396 | return DMA_ERROR_CODE; |
@@ -538,7 +542,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, | |||
538 | prev = s; | 542 | prev = s; |
539 | } | 543 | } |
540 | 544 | ||
541 | iova = __alloc_iova(iovad, iova_len, dma_get_mask(dev)); | 545 | iova = __alloc_iova(domain, iova_len, dma_get_mask(dev)); |
542 | if (!iova) | 546 | if (!iova) |
543 | goto out_restore_sg; | 547 | goto out_restore_sg; |
544 | 548 | ||
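Threading the iommu_domain into __alloc_iova() exists so the allocator can clamp the caller's DMA mask to the domain's aperture; without the clamp an IOVA could be handed out above what the hardware is able to translate. The whole fix reduces to one comparison before the existing allocation, roughly:

    static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size,
                                     dma_addr_t dma_limit)
    {
            struct iova_domain *iovad = domain->iova_cookie;
            unsigned long shift = iova_shift(iovad);
            unsigned long length = iova_align(iovad, size) >> shift;

            /* never allocate above what the domain can map */
            if (domain->geometry.force_aperture)
                    dma_limit = min(dma_limit, domain->geometry.aperture_end);

            return alloc_iova(iovad, length, dma_limit >> shift, true);
    }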
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index 8c6139986d7d..def8ca1c982d 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c | |||
@@ -286,12 +286,14 @@ static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl) | |||
286 | int prot = IOMMU_READ; | 286 | int prot = IOMMU_READ; |
287 | arm_v7s_iopte attr = pte >> ARM_V7S_ATTR_SHIFT(lvl); | 287 | arm_v7s_iopte attr = pte >> ARM_V7S_ATTR_SHIFT(lvl); |
288 | 288 | ||
289 | if (attr & ARM_V7S_PTE_AP_RDONLY) | 289 | if (!(attr & ARM_V7S_PTE_AP_RDONLY)) |
290 | prot |= IOMMU_WRITE; | 290 | prot |= IOMMU_WRITE; |
291 | if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0) | 291 | if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0) |
292 | prot |= IOMMU_MMIO; | 292 | prot |= IOMMU_MMIO; |
293 | else if (pte & ARM_V7S_ATTR_C) | 293 | else if (pte & ARM_V7S_ATTR_C) |
294 | prot |= IOMMU_CACHE; | 294 | prot |= IOMMU_CACHE; |
295 | if (pte & ARM_V7S_ATTR_XN(lvl)) | ||
296 | prot |= IOMMU_NOEXEC; | ||
295 | 297 | ||
296 | return prot; | 298 | return prot; |
297 | } | 299 | } |
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h index 9ed0a8462ccf..3dab13b4a211 100644 --- a/drivers/iommu/mtk_iommu.h +++ b/drivers/iommu/mtk_iommu.h | |||
@@ -55,19 +55,19 @@ struct mtk_iommu_data { | |||
55 | bool enable_4GB; | 55 | bool enable_4GB; |
56 | }; | 56 | }; |
57 | 57 | ||
58 | static int compare_of(struct device *dev, void *data) | 58 | static inline int compare_of(struct device *dev, void *data) |
59 | { | 59 | { |
60 | return dev->of_node == data; | 60 | return dev->of_node == data; |
61 | } | 61 | } |
62 | 62 | ||
63 | static int mtk_iommu_bind(struct device *dev) | 63 | static inline int mtk_iommu_bind(struct device *dev) |
64 | { | 64 | { |
65 | struct mtk_iommu_data *data = dev_get_drvdata(dev); | 65 | struct mtk_iommu_data *data = dev_get_drvdata(dev); |
66 | 66 | ||
67 | return component_bind_all(dev, &data->smi_imu); | 67 | return component_bind_all(dev, &data->smi_imu); |
68 | } | 68 | } |
69 | 69 | ||
70 | static void mtk_iommu_unbind(struct device *dev) | 70 | static inline void mtk_iommu_unbind(struct device *dev) |
71 | { | 71 | { |
72 | struct mtk_iommu_data *data = dev_get_drvdata(dev); | 72 | struct mtk_iommu_data *data = dev_get_drvdata(dev); |
73 | 73 | ||
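Marking these header-defined helpers static inline matters because mtk_iommu.h is included from more than one translation unit: a plain static function that some includer never calls triggers -Wunused-function, while static inline is silently discarded when unused. The convention, in miniature:

    /* in a header shared by several .c files */
    static inline int compare_of(struct device *dev, void *data)
    {
            /* no unused-function warning in files that never call it */
            return dev->of_node == data;
    }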
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 7ceaba81efb4..36b9c28a5c91 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
@@ -1545,7 +1545,12 @@ static int its_force_quiescent(void __iomem *base) | |||
1545 | u32 val; | 1545 | u32 val; |
1546 | 1546 | ||
1547 | val = readl_relaxed(base + GITS_CTLR); | 1547 | val = readl_relaxed(base + GITS_CTLR); |
1548 | if (val & GITS_CTLR_QUIESCENT) | 1548 | /* |
1549 | * GIC architecture specification requires the ITS to be both | ||
1550 | * disabled and quiescent for writes to GITS_BASER<n> or | ||
1551 | * GITS_CBASER to not have UNPREDICTABLE results. | ||
1552 | */ | ||
1553 | if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE)) | ||
1549 | return 0; | 1554 | return 0; |
1550 | 1555 | ||
1551 | /* Disable the generation of all interrupts to this ITS */ | 1556 | /* Disable the generation of all interrupts to this ITS */ |
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 6fc56c3466b0..ede5672ab34d 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c | |||
@@ -667,13 +667,20 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, | |||
667 | #endif | 667 | #endif |
668 | 668 | ||
669 | #ifdef CONFIG_CPU_PM | 669 | #ifdef CONFIG_CPU_PM |
670 | /* Check whether it's single security state view */ | ||
671 | static bool gic_dist_security_disabled(void) | ||
672 | { | ||
673 | return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS; | ||
674 | } | ||
675 | |||
670 | static int gic_cpu_pm_notifier(struct notifier_block *self, | 676 | static int gic_cpu_pm_notifier(struct notifier_block *self, |
671 | unsigned long cmd, void *v) | 677 | unsigned long cmd, void *v) |
672 | { | 678 | { |
673 | if (cmd == CPU_PM_EXIT) { | 679 | if (cmd == CPU_PM_EXIT) { |
674 | gic_enable_redist(true); | 680 | if (gic_dist_security_disabled()) |
681 | gic_enable_redist(true); | ||
675 | gic_cpu_sys_reg_init(); | 682 | gic_cpu_sys_reg_init(); |
676 | } else if (cmd == CPU_PM_ENTER) { | 683 | } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) { |
677 | gic_write_grpen1(0); | 684 | gic_write_grpen1(0); |
678 | gic_enable_redist(false); | 685 | gic_enable_redist(false); |
679 | } | 686 | } |
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index c2cab572c511..390fac59c6bc 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c | |||
@@ -769,6 +769,13 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) | |||
769 | int cpu; | 769 | int cpu; |
770 | unsigned long flags, map = 0; | 770 | unsigned long flags, map = 0; |
771 | 771 | ||
772 | if (unlikely(nr_cpu_ids == 1)) { | ||
773 | /* Only one CPU? let's do a self-IPI... */ | ||
774 | writel_relaxed(2 << 24 | irq, | ||
775 | gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT); | ||
776 | return; | ||
777 | } | ||
778 | |||
772 | raw_spin_lock_irqsave(&irq_controller_lock, flags); | 779 | raw_spin_lock_irqsave(&irq_controller_lock, flags); |
773 | 780 | ||
774 | /* Convert our logical CPU mask into a physical one. */ | 781 | /* Convert our logical CPU mask into a physical one. */ |
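The single-CPU shortcut writes GIC_DIST_SOFTINT directly because with nr_cpu_ids == 1 there is no CPU mask to translate and no other CPU to race with. The 2 << 24 selects the "self" target-list filter in the GICv2 SGI register:

    /* GICD_SGIR, bits 25:24 = target list filter:
     *   0b00 - use the CPU target list in bits 23:16
     *   0b01 - all CPUs except the requester
     *   0b10 - only the CPU that performs this write ("self") */
    writel_relaxed(2 << 24 | irq,
                   gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);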
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index c5f33c3bd228..83f498393a7f 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c | |||
@@ -713,9 +713,6 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq, | |||
713 | unsigned long flags; | 713 | unsigned long flags; |
714 | int i; | 714 | int i; |
715 | 715 | ||
716 | irq_set_chip_and_handler(virq, &gic_level_irq_controller, | ||
717 | handle_level_irq); | ||
718 | |||
719 | spin_lock_irqsave(&gic_lock, flags); | 716 | spin_lock_irqsave(&gic_lock, flags); |
720 | gic_map_to_pin(intr, gic_cpu_pin); | 717 | gic_map_to_pin(intr, gic_cpu_pin); |
721 | gic_map_to_vpe(intr, mips_cm_vp_id(vpe)); | 718 | gic_map_to_vpe(intr, mips_cm_vp_id(vpe)); |
@@ -732,6 +729,10 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq, | |||
732 | { | 729 | { |
733 | if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS) | 730 | if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS) |
734 | return gic_local_irq_domain_map(d, virq, hw); | 731 | return gic_local_irq_domain_map(d, virq, hw); |
732 | |||
733 | irq_set_chip_and_handler(virq, &gic_level_irq_controller, | ||
734 | handle_level_irq); | ||
735 | |||
735 | return gic_shared_irq_domain_map(d, virq, hw, 0); | 736 | return gic_shared_irq_domain_map(d, virq, hw, 0); |
736 | } | 737 | } |
737 | 738 | ||
@@ -771,11 +772,13 @@ static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq, | |||
771 | hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i); | 772 | hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i); |
772 | 773 | ||
773 | ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq, | 774 | ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq, |
774 | &gic_edge_irq_controller, | 775 | &gic_level_irq_controller, |
775 | NULL); | 776 | NULL); |
776 | if (ret) | 777 | if (ret) |
777 | goto error; | 778 | goto error; |
778 | 779 | ||
780 | irq_set_handler(virq + i, handle_level_irq); | ||
781 | |||
779 | ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu); | 782 | ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu); |
780 | if (ret) | 783 | if (ret) |
781 | goto error; | 784 | goto error; |
@@ -890,10 +893,17 @@ void gic_dev_domain_free(struct irq_domain *d, unsigned int virq, | |||
890 | return; | 893 | return; |
891 | } | 894 | } |
892 | 895 | ||
896 | static void gic_dev_domain_activate(struct irq_domain *domain, | ||
897 | struct irq_data *d) | ||
898 | { | ||
899 | gic_shared_irq_domain_map(domain, d->irq, d->hwirq, 0); | ||
900 | } | ||
901 | |||
893 | static struct irq_domain_ops gic_dev_domain_ops = { | 902 | static struct irq_domain_ops gic_dev_domain_ops = { |
894 | .xlate = gic_dev_domain_xlate, | 903 | .xlate = gic_dev_domain_xlate, |
895 | .alloc = gic_dev_domain_alloc, | 904 | .alloc = gic_dev_domain_alloc, |
896 | .free = gic_dev_domain_free, | 905 | .free = gic_dev_domain_free, |
906 | .activate = gic_dev_domain_activate, | ||
897 | }; | 907 | }; |
898 | 908 | ||
899 | static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr, | 909 | static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr, |
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 95a4ca6ce6ff..849ad441cd76 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c | |||
@@ -760,7 +760,8 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size, | |||
760 | if (!d->nr_stripes || | 760 | if (!d->nr_stripes || |
761 | d->nr_stripes > INT_MAX || | 761 | d->nr_stripes > INT_MAX || |
762 | d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) { | 762 | d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) { |
763 | pr_err("nr_stripes too large"); | 763 | pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)", |
764 | (unsigned)d->nr_stripes); | ||
764 | return -ENOMEM; | 765 | return -ENOMEM; |
765 | } | 766 | } |
766 | 767 | ||
@@ -1820,7 +1821,7 @@ static int cache_alloc(struct cache *ca) | |||
1820 | free = roundup_pow_of_two(ca->sb.nbuckets) >> 10; | 1821 | free = roundup_pow_of_two(ca->sb.nbuckets) >> 10; |
1821 | 1822 | ||
1822 | if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) || | 1823 | if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) || |
1823 | !init_fifo(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) || | 1824 | !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) || |
1824 | !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) || | 1825 | !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) || |
1825 | !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) || | 1826 | !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) || |
1826 | !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) || | 1827 | !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) || |
@@ -1844,7 +1845,7 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, | |||
1844 | struct block_device *bdev, struct cache *ca) | 1845 | struct block_device *bdev, struct cache *ca) |
1845 | { | 1846 | { |
1846 | char name[BDEVNAME_SIZE]; | 1847 | char name[BDEVNAME_SIZE]; |
1847 | const char *err = NULL; | 1848 | const char *err = NULL; /* must be set for any error case */ |
1848 | int ret = 0; | 1849 | int ret = 0; |
1849 | 1850 | ||
1850 | memcpy(&ca->sb, sb, sizeof(struct cache_sb)); | 1851 | memcpy(&ca->sb, sb, sizeof(struct cache_sb)); |
@@ -1861,8 +1862,13 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, | |||
1861 | ca->discard = CACHE_DISCARD(&ca->sb); | 1862 | ca->discard = CACHE_DISCARD(&ca->sb); |
1862 | 1863 | ||
1863 | ret = cache_alloc(ca); | 1864 | ret = cache_alloc(ca); |
1864 | if (ret != 0) | 1865 | if (ret != 0) { |
1866 | if (ret == -ENOMEM) | ||
1867 | err = "cache_alloc(): -ENOMEM"; | ||
1868 | else | ||
1869 | err = "cache_alloc(): unknown error"; | ||
1865 | goto err; | 1870 | goto err; |
1871 | } | ||
1866 | 1872 | ||
1867 | if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) { | 1873 | if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) { |
1868 | err = "error calling kobject_add"; | 1874 | err = "error calling kobject_add"; |
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 4e9784b4e0ac..eedba67b0e3e 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
@@ -181,7 +181,7 @@ struct crypt_config { | |||
181 | u8 key[0]; | 181 | u8 key[0]; |
182 | }; | 182 | }; |
183 | 183 | ||
184 | #define MIN_IOS 16 | 184 | #define MIN_IOS 64 |
185 | 185 | ||
186 | static void clone_init(struct dm_crypt_io *, struct bio *); | 186 | static void clone_init(struct dm_crypt_io *, struct bio *); |
187 | static void kcryptd_queue_crypt(struct dm_crypt_io *io); | 187 | static void kcryptd_queue_crypt(struct dm_crypt_io *io); |
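MIN_IOS sizes dm-crypt's mempools and bio_set, and those reserves are what guarantee forward progress when the page allocator is exhausted: mempool_alloc() falls back to the pre-allocated objects rather than failing. Raising the floor from 16 to 64 enlarges that guaranteed reserve, presumably to cover a deeper worst-case chain of in-flight per-bio objects, though the hunk itself does not say. How such a pool is typically created (a sketch; the cache name is illustrative):

    /* pre-allocates MIN_IOS objects that mempool_alloc(GFP_NOIO) can
     * always fall back on under memory pressure */
    cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
    if (!cc->io_pool)
            return -ENOMEM;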
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index 97e446d54a15..6a2e8dd44a1b 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c | |||
@@ -289,15 +289,13 @@ static int flakey_map(struct dm_target *ti, struct bio *bio) | |||
289 | pb->bio_submitted = true; | 289 | pb->bio_submitted = true; |
290 | 290 | ||
291 | /* | 291 | /* |
292 | * Map reads as normal only if corrupt_bio_byte set. | 292 | * Error reads if neither corrupt_bio_byte or drop_writes are set. |
293 | * Otherwise, flakey_end_io() will decide if the reads should be modified. | ||
293 | */ | 294 | */ |
294 | if (bio_data_dir(bio) == READ) { | 295 | if (bio_data_dir(bio) == READ) { |
295 | /* If flags were specified, only corrupt those that match. */ | 296 | if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags)) |
296 | if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) && | ||
297 | all_corrupt_bio_flags_match(bio, fc)) | ||
298 | goto map_bio; | ||
299 | else | ||
300 | return -EIO; | 297 | return -EIO; |
298 | goto map_bio; | ||
301 | } | 299 | } |
302 | 300 | ||
303 | /* | 301 | /* |
@@ -334,14 +332,21 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error) | |||
334 | struct flakey_c *fc = ti->private; | 332 | struct flakey_c *fc = ti->private; |
335 | struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); | 333 | struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); |
336 | 334 | ||
337 | /* | ||
338 | * Corrupt successful READs while in down state. | ||
339 | */ | ||
340 | if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) { | 335 | if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) { |
341 | if (fc->corrupt_bio_byte) | 336 | if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) && |
337 | all_corrupt_bio_flags_match(bio, fc)) { | ||
338 | /* | ||
339 | * Corrupt successful matching READs while in down state. | ||
340 | */ | ||
342 | corrupt_bio_data(bio, fc); | 341 | corrupt_bio_data(bio, fc); |
343 | else | 342 | |
343 | } else if (!test_bit(DROP_WRITES, &fc->flags)) { | ||
344 | /* | ||
345 | * Error read during the down_interval if drop_writes | ||
346 | * wasn't configured. | ||
347 | */ | ||
344 | return -EIO; | 348 | return -EIO; |
349 | } | ||
345 | } | 350 | } |
346 | 351 | ||
347 | return error; | 352 | return error; |
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c index 4ca2d1df5b44..07fc1ad42ec5 100644 --- a/drivers/md/dm-log.c +++ b/drivers/md/dm-log.c | |||
@@ -291,9 +291,10 @@ static void header_from_disk(struct log_header_core *core, struct log_header_dis | |||
291 | core->nr_regions = le64_to_cpu(disk->nr_regions); | 291 | core->nr_regions = le64_to_cpu(disk->nr_regions); |
292 | } | 292 | } |
293 | 293 | ||
294 | static int rw_header(struct log_c *lc, int rw) | 294 | static int rw_header(struct log_c *lc, int op) |
295 | { | 295 | { |
296 | lc->io_req.bi_op = rw; | 296 | lc->io_req.bi_op = op; |
297 | lc->io_req.bi_op_flags = 0; | ||
297 | 298 | ||
298 | return dm_io(&lc->io_req, 1, &lc->header_location, NULL); | 299 | return dm_io(&lc->io_req, 1, &lc->header_location, NULL); |
299 | } | 300 | } |
@@ -316,7 +317,7 @@ static int read_header(struct log_c *log) | |||
316 | { | 317 | { |
317 | int r; | 318 | int r; |
318 | 319 | ||
319 | r = rw_header(log, READ); | 320 | r = rw_header(log, REQ_OP_READ); |
320 | if (r) | 321 | if (r) |
321 | return r; | 322 | return r; |
322 | 323 | ||
@@ -630,7 +631,7 @@ static int disk_resume(struct dm_dirty_log *log) | |||
630 | header_to_disk(&lc->header, lc->disk_header); | 631 | header_to_disk(&lc->header, lc->disk_header); |
631 | 632 | ||
632 | /* write the new header */ | 633 | /* write the new header */ |
633 | r = rw_header(lc, WRITE); | 634 | r = rw_header(lc, REQ_OP_WRITE); |
634 | if (!r) { | 635 | if (!r) { |
635 | r = flush_header(lc); | 636 | r = flush_header(lc); |
636 | if (r) | 637 | if (r) |
@@ -698,7 +699,7 @@ static int disk_flush(struct dm_dirty_log *log) | |||
698 | log_clear_bit(lc, lc->clean_bits, i); | 699 | log_clear_bit(lc, lc->clean_bits, i); |
699 | } | 700 | } |
700 | 701 | ||
701 | r = rw_header(lc, WRITE); | 702 | r = rw_header(lc, REQ_OP_WRITE); |
702 | if (r) | 703 | if (r) |
703 | fail_log_device(lc); | 704 | fail_log_device(lc); |
704 | else { | 705 | else { |
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 1b9795d75ef8..8abde6b8cedc 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c | |||
@@ -191,7 +191,6 @@ struct raid_dev { | |||
191 | #define RT_FLAG_RS_BITMAP_LOADED 2 | 191 | #define RT_FLAG_RS_BITMAP_LOADED 2 |
192 | #define RT_FLAG_UPDATE_SBS 3 | 192 | #define RT_FLAG_UPDATE_SBS 3 |
193 | #define RT_FLAG_RESHAPE_RS 4 | 193 | #define RT_FLAG_RESHAPE_RS 4 |
194 | #define RT_FLAG_KEEP_RS_FROZEN 5 | ||
195 | 194 | ||
196 | /* Array elements of 64 bit needed for rebuild/failed disk bits */ | 195 | /* Array elements of 64 bit needed for rebuild/failed disk bits */ |
197 | #define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8) | 196 | #define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8) |
@@ -861,6 +860,9 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size) | |||
861 | { | 860 | { |
862 | unsigned long min_region_size = rs->ti->len / (1 << 21); | 861 | unsigned long min_region_size = rs->ti->len / (1 << 21); |
863 | 862 | ||
863 | if (rs_is_raid0(rs)) | ||
864 | return 0; | ||
865 | |||
864 | if (!region_size) { | 866 | if (!region_size) { |
865 | /* | 867 | /* |
866 | * Choose a reasonable default. All figures in sectors. | 868 | * Choose a reasonable default. All figures in sectors. |
@@ -930,6 +932,8 @@ static int validate_raid_redundancy(struct raid_set *rs) | |||
930 | rebuild_cnt++; | 932 | rebuild_cnt++; |
931 | 933 | ||
932 | switch (rs->raid_type->level) { | 934 | switch (rs->raid_type->level) { |
935 | case 0: | ||
936 | break; | ||
933 | case 1: | 937 | case 1: |
934 | if (rebuild_cnt >= rs->md.raid_disks) | 938 | if (rebuild_cnt >= rs->md.raid_disks) |
935 | goto too_many; | 939 | goto too_many; |
@@ -2335,6 +2339,13 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) | |||
2335 | case 0: | 2339 | case 0: |
2336 | break; | 2340 | break; |
2337 | default: | 2341 | default: |
2342 | /* | ||
2343 | * We have to keep any raid0 data/metadata device pairs or | ||
2344 | * the MD raid0 personality will fail to start the array. | ||
2345 | */ | ||
2346 | if (rs_is_raid0(rs)) | ||
2347 | continue; | ||
2348 | |||
2338 | dev = container_of(rdev, struct raid_dev, rdev); | 2349 | dev = container_of(rdev, struct raid_dev, rdev); |
2339 | if (dev->meta_dev) | 2350 | if (dev->meta_dev) |
2340 | dm_put_device(ti, dev->meta_dev); | 2351 | dm_put_device(ti, dev->meta_dev); |
@@ -2579,7 +2590,6 @@ static int rs_prepare_reshape(struct raid_set *rs) | |||
2579 | } else { | 2590 | } else { |
2580 | /* Process raid1 without delta_disks */ | 2591 | /* Process raid1 without delta_disks */ |
2581 | mddev->raid_disks = rs->raid_disks; | 2592 | mddev->raid_disks = rs->raid_disks; |
2582 | set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags); | ||
2583 | reshape = false; | 2593 | reshape = false; |
2584 | } | 2594 | } |
2585 | } else { | 2595 | } else { |
@@ -2590,7 +2600,6 @@ static int rs_prepare_reshape(struct raid_set *rs) | |||
2590 | if (reshape) { | 2600 | if (reshape) { |
2591 | set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags); | 2601 | set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags); |
2592 | set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); | 2602 | set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); |
2593 | set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags); | ||
2594 | } else if (mddev->raid_disks < rs->raid_disks) | 2603 | } else if (mddev->raid_disks < rs->raid_disks) |
2595 | /* Create new superblocks and bitmaps, if any new disks */ | 2604 | /* Create new superblocks and bitmaps, if any new disks */ |
2596 | set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); | 2605 | set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); |
@@ -2902,7 +2911,6 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
2902 | goto bad; | 2911 | goto bad; |
2903 | 2912 | ||
2904 | set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); | 2913 | set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); |
2905 | set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags); | ||
2906 | /* Takeover ain't recovery, so disable recovery */ | 2914 | /* Takeover ain't recovery, so disable recovery */ |
2907 | rs_setup_recovery(rs, MaxSector); | 2915 | rs_setup_recovery(rs, MaxSector); |
2908 | rs_set_new(rs); | 2916 | rs_set_new(rs); |
@@ -3386,21 +3394,28 @@ static void raid_postsuspend(struct dm_target *ti) | |||
3386 | { | 3394 | { |
3387 | struct raid_set *rs = ti->private; | 3395 | struct raid_set *rs = ti->private; |
3388 | 3396 | ||
3389 | if (test_and_clear_bit(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) { | 3397 | if (!rs->md.suspended) |
3390 | if (!rs->md.suspended) | 3398 | mddev_suspend(&rs->md); |
3391 | mddev_suspend(&rs->md); | 3399 | |
3392 | rs->md.ro = 1; | 3400 | rs->md.ro = 1; |
3393 | } | ||
3394 | } | 3401 | } |
3395 | 3402 | ||
3396 | static void attempt_restore_of_faulty_devices(struct raid_set *rs) | 3403 | static void attempt_restore_of_faulty_devices(struct raid_set *rs) |
3397 | { | 3404 | { |
3398 | int i; | 3405 | int i; |
3399 | uint64_t failed_devices, cleared_failed_devices = 0; | 3406 | uint64_t cleared_failed_devices[DISKS_ARRAY_ELEMS]; |
3400 | unsigned long flags; | 3407 | unsigned long flags; |
3408 | bool cleared = false; | ||
3401 | struct dm_raid_superblock *sb; | 3409 | struct dm_raid_superblock *sb; |
3410 | struct mddev *mddev = &rs->md; | ||
3402 | struct md_rdev *r; | 3411 | struct md_rdev *r; |
3403 | 3412 | ||
3413 | /* RAID personalities have to provide hot add/remove methods or we need to bail out. */ | ||
3414 | if (!mddev->pers || !mddev->pers->hot_add_disk || !mddev->pers->hot_remove_disk) | ||
3415 | return; | ||
3416 | |||
3417 | memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices)); | ||
3418 | |||
3404 | for (i = 0; i < rs->md.raid_disks; i++) { | 3419 | for (i = 0; i < rs->md.raid_disks; i++) { |
3405 | r = &rs->dev[i].rdev; | 3420 | r = &rs->dev[i].rdev; |
3406 | if (test_bit(Faulty, &r->flags) && r->sb_page && | 3421 | if (test_bit(Faulty, &r->flags) && r->sb_page && |
@@ -3420,7 +3435,7 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs) | |||
3420 | * ourselves. | 3435 | * ourselves. |
3421 | */ | 3436 | */ |
3422 | if ((r->raid_disk >= 0) && | 3437 | if ((r->raid_disk >= 0) && |
3423 | (r->mddev->pers->hot_remove_disk(r->mddev, r) != 0)) | 3438 | (mddev->pers->hot_remove_disk(mddev, r) != 0)) |
3424 | /* Failed to revive this device, try next */ | 3439 | /* Failed to revive this device, try next */ |
3425 | continue; | 3440 | continue; |
3426 | 3441 | ||
@@ -3430,22 +3445,30 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs) | |||
3430 | clear_bit(Faulty, &r->flags); | 3445 | clear_bit(Faulty, &r->flags); |
3431 | clear_bit(WriteErrorSeen, &r->flags); | 3446 | clear_bit(WriteErrorSeen, &r->flags); |
3432 | clear_bit(In_sync, &r->flags); | 3447 | clear_bit(In_sync, &r->flags); |
3433 | if (r->mddev->pers->hot_add_disk(r->mddev, r)) { | 3448 | if (mddev->pers->hot_add_disk(mddev, r)) { |
3434 | r->raid_disk = -1; | 3449 | r->raid_disk = -1; |
3435 | r->saved_raid_disk = -1; | 3450 | r->saved_raid_disk = -1; |
3436 | r->flags = flags; | 3451 | r->flags = flags; |
3437 | } else { | 3452 | } else { |
3438 | r->recovery_offset = 0; | 3453 | r->recovery_offset = 0; |
3439 | cleared_failed_devices |= 1 << i; | 3454 | set_bit(i, (void *) cleared_failed_devices); |
3455 | cleared = true; | ||
3440 | } | 3456 | } |
3441 | } | 3457 | } |
3442 | } | 3458 | } |
3443 | if (cleared_failed_devices) { | 3459 | |
3460 | /* If any failed devices could be cleared, update all sbs failed_devices bits */ | ||
3461 | if (cleared) { | ||
3462 | uint64_t failed_devices[DISKS_ARRAY_ELEMS]; | ||
3463 | |||
3444 | rdev_for_each(r, &rs->md) { | 3464 | rdev_for_each(r, &rs->md) { |
3445 | sb = page_address(r->sb_page); | 3465 | sb = page_address(r->sb_page); |
3446 | failed_devices = le64_to_cpu(sb->failed_devices); | 3466 | sb_retrieve_failed_devices(sb, failed_devices); |
3447 | failed_devices &= ~cleared_failed_devices; | 3467 | |
3448 | sb->failed_devices = cpu_to_le64(failed_devices); | 3468 | for (i = 0; i < DISKS_ARRAY_ELEMS; i++) |
3469 | failed_devices[i] &= ~cleared_failed_devices[i]; | ||
3470 | |||
3471 | sb_update_failed_devices(sb, failed_devices); | ||
3449 | } | 3472 | } |
3450 | } | 3473 | } |
3451 | } | 3474 | } |
@@ -3610,26 +3633,15 @@ static void raid_resume(struct dm_target *ti) | |||
3610 | * devices are reachable again. | 3633 | * devices are reachable again. |
3611 | */ | 3634 | */ |
3612 | attempt_restore_of_faulty_devices(rs); | 3635 | attempt_restore_of_faulty_devices(rs); |
3613 | } else { | 3636 | } |
3614 | mddev->ro = 0; | ||
3615 | mddev->in_sync = 0; | ||
3616 | 3637 | ||
3617 | /* | 3638 | mddev->ro = 0; |
3618 | * When passing in flags to the ctr, we expect userspace | 3639 | mddev->in_sync = 0; |
3619 | * to reset them because they made it to the superblocks | ||
3620 | * and reload the mapping anyway. | ||
3621 | * | ||
3622 | * -> only unfreeze recovery in case of a table reload or | ||
3623 | * we'll have a bogus recovery/reshape position | ||
3624 | * retrieved from the superblock by the ctr because | ||
3625 | * the ongoing recovery/reshape will change it after read. | ||
3626 | */ | ||
3627 | if (!test_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags)) | ||
3628 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); | ||
3629 | 3640 | ||
3630 | if (mddev->suspended) | 3641 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); |
3631 | mddev_resume(mddev); | 3642 | |
3632 | } | 3643 | if (mddev->suspended) |
3644 | mddev_resume(mddev); | ||
3633 | } | 3645 | } |
3634 | 3646 | ||
3635 | static struct target_type raid_target = { | 3647 | static struct target_type raid_target = { |
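In attempt_restore_of_faulty_devices() the single uint64_t bitmask silently capped the bookkeeping at 64 raid devices; the rework tracks cleared devices in an array sized by DISKS_ARRAY_ELEMS and sets bits through set_bit(), so it scales to MAX_RAID_DEVICES. The kernel's stock idiom for a multi-word bitmap looks like this (generic sketch; dm-raid itself uses uint64_t arrays with its own helpers):

    DECLARE_BITMAP(cleared, MAX_DEVS);  /* enough longs for MAX_DEVS bits */

    bitmap_zero(cleared, MAX_DEVS);
    set_bit(i, cleared);                /* valid for any i < MAX_DEVS */
    if (!bitmap_empty(cleared, MAX_DEVS))
            update_superblocks(cleared);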
diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c index 4ace1da17db8..6c25213ab38c 100644 --- a/drivers/md/dm-round-robin.c +++ b/drivers/md/dm-round-robin.c | |||
@@ -210,14 +210,17 @@ static struct dm_path *rr_select_path(struct path_selector *ps, size_t nr_bytes) | |||
210 | struct path_info *pi = NULL; | 210 | struct path_info *pi = NULL; |
211 | struct dm_path *current_path = NULL; | 211 | struct dm_path *current_path = NULL; |
212 | 212 | ||
213 | local_irq_save(flags); | ||
213 | current_path = *this_cpu_ptr(s->current_path); | 214 | current_path = *this_cpu_ptr(s->current_path); |
214 | if (current_path) { | 215 | if (current_path) { |
215 | percpu_counter_dec(&s->repeat_count); | 216 | percpu_counter_dec(&s->repeat_count); |
216 | if (percpu_counter_read_positive(&s->repeat_count) > 0) | 217 | if (percpu_counter_read_positive(&s->repeat_count) > 0) { |
218 | local_irq_restore(flags); | ||
217 | return current_path; | 219 | return current_path; |
220 | } | ||
218 | } | 221 | } |
219 | 222 | ||
220 | spin_lock_irqsave(&s->lock, flags); | 223 | spin_lock(&s->lock); |
221 | if (!list_empty(&s->valid_paths)) { | 224 | if (!list_empty(&s->valid_paths)) { |
222 | pi = list_entry(s->valid_paths.next, struct path_info, list); | 225 | pi = list_entry(s->valid_paths.next, struct path_info, list); |
223 | list_move_tail(&pi->list, &s->valid_paths); | 226 | list_move_tail(&pi->list, &s->valid_paths); |
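The round-robin fix hinges on this_cpu_ptr() only being meaningful with preemption disabled: taking local_irq_save() before touching s->current_path and the percpu repeat_count pins the task to the CPU and keeps interrupt-context callers from racing it, after which the later lock can be a plain spin_lock() since interrupts are already off. The locking shape, with hypothetical helpers:

    local_irq_save(flags);          /* this_cpu_ptr() needs preemption off */
    path = *this_cpu_ptr(s->current_path);
    if (path && still_has_repeats(s)) {
            local_irq_restore(flags);
            return path;
    }

    spin_lock(&s->lock);            /* plain lock: irqs are already off */
    path = pick_next_path(s);
    spin_unlock_irqrestore(&s->lock, flags);
    return path;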
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c index bdee9a01ef35..c466ee2b0c97 100644 --- a/drivers/misc/cxl/context.c +++ b/drivers/misc/cxl/context.c | |||
@@ -90,8 +90,7 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master, | |||
90 | */ | 90 | */ |
91 | mutex_lock(&afu->contexts_lock); | 91 | mutex_lock(&afu->contexts_lock); |
92 | idr_preload(GFP_KERNEL); | 92 | idr_preload(GFP_KERNEL); |
93 | i = idr_alloc(&ctx->afu->contexts_idr, ctx, | 93 | i = idr_alloc(&ctx->afu->contexts_idr, ctx, ctx->afu->adapter->min_pe, |
94 | ctx->afu->adapter->native->sl_ops->min_pe, | ||
95 | ctx->afu->num_procs, GFP_NOWAIT); | 94 | ctx->afu->num_procs, GFP_NOWAIT); |
96 | idr_preload_end(); | 95 | idr_preload_end(); |
97 | mutex_unlock(&afu->contexts_lock); | 96 | mutex_unlock(&afu->contexts_lock); |
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h index de090533f18c..344a0ff8f8c7 100644 --- a/drivers/misc/cxl/cxl.h +++ b/drivers/misc/cxl/cxl.h | |||
@@ -561,7 +561,6 @@ struct cxl_service_layer_ops { | |||
561 | u64 (*timebase_read)(struct cxl *adapter); | 561 | u64 (*timebase_read)(struct cxl *adapter); |
562 | int capi_mode; | 562 | int capi_mode; |
563 | bool needs_reset_before_disable; | 563 | bool needs_reset_before_disable; |
564 | int min_pe; | ||
565 | }; | 564 | }; |
566 | 565 | ||
567 | struct cxl_native { | 566 | struct cxl_native { |
@@ -603,6 +602,7 @@ struct cxl { | |||
603 | struct bin_attribute cxl_attr; | 602 | struct bin_attribute cxl_attr; |
604 | int adapter_num; | 603 | int adapter_num; |
605 | int user_irqs; | 604 | int user_irqs; |
605 | int min_pe; | ||
606 | u64 ps_size; | 606 | u64 ps_size; |
607 | u16 psl_rev; | 607 | u16 psl_rev; |
608 | u16 base_image; | 608 | u16 base_image; |
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c index 3bcdaee11ba1..e606fdc4bc9c 100644 --- a/drivers/misc/cxl/native.c +++ b/drivers/misc/cxl/native.c | |||
@@ -924,7 +924,7 @@ static irqreturn_t native_irq_multiplexed(int irq, void *data) | |||
924 | return fail_psl_irq(afu, &irq_info); | 924 | return fail_psl_irq(afu, &irq_info); |
925 | } | 925 | } |
926 | 926 | ||
927 | void native_irq_wait(struct cxl_context *ctx) | 927 | static void native_irq_wait(struct cxl_context *ctx) |
928 | { | 928 | { |
929 | u64 dsisr; | 929 | u64 dsisr; |
930 | int timeout = 1000; | 930 | int timeout = 1000; |
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index d152e2de8c93..6f0c4ac4b649 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c | |||
@@ -379,7 +379,7 @@ static int calc_capp_routing(struct pci_dev *dev, u64 *chipid, u64 *capp_unit_id | |||
379 | 379 | ||
380 | static int init_implementation_adapter_psl_regs(struct cxl *adapter, struct pci_dev *dev) | 380 | static int init_implementation_adapter_psl_regs(struct cxl *adapter, struct pci_dev *dev) |
381 | { | 381 | { |
382 | u64 psl_dsnctl; | 382 | u64 psl_dsnctl, psl_fircntl; |
383 | u64 chipid; | 383 | u64 chipid; |
384 | u64 capp_unit_id; | 384 | u64 capp_unit_id; |
385 | int rc; | 385 | int rc; |
@@ -398,8 +398,11 @@ static int init_implementation_adapter_psl_regs(struct cxl *adapter, struct pci_ | |||
398 | cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL); | 398 | cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL); |
399 | /* snoop write mask */ | 399 | /* snoop write mask */ |
400 | cxl_p1_write(adapter, CXL_PSL_SNWRALLOC, 0x00000000FFFFFFFFULL); | 400 | cxl_p1_write(adapter, CXL_PSL_SNWRALLOC, 0x00000000FFFFFFFFULL); |
401 | /* set fir_accum */ | 401 | /* set fir_cntl to recommended value for production env */ |
402 | cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, 0x0800000000000000ULL); | 402 | psl_fircntl = (0x2ULL << (63-3)); /* ce_report */ |
403 | psl_fircntl |= (0x1ULL << (63-6)); /* FIR_report */ | ||
404 | psl_fircntl |= 0x1ULL; /* ce_thresh */ | ||
405 | cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, psl_fircntl); | ||
403 | /* for debugging with trace arrays */ | 406 | /* for debugging with trace arrays */ |
404 | cxl_p1_write(adapter, CXL_PSL_TRACE, 0x0000FF7C00000000ULL); | 407 | cxl_p1_write(adapter, CXL_PSL_TRACE, 0x0000FF7C00000000ULL); |
405 | 408 | ||
@@ -1521,14 +1524,15 @@ static const struct cxl_service_layer_ops xsl_ops = { | |||
1521 | .write_timebase_ctrl = write_timebase_ctrl_xsl, | 1524 | .write_timebase_ctrl = write_timebase_ctrl_xsl, |
1522 | .timebase_read = timebase_read_xsl, | 1525 | .timebase_read = timebase_read_xsl, |
1523 | .capi_mode = OPAL_PHB_CAPI_MODE_DMA, | 1526 | .capi_mode = OPAL_PHB_CAPI_MODE_DMA, |
1524 | .min_pe = 1, /* Workaround for Mellanox CX4 HW bug */ | ||
1525 | }; | 1527 | }; |
1526 | 1528 | ||
1527 | static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev) | 1529 | static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev) |
1528 | { | 1530 | { |
1529 | if (dev->vendor == PCI_VENDOR_ID_MELLANOX && dev->device == 0x1013) { | 1531 | if (dev->vendor == PCI_VENDOR_ID_MELLANOX && dev->device == 0x1013) { |
1532 | /* Mellanox CX-4 */ | ||
1530 | dev_info(&adapter->dev, "Device uses an XSL\n"); | 1533 | dev_info(&adapter->dev, "Device uses an XSL\n"); |
1531 | adapter->native->sl_ops = &xsl_ops; | 1534 | adapter->native->sl_ops = &xsl_ops; |
1535 | adapter->min_pe = 1; /* Workaround for CX-4 hardware bug */ | ||
1532 | } else { | 1536 | } else { |
1533 | dev_info(&adapter->dev, "Device uses a PSL\n"); | 1537 | dev_info(&adapter->dev, "Device uses a PSL\n"); |
1534 | adapter->native->sl_ops = &psl_ops; | 1538 | adapter->native->sl_ops = &psl_ops; |
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c index dee8def1c193..7ada5f1b7bb6 100644 --- a/drivers/misc/cxl/vphb.c +++ b/drivers/misc/cxl/vphb.c | |||
@@ -221,7 +221,7 @@ int cxl_pci_vphb_add(struct cxl_afu *afu) | |||
221 | /* Setup the PHB using arch provided callback */ | 221 | /* Setup the PHB using arch provided callback */ |
222 | phb->ops = &cxl_pcie_pci_ops; | 222 | phb->ops = &cxl_pcie_pci_ops; |
223 | phb->cfg_addr = NULL; | 223 | phb->cfg_addr = NULL; |
224 | phb->cfg_data = 0; | 224 | phb->cfg_data = NULL; |
225 | phb->private_data = afu; | 225 | phb->private_data = afu; |
226 | phb->controller_ops = cxl_pci_controller_ops; | 226 | phb->controller_ops = cxl_pci_controller_ops; |
227 | 227 | ||
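Editor's note: cfg_data is a pointer, so the vphb.c hunk above initializes it with NULL rather than 0. A trivial sketch (the struct is a stand-in, not the real pci_controller):

#include <stddef.h>
#include <stdio.h>

struct phb { void *cfg_addr; void *cfg_data; };

static void init_phb(struct phb *phb)
{
	phb->cfg_addr = NULL;
	phb->cfg_data = NULL; /* was "= 0"; same object code, clearer intent */
}

int main(void)
{
	struct phb phb;

	init_phb(&phb);
	printf("%p %p\n", phb.cfg_addr, phb.cfg_data);
	return 0;
}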
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 48a5dd740f3b..2206d4477dbb 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c | |||
@@ -1726,6 +1726,7 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req) | |||
1726 | break; | 1726 | break; |
1727 | 1727 | ||
1728 | if (req_op(next) == REQ_OP_DISCARD || | 1728 | if (req_op(next) == REQ_OP_DISCARD || |
1729 | req_op(next) == REQ_OP_SECURE_ERASE || | ||
1729 | req_op(next) == REQ_OP_FLUSH) | 1730 | req_op(next) == REQ_OP_FLUSH) |
1730 | break; | 1731 | break; |
1731 | 1732 | ||
@@ -2150,6 +2151,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) | |||
2150 | struct mmc_card *card = md->queue.card; | 2151 | struct mmc_card *card = md->queue.card; |
2151 | struct mmc_host *host = card->host; | 2152 | struct mmc_host *host = card->host; |
2152 | unsigned long flags; | 2153 | unsigned long flags; |
2154 | bool req_is_special = mmc_req_is_special(req); | ||
2153 | 2155 | ||
2154 | if (req && !mq->mqrq_prev->req) | 2156 | if (req && !mq->mqrq_prev->req) |
2155 | /* claim host only for the first request */ | 2157 | /* claim host only for the first request */ |
@@ -2190,8 +2192,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) | |||
2190 | } | 2192 | } |
2191 | 2193 | ||
2192 | out: | 2194 | out: |
2193 | if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || | 2195 | if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || req_is_special) |
2194 | mmc_req_is_special(req)) | ||
2195 | /* | 2196 | /* |
2196 | * Release host when there are no more requests | 2197 | * Release host when there are no more requests |
2197 | * and after special request(discard, flush) is done. | 2198 | * and after special request(discard, flush) is done. |
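Editor's note: the block.c hunks above cache mmc_req_is_special(req) before the request is issued, because the request may already have been completed and freed by the time the result is needed at the out: label. A minimal userspace sketch of the same snapshot-before-handoff pattern (all types here are simplified stand-ins):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct request { int op; };

static bool req_is_special(const struct request *req)
{
	return req && req->op != 0;
}

/* Stands in for issue_fn(): the request may be completed and freed
 * inside this call, so nothing may touch it afterwards. */
static void issue_and_free(struct request *req)
{
	free(req);
}

int main(void)
{
	struct request *req = calloc(1, sizeof(*req));
	bool req_is_special_cached;

	if (!req)
		return 1;
	req->op = 1; /* pretend this is a discard/flush/secure-erase */

	req_is_special_cached = req_is_special(req); /* snapshot first */
	issue_and_free(req);

	/* Safe: decide on the cached value, never the freed pointer. */
	printf("special=%d\n", req_is_special_cached);
	return 0;
}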
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index bf14642a576a..708057261b38 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c | |||
@@ -33,7 +33,8 @@ static int mmc_prep_request(struct request_queue *q, struct request *req) | |||
33 | /* | 33 | /* |
34 | * We only like normal block requests and discards. | 34 | * We only like normal block requests and discards. |
35 | */ | 35 | */ |
36 | if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD) { | 36 | if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD && |
37 | req_op(req) != REQ_OP_SECURE_ERASE) { | ||
37 | blk_dump_rq_flags(req, "MMC bad request"); | 38 | blk_dump_rq_flags(req, "MMC bad request"); |
38 | return BLKPREP_KILL; | 39 | return BLKPREP_KILL; |
39 | } | 40 | } |
@@ -64,6 +65,8 @@ static int mmc_queue_thread(void *d) | |||
64 | spin_unlock_irq(q->queue_lock); | 65 | spin_unlock_irq(q->queue_lock); |
65 | 66 | ||
66 | if (req || mq->mqrq_prev->req) { | 67 | if (req || mq->mqrq_prev->req) { |
68 | bool req_is_special = mmc_req_is_special(req); | ||
69 | |||
67 | set_current_state(TASK_RUNNING); | 70 | set_current_state(TASK_RUNNING); |
68 | mq->issue_fn(mq, req); | 71 | mq->issue_fn(mq, req); |
69 | cond_resched(); | 72 | cond_resched(); |
@@ -79,7 +82,7 @@ static int mmc_queue_thread(void *d) | |||
79 | * has been finished. Do not assign it to previous | 82 | * has been finished. Do not assign it to previous |
80 | * request. | 83 | * request. |
81 | */ | 84 | */ |
82 | if (mmc_req_is_special(req)) | 85 | if (req_is_special) |
83 | mq->mqrq_cur->req = NULL; | 86 | mq->mqrq_cur->req = NULL; |
84 | 87 | ||
85 | mq->mqrq_prev->brq.mrq.data = NULL; | 88 | mq->mqrq_prev->brq.mrq.data = NULL; |
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h index d62531124d54..fee5e1271465 100644 --- a/drivers/mmc/card/queue.h +++ b/drivers/mmc/card/queue.h | |||
@@ -4,7 +4,9 @@ | |||
4 | static inline bool mmc_req_is_special(struct request *req) | 4 | static inline bool mmc_req_is_special(struct request *req) |
5 | { | 5 | { |
6 | return req && | 6 | return req && |
7 | (req_op(req) == REQ_OP_FLUSH || req_op(req) == REQ_OP_DISCARD); | 7 | (req_op(req) == REQ_OP_FLUSH || |
8 | req_op(req) == REQ_OP_DISCARD || | ||
9 | req_op(req) == REQ_OP_SECURE_ERASE); | ||
8 | } | 10 | } |
9 | 11 | ||
10 | struct request; | 12 | struct request; |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 1f276fa30ba6..217e8da0628c 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -152,7 +152,7 @@ module_param(lacp_rate, charp, 0); | |||
152 | MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; " | 152 | MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; " |
153 | "0 for slow, 1 for fast"); | 153 | "0 for slow, 1 for fast"); |
154 | module_param(ad_select, charp, 0); | 154 | module_param(ad_select, charp, 0); |
155 | MODULE_PARM_DESC(ad_select, "803.ad aggregation selection logic; " | 155 | MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; " |
156 | "0 for stable (default), 1 for bandwidth, " | 156 | "0 for stable (default), 1 for bandwidth, " |
157 | "2 for count"); | 157 | "2 for count"); |
158 | module_param(min_links, int, 0); | 158 | module_param(min_links, int, 0); |
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h index 8f12bddd5dc9..a0b453ea34c9 100644 --- a/drivers/net/dsa/b53/b53_regs.h +++ b/drivers/net/dsa/b53/b53_regs.h | |||
@@ -258,7 +258,7 @@ | |||
258 | * BCM5325 and BCM5365 share most definitions below | 258 | * BCM5325 and BCM5365 share most definitions below |
259 | */ | 259 | */ |
260 | #define B53_ARLTBL_MAC_VID_ENTRY(n) (0x10 * (n)) | 260 | #define B53_ARLTBL_MAC_VID_ENTRY(n) (0x10 * (n)) |
261 | #define ARLTBL_MAC_MASK 0xffffffffffff | 261 | #define ARLTBL_MAC_MASK 0xffffffffffffULL |
262 | #define ARLTBL_VID_S 48 | 262 | #define ARLTBL_VID_S 48 |
263 | #define ARLTBL_VID_MASK_25 0xff | 263 | #define ARLTBL_VID_MASK_25 0xff |
264 | #define ARLTBL_VID_MASK 0xfff | 264 | #define ARLTBL_VID_MASK 0xfff |
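Editor's note: the b53 hunk above adds a ULL suffix to the 48-bit MAC mask so its 64-bit width is explicit on every ABI. A small sketch of splitting an ARL entry with that mask, reusing the shift and VID mask from the macros above (the entry value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t entry = 0x0234aabbccddeeffULL;   /* fake ARL entry */
	uint64_t mac = entry & 0xffffffffffffULL; /* ARLTBL_MAC_MASK */
	uint64_t vid = (entry >> 48) & 0xfffULL;  /* ARLTBL_VID_S, ARLTBL_VID_MASK */

	printf("mac=0x%012llx vid=0x%03llx\n",
	       (unsigned long long)mac, (unsigned long long)vid);
	return 0;
}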
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index d36aedde8cb9..d1d9d3cf9139 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c | |||
@@ -3187,6 +3187,7 @@ static int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr) | |||
3187 | return err; | 3187 | return err; |
3188 | } | 3188 | } |
3189 | 3189 | ||
3190 | #ifdef CONFIG_NET_DSA_HWMON | ||
3190 | static int mv88e6xxx_mdio_page_read(struct dsa_switch *ds, int port, int page, | 3191 | static int mv88e6xxx_mdio_page_read(struct dsa_switch *ds, int port, int page, |
3191 | int reg) | 3192 | int reg) |
3192 | { | 3193 | { |
@@ -3212,6 +3213,7 @@ static int mv88e6xxx_mdio_page_write(struct dsa_switch *ds, int port, int page, | |||
3212 | 3213 | ||
3213 | return ret; | 3214 | return ret; |
3214 | } | 3215 | } |
3216 | #endif | ||
3215 | 3217 | ||
3216 | static int mv88e6xxx_port_to_mdio_addr(struct mv88e6xxx_chip *chip, int port) | 3218 | static int mv88e6xxx_port_to_mdio_addr(struct mv88e6xxx_chip *chip, int port) |
3217 | { | 3219 | { |
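Editor's note: the chip.c hunk above fences the mdio page helpers with CONFIG_NET_DSA_HWMON because their only callers already sit behind that option; without the fence, a HWMON-less build trips -Wunused-function. A toy sketch of the pattern (CONFIG_DEMO_HWMON is a placeholder macro, not a real Kconfig symbol):

#include <stdio.h>

/* With CONFIG_DEMO_HWMON undefined, the helper and its only caller
 * disappear together, so no unused-function warning fires. */
#ifdef CONFIG_DEMO_HWMON
static int mdio_page_read(int page, int reg)
{
	return page * 0x100 + reg;
}
#endif

int main(void)
{
#ifdef CONFIG_DEMO_HWMON
	printf("0x%x\n", mdio_page_read(1, 2));
#else
	printf("hwmon support compiled out\n");
#endif
	return 0;
}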
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index 37a0f463b8de..18bb9556dd00 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | |||
@@ -793,6 +793,8 @@ int xgene_enet_phy_connect(struct net_device *ndev) | |||
793 | netdev_err(ndev, "Could not connect to PHY\n"); | 793 | netdev_err(ndev, "Could not connect to PHY\n"); |
794 | return -ENODEV; | 794 | return -ENODEV; |
795 | } | 795 | } |
796 | #else | ||
797 | return -ENODEV; | ||
796 | #endif | 798 | #endif |
797 | } | 799 | } |
798 | 800 | ||
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index 4bff0f3040df..b0da9693f28a 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c | |||
@@ -771,8 +771,10 @@ int arc_emac_probe(struct net_device *ndev, int interface) | |||
771 | priv->dev = dev; | 771 | priv->dev = dev; |
772 | 772 | ||
773 | priv->regs = devm_ioremap_resource(dev, &res_regs); | 773 | priv->regs = devm_ioremap_resource(dev, &res_regs); |
774 | if (IS_ERR(priv->regs)) | 774 | if (IS_ERR(priv->regs)) { |
775 | return PTR_ERR(priv->regs); | 775 | err = PTR_ERR(priv->regs); |
776 | goto out_put_node; | ||
777 | } | ||
776 | 778 | ||
777 | dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs); | 779 | dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs); |
778 | 780 | ||
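Editor's note: the arc emac hunk above converts a bare early return into goto out_put_node, presumably so a device-node reference taken earlier in probe is also dropped on the error path. A userspace sketch of that unwind shape (get_node/put_node stand in for the of_* refcount helpers):

#include <stdio.h>
#include <stdlib.h>

static void *get_node(void)   { return malloc(1); } /* takes a reference */
static void put_node(void *n) { free(n); }          /* of_node_put(), roughly */

static int probe(int fail)
{
	void *np = get_node();
	int err = 0;

	if (fail) {
		err = -1;          /* e.g. PTR_ERR(priv->regs) */
		goto out_put_node; /* never a bare return past this point */
	}

out_put_node:
	put_node(np);
	return err;
}

int main(void)
{
	printf("%d %d\n", probe(0), probe(1));
	return 0;
}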
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index ff300f7cf529..659261218d9f 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
@@ -12552,10 +12552,6 @@ static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, | |||
12552 | info->data = TG3_RSS_MAX_NUM_QS; | 12552 | info->data = TG3_RSS_MAX_NUM_QS; |
12553 | } | 12553 | } |
12554 | 12554 | ||
12555 | /* The first interrupt vector only | ||
12556 | * handles link interrupts. | ||
12557 | */ | ||
12558 | info->data -= 1; | ||
12559 | return 0; | 12555 | return 0; |
12560 | 12556 | ||
12561 | default: | 12557 | default: |
@@ -14014,6 +14010,7 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) | |||
14014 | } | 14010 | } |
14015 | 14011 | ||
14016 | if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) || | 14012 | if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) || |
14013 | (!ec->rx_coalesce_usecs) || | ||
14017 | (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) || | 14014 | (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) || |
14018 | (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) || | 14015 | (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) || |
14019 | (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) || | 14016 | (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) || |
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 36893d8958d4..b6fcf10621b6 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h | |||
@@ -403,11 +403,11 @@ | |||
403 | #define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII 0x00000004 | 403 | #define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII 0x00000004 |
404 | #define MACB_CAPS_NO_GIGABIT_HALF 0x00000008 | 404 | #define MACB_CAPS_NO_GIGABIT_HALF 0x00000008 |
405 | #define MACB_CAPS_USRIO_DISABLED 0x00000010 | 405 | #define MACB_CAPS_USRIO_DISABLED 0x00000010 |
406 | #define MACB_CAPS_JUMBO 0x00000020 | ||
406 | #define MACB_CAPS_FIFO_MODE 0x10000000 | 407 | #define MACB_CAPS_FIFO_MODE 0x10000000 |
407 | #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 | 408 | #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 |
408 | #define MACB_CAPS_SG_DISABLED 0x40000000 | 409 | #define MACB_CAPS_SG_DISABLED 0x40000000 |
409 | #define MACB_CAPS_MACB_IS_GEM 0x80000000 | 410 | #define MACB_CAPS_MACB_IS_GEM 0x80000000 |
410 | #define MACB_CAPS_JUMBO 0x00000010 | ||
411 | 411 | ||
412 | /* Bit manipulation macros */ | 412 | /* Bit manipulation macros */ |
413 | #define MACB_BIT(name) \ | 413 | #define MACB_BIT(name) \ |
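Editor's note: the macb.h hunk above moves MACB_CAPS_JUMBO from 0x00000010, where it collided with MACB_CAPS_USRIO_DISABLED, to the free 0x00000020 bit. A C11 compile-time guard like the following would have caught the overlap (sketch only; the kernel header carries no such assert):

#include <assert.h>

#define MACB_CAPS_USRIO_DISABLED 0x00000010
#define MACB_CAPS_JUMBO          0x00000020 /* was 0x00000010 */

static_assert((MACB_CAPS_USRIO_DISABLED & MACB_CAPS_JUMBO) == 0,
	      "capability bits must not overlap");

int main(void) { return 0; }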
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 1471e16ba719..f45385f5c6e5 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c | |||
@@ -1299,6 +1299,7 @@ static int | |||
1299 | dm9000_open(struct net_device *dev) | 1299 | dm9000_open(struct net_device *dev) |
1300 | { | 1300 | { |
1301 | struct board_info *db = netdev_priv(dev); | 1301 | struct board_info *db = netdev_priv(dev); |
1302 | unsigned int irq_flags = irq_get_trigger_type(dev->irq); | ||
1302 | 1303 | ||
1303 | if (netif_msg_ifup(db)) | 1304 | if (netif_msg_ifup(db)) |
1304 | dev_dbg(db->dev, "enabling %s\n", dev->name); | 1305 | dev_dbg(db->dev, "enabling %s\n", dev->name); |
@@ -1306,9 +1307,11 @@ dm9000_open(struct net_device *dev) | |||
1306 | /* If there is no IRQ type specified, tell the user that this is a | 1307 | /* If there is no IRQ type specified, tell the user that this is a |
1307 | * problem | 1308 | * problem |
1308 | */ | 1309 | */ |
1309 | if (irq_get_trigger_type(dev->irq) == IRQF_TRIGGER_NONE) | 1310 | if (irq_flags == IRQF_TRIGGER_NONE) |
1310 | dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n"); | 1311 | dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n"); |
1311 | 1312 | ||
1313 | irq_flags |= IRQF_SHARED; | ||
1314 | |||
1312 | /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */ | 1315 | /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */ |
1313 | iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ | 1316 | iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ |
1314 | mdelay(1); /* delay needs by DM9000B */ | 1317 | mdelay(1); /* delay needs by DM9000B */ |
@@ -1316,8 +1319,7 @@ dm9000_open(struct net_device *dev) | |||
1316 | /* Initialize DM9000 board */ | 1319 | /* Initialize DM9000 board */ |
1317 | dm9000_init_dm9000(dev); | 1320 | dm9000_init_dm9000(dev); |
1318 | 1321 | ||
1319 | if (request_irq(dev->irq, dm9000_interrupt, IRQF_SHARED, | 1322 | if (request_irq(dev->irq, dm9000_interrupt, irq_flags, dev->name, dev)) |
1320 | dev->name, dev)) | ||
1321 | return -EAGAIN; | 1323 | return -EAGAIN; |
1322 | /* Now that we have an interrupt handler hooked up we can unmask | 1324 | /* Now that we have an interrupt handler hooked up we can unmask |
1323 | * our interrupts | 1325 | * our interrupts |
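Editor's note: the dm9000 hunk above keeps the trigger type reported by irq_get_trigger_type() and ORs in IRQF_SHARED, instead of requesting the IRQ with IRQF_SHARED alone and discarding the platform's trigger configuration. A toy flag-composition sketch (the flag values are placeholders, not those from <linux/interrupt.h>):

#include <stdio.h>

#define TRIGGER_NONE 0x0
#define TRIGGER_HIGH 0x4  /* illustrative */
#define SHARED       0x80 /* illustrative */

int main(void)
{
	unsigned long irq_flags = TRIGGER_HIGH; /* from irq_get_trigger_type() */

	if (irq_flags == TRIGGER_NONE)
		printf("WARNING: no IRQ resource flags set.\n");

	irq_flags |= SHARED; /* preserve the trigger, add sharing */
	printf("request_irq flags = 0x%lx\n", irq_flags);
	return 0;
}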
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c index 1235c7f2564b..1e1eb92998fb 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | |||
@@ -17,7 +17,7 @@ static const struct mac_stats_string g_gmac_stats_string[] = { | |||
17 | {"gmac_rx_octets_total_ok", MAC_STATS_FIELD_OFF(rx_good_bytes)}, | 17 | {"gmac_rx_octets_total_ok", MAC_STATS_FIELD_OFF(rx_good_bytes)}, |
18 | {"gmac_rx_octets_bad", MAC_STATS_FIELD_OFF(rx_bad_bytes)}, | 18 | {"gmac_rx_octets_bad", MAC_STATS_FIELD_OFF(rx_bad_bytes)}, |
19 | {"gmac_rx_uc_pkts", MAC_STATS_FIELD_OFF(rx_uc_pkts)}, | 19 | {"gmac_rx_uc_pkts", MAC_STATS_FIELD_OFF(rx_uc_pkts)}, |
20 | {"gamc_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)}, | 20 | {"gmac_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)}, |
21 | {"gmac_rx_bc_pkts", MAC_STATS_FIELD_OFF(rx_bc_pkts)}, | 21 | {"gmac_rx_bc_pkts", MAC_STATS_FIELD_OFF(rx_bc_pkts)}, |
22 | {"gmac_rx_pkts_64octets", MAC_STATS_FIELD_OFF(rx_64bytes)}, | 22 | {"gmac_rx_pkts_64octets", MAC_STATS_FIELD_OFF(rx_64bytes)}, |
23 | {"gmac_rx_pkts_65to127", MAC_STATS_FIELD_OFF(rx_65to127)}, | 23 | {"gmac_rx_pkts_65to127", MAC_STATS_FIELD_OFF(rx_65to127)}, |
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index 7fd4d54599e4..6b03c8553e59 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c | |||
@@ -2032,7 +2032,8 @@ const struct e1000_info e1000_82574_info = { | |||
2032 | | FLAG2_DISABLE_ASPM_L0S | 2032 | | FLAG2_DISABLE_ASPM_L0S |
2033 | | FLAG2_DISABLE_ASPM_L1 | 2033 | | FLAG2_DISABLE_ASPM_L1 |
2034 | | FLAG2_NO_DISABLE_RX | 2034 | | FLAG2_NO_DISABLE_RX |
2035 | | FLAG2_DMA_BURST, | 2035 | | FLAG2_DMA_BURST |
2036 | | FLAG2_CHECK_SYSTIM_OVERFLOW, | ||
2036 | .pba = 32, | 2037 | .pba = 32, |
2037 | .max_hw_frame_size = DEFAULT_JUMBO, | 2038 | .max_hw_frame_size = DEFAULT_JUMBO, |
2038 | .get_variants = e1000_get_variants_82571, | 2039 | .get_variants = e1000_get_variants_82571, |
@@ -2053,7 +2054,8 @@ const struct e1000_info e1000_82583_info = { | |||
2053 | | FLAG_HAS_CTRLEXT_ON_LOAD, | 2054 | | FLAG_HAS_CTRLEXT_ON_LOAD, |
2054 | .flags2 = FLAG2_DISABLE_ASPM_L0S | 2055 | .flags2 = FLAG2_DISABLE_ASPM_L0S |
2055 | | FLAG2_DISABLE_ASPM_L1 | 2056 | | FLAG2_DISABLE_ASPM_L1 |
2056 | | FLAG2_NO_DISABLE_RX, | 2057 | | FLAG2_NO_DISABLE_RX |
2058 | | FLAG2_CHECK_SYSTIM_OVERFLOW, | ||
2057 | .pba = 32, | 2059 | .pba = 32, |
2058 | .max_hw_frame_size = DEFAULT_JUMBO, | 2060 | .max_hw_frame_size = DEFAULT_JUMBO, |
2059 | .get_variants = e1000_get_variants_82571, | 2061 | .get_variants = e1000_get_variants_82571, |
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index ef96cd11d6d2..879cca47b021 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h | |||
@@ -452,6 +452,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca); | |||
452 | #define FLAG2_PCIM2PCI_ARBITER_WA BIT(11) | 452 | #define FLAG2_PCIM2PCI_ARBITER_WA BIT(11) |
453 | #define FLAG2_DFLT_CRC_STRIPPING BIT(12) | 453 | #define FLAG2_DFLT_CRC_STRIPPING BIT(12) |
454 | #define FLAG2_CHECK_RX_HWTSTAMP BIT(13) | 454 | #define FLAG2_CHECK_RX_HWTSTAMP BIT(13) |
455 | #define FLAG2_CHECK_SYSTIM_OVERFLOW BIT(14) | ||
455 | 456 | ||
456 | #define E1000_RX_DESC_PS(R, i) \ | 457 | #define E1000_RX_DESC_PS(R, i) \ |
457 | (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) | 458 | (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) |
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 3e11322d8d58..f3aaca743ea3 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c | |||
@@ -5885,7 +5885,8 @@ const struct e1000_info e1000_pch_lpt_info = { | |||
5885 | | FLAG_HAS_JUMBO_FRAMES | 5885 | | FLAG_HAS_JUMBO_FRAMES |
5886 | | FLAG_APME_IN_WUC, | 5886 | | FLAG_APME_IN_WUC, |
5887 | .flags2 = FLAG2_HAS_PHY_STATS | 5887 | .flags2 = FLAG2_HAS_PHY_STATS |
5888 | | FLAG2_HAS_EEE, | 5888 | | FLAG2_HAS_EEE |
5889 | | FLAG2_CHECK_SYSTIM_OVERFLOW, | ||
5889 | .pba = 26, | 5890 | .pba = 26, |
5890 | .max_hw_frame_size = 9022, | 5891 | .max_hw_frame_size = 9022, |
5891 | .get_variants = e1000_get_variants_ich8lan, | 5892 | .get_variants = e1000_get_variants_ich8lan, |
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 02f443958f31..7017281ba2dc 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c | |||
@@ -4303,6 +4303,42 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter) | |||
4303 | } | 4303 | } |
4304 | 4304 | ||
4305 | /** | 4305 | /** |
4306 | * e1000e_sanitize_systim - sanitize raw cycle counter reads | ||
4307 | * @hw: pointer to the HW structure | ||
4308 | * @systim: cycle_t value read, sanitized and returned | ||
4309 | * | ||
4310 | * Errata for 82574/82583 possible bad bits read from SYSTIMH/L: | ||
4311 | * check to see that the time is incrementing at a reasonable | ||
4312 | * rate and is a multiple of incvalue. | ||
4313 | **/ | ||
4314 | static cycle_t e1000e_sanitize_systim(struct e1000_hw *hw, cycle_t systim) | ||
4315 | { | ||
4316 | u64 time_delta, rem, temp; | ||
4317 | cycle_t systim_next; | ||
4318 | u32 incvalue; | ||
4319 | int i; | ||
4320 | |||
4321 | incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK; | ||
4322 | for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) { | ||
4323 | /* latch SYSTIMH on read of SYSTIML */ | ||
4324 | systim_next = (cycle_t)er32(SYSTIML); | ||
4325 | systim_next |= (cycle_t)er32(SYSTIMH) << 32; | ||
4326 | |||
4327 | time_delta = systim_next - systim; | ||
4328 | temp = time_delta; | ||
4329 | /* VMWare users have seen incvalue of zero, don't div / 0 */ | ||
4330 | rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0); | ||
4331 | |||
4332 | systim = systim_next; | ||
4333 | |||
4334 | if ((time_delta < E1000_82574_SYSTIM_EPSILON) && (rem == 0)) | ||
4335 | break; | ||
4336 | } | ||
4337 | |||
4338 | return systim; | ||
4339 | } | ||
4340 | |||
4341 | /** | ||
4306 | * e1000e_cyclecounter_read - read raw cycle counter (used by time counter) | 4342 | * e1000e_cyclecounter_read - read raw cycle counter (used by time counter) |
4307 | * @cc: cyclecounter structure | 4343 | * @cc: cyclecounter structure |
4308 | **/ | 4344 | **/ |
@@ -4312,7 +4348,7 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc) | |||
4312 | cc); | 4348 | cc); |
4313 | struct e1000_hw *hw = &adapter->hw; | 4349 | struct e1000_hw *hw = &adapter->hw; |
4314 | u32 systimel, systimeh; | 4350 | u32 systimel, systimeh; |
4315 | cycle_t systim, systim_next; | 4351 | cycle_t systim; |
4316 | /* SYSTIMH latching upon SYSTIML read does not work well. | 4352 | /* SYSTIMH latching upon SYSTIML read does not work well. |
4317 | * This means that if SYSTIML overflows after we read it but before | 4353 | * This means that if SYSTIML overflows after we read it but before |
4318 | * we read SYSTIMH, the value of SYSTIMH has been incremented and we | 4354 | * we read SYSTIMH, the value of SYSTIMH has been incremented and we |
@@ -4335,33 +4371,9 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc) | |||
4335 | systim = (cycle_t)systimel; | 4371 | systim = (cycle_t)systimel; |
4336 | systim |= (cycle_t)systimeh << 32; | 4372 | systim |= (cycle_t)systimeh << 32; |
4337 | 4373 | ||
4338 | if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) { | 4374 | if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW) |
4339 | u64 time_delta, rem, temp; | 4375 | systim = e1000e_sanitize_systim(hw, systim); |
4340 | u32 incvalue; | ||
4341 | int i; | ||
4342 | |||
4343 | /* errata for 82574/82583 possible bad bits read from SYSTIMH/L | ||
4344 | * check to see that the time is incrementing at a reasonable | ||
4345 | * rate and is a multiple of incvalue | ||
4346 | */ | ||
4347 | incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK; | ||
4348 | for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) { | ||
4349 | /* latch SYSTIMH on read of SYSTIML */ | ||
4350 | systim_next = (cycle_t)er32(SYSTIML); | ||
4351 | systim_next |= (cycle_t)er32(SYSTIMH) << 32; | ||
4352 | |||
4353 | time_delta = systim_next - systim; | ||
4354 | temp = time_delta; | ||
4355 | /* VMWare users have seen incvalue of zero, don't div / 0 */ | ||
4356 | rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0); | ||
4357 | |||
4358 | systim = systim_next; | ||
4359 | 4376 | ||
4360 | if ((time_delta < E1000_82574_SYSTIM_EPSILON) && | ||
4361 | (rem == 0)) | ||
4362 | break; | ||
4363 | } | ||
4364 | } | ||
4365 | return systim; | 4377 | return systim; |
4366 | } | 4378 | } |
4367 | 4379 | ||
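Editor's note: the e1000e hunks above pull the 82574/82583 errata loop into e1000e_sanitize_systim() and gate it on FLAG2_CHECK_SYSTIM_OVERFLOW instead of hard-coded MAC types, which is also what lets the 82574, 82583 and pch_lpt info tables opt in. A hedged userspace sketch of the re-read loop (read_systim() fakes the SYSTIML/SYSTIMH pair; EPSILON is illustrative, not E1000_82574_SYSTIM_EPSILON):

#include <stdint.h>
#include <stdio.h>

#define MAX_REREADS 3
#define EPSILON     12345

static uint64_t fake_time;

/* Fakes latching SYSTIML/SYSTIMH; advances by one increment per read. */
static uint64_t read_systim(void)
{
	return fake_time += 8;
}

static uint64_t sanitize_systim(uint64_t systim, uint32_t incvalue)
{
	for (int i = 0; i < MAX_REREADS; i++) {
		uint64_t next = read_systim();
		uint64_t delta = next - systim;
		/* guard incvalue == 0, as the driver does */
		uint64_t rem = incvalue ? delta % incvalue : (delta != 0);

		systim = next;
		if (delta < EPSILON && rem == 0)
			break; /* plausible: small delta, whole increments */
	}
	return systim;
}

int main(void)
{
	printf("%llu\n",
	       (unsigned long long)sanitize_systim(read_systim(), 8));
	return 0;
}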
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 81c99e1be708..c6ac7a61812f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
@@ -4554,23 +4554,38 @@ static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf) | |||
4554 | **/ | 4554 | **/ |
4555 | static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg) | 4555 | static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg) |
4556 | { | 4556 | { |
4557 | int i, tc_unused = 0; | ||
4557 | u8 num_tc = 0; | 4558 | u8 num_tc = 0; |
4558 | int i; | 4559 | u8 ret = 0; |
4559 | 4560 | ||
4560 | /* Scan the ETS Config Priority Table to find | 4561 | /* Scan the ETS Config Priority Table to find |
4561 | * traffic class enabled for a given priority | 4562 | * traffic class enabled for a given priority |
4562 | * and use the traffic class index to get the | 4563 | * and create a bitmask of enabled TCs |
4563 | * number of traffic classes enabled | ||
4564 | */ | 4564 | */ |
4565 | for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { | 4565 | for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) |
4566 | if (dcbcfg->etscfg.prioritytable[i] > num_tc) | 4566 | num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]); |
4567 | num_tc = dcbcfg->etscfg.prioritytable[i]; | ||
4568 | } | ||
4569 | 4567 | ||
4570 | /* Traffic class index starts from zero so | 4568 | /* Now scan the bitmask to check for |
4571 | * increment to return the actual count | 4569 | * contiguous TCs starting with TC0 |
4572 | */ | 4570 | */ |
4573 | return num_tc + 1; | 4571 | for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { |
4572 | if (num_tc & BIT(i)) { | ||
4573 | if (!tc_unused) { | ||
4574 | ret++; | ||
4575 | } else { | ||
4576 | pr_err("Non-contiguous TC - Disabling DCB\n"); | ||
4577 | return 1; | ||
4578 | } | ||
4579 | } else { | ||
4580 | tc_unused = 1; | ||
4581 | } | ||
4582 | } | ||
4583 | |||
4584 | /* There is always at least TC0 */ | ||
4585 | if (!ret) | ||
4586 | ret = 1; | ||
4587 | |||
4588 | return ret; | ||
4574 | } | 4589 | } |
4575 | 4590 | ||
4576 | /** | 4591 | /** |
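Editor's note: the i40e hunk above stops deriving the TC count from the highest priority value and instead builds a bitmask of enabled TCs, accepting it only when the set bits run contiguously from TC0. A standalone sketch of that contiguity check:

#include <stdint.h>
#include <stdio.h>

/* A TC bitmap is valid only if its set bits start at TC0 with no
 * holes; anything else falls back to a single TC (DCB disabled). */
static int num_contiguous_tcs(uint8_t tc_bitmap, int max_tc)
{
	int count = 0, gap_seen = 0;

	for (int i = 0; i < max_tc; i++) {
		if (tc_bitmap & (1u << i)) {
			if (gap_seen)
				return 1; /* non-contiguous: disable DCB */
			count++;
		} else {
			gap_seen = 1;
		}
	}
	return count ? count : 1; /* there is always at least TC0 */
}

int main(void)
{
	printf("%d %d %d\n",
	       num_contiguous_tcs(0x07, 8),  /* 3 */
	       num_contiguous_tcs(0x05, 8),  /* hole -> 1 */
	       num_contiguous_tcs(0x00, 8)); /* 1 */
	return 0;
}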
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index e61b647f5f2a..336c103ae374 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c | |||
@@ -744,7 +744,8 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) | |||
744 | } | 744 | } |
745 | } | 745 | } |
746 | 746 | ||
747 | shhwtstamps.hwtstamp = ktime_sub_ns(shhwtstamps.hwtstamp, adjust); | 747 | shhwtstamps.hwtstamp = |
748 | ktime_add_ns(shhwtstamps.hwtstamp, adjust); | ||
748 | 749 | ||
749 | skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); | 750 | skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); |
750 | dev_kfree_skb_any(adapter->ptp_tx_skb); | 751 | dev_kfree_skb_any(adapter->ptp_tx_skb); |
@@ -767,13 +768,32 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, | |||
767 | struct sk_buff *skb) | 768 | struct sk_buff *skb) |
768 | { | 769 | { |
769 | __le64 *regval = (__le64 *)va; | 770 | __le64 *regval = (__le64 *)va; |
771 | struct igb_adapter *adapter = q_vector->adapter; | ||
772 | int adjust = 0; | ||
770 | 773 | ||
771 | /* The timestamp is recorded in little endian format. | 774 | /* The timestamp is recorded in little endian format. |
772 | * DWORD: 0 1 2 3 | 775 | * DWORD: 0 1 2 3 |
773 | * Field: Reserved Reserved SYSTIML SYSTIMH | 776 | * Field: Reserved Reserved SYSTIML SYSTIMH |
774 | */ | 777 | */ |
775 | igb_ptp_systim_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb), | 778 | igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), |
776 | le64_to_cpu(regval[1])); | 779 | le64_to_cpu(regval[1])); |
780 | |||
781 | /* adjust timestamp for the RX latency based on link speed */ | ||
782 | if (adapter->hw.mac.type == e1000_i210) { | ||
783 | switch (adapter->link_speed) { | ||
784 | case SPEED_10: | ||
785 | adjust = IGB_I210_RX_LATENCY_10; | ||
786 | break; | ||
787 | case SPEED_100: | ||
788 | adjust = IGB_I210_RX_LATENCY_100; | ||
789 | break; | ||
790 | case SPEED_1000: | ||
791 | adjust = IGB_I210_RX_LATENCY_1000; | ||
792 | break; | ||
793 | } | ||
794 | } | ||
795 | skb_hwtstamps(skb)->hwtstamp = | ||
796 | ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust); | ||
777 | } | 797 | } |
778 | 798 | ||
779 | /** | 799 | /** |
@@ -825,7 +845,7 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, | |||
825 | } | 845 | } |
826 | } | 846 | } |
827 | skb_hwtstamps(skb)->hwtstamp = | 847 | skb_hwtstamps(skb)->hwtstamp = |
828 | ktime_add_ns(skb_hwtstamps(skb)->hwtstamp, adjust); | 848 | ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust); |
829 | 849 | ||
830 | /* Update the last_rx_timestamp timer in order to enable watchdog check | 850 | /* Update the last_rx_timestamp timer in order to enable watchdog check |
831 | * for error case of latched timestamp on a dropped packet. | 851 | * for error case of latched timestamp on a dropped packet. |
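Editor's note: the igb_ptp hunks above flip the adjustment direction so TX timestamps gain the per-speed latency (ktime_add_ns) while RX timestamps shed it (ktime_sub_ns). A sketch of the RX side; the nanosecond values are placeholders, not the i210 datasheet latencies behind IGB_I210_RX_LATENCY_*:

#include <stdint.h>
#include <stdio.h>

/* RX timestamps are latched after the packet crossed the PHY, so the
 * per-speed latency is subtracted to recover the wire time. */
static int64_t rx_latency_ns(int speed_mbps)
{
	switch (speed_mbps) {
	case 10:   return 5000; /* placeholder */
	case 100:  return 500;  /* placeholder */
	case 1000: return 50;   /* placeholder */
	default:   return 0;
	}
}

int main(void)
{
	int64_t hwtstamp = 1000000;

	hwtstamp -= rx_latency_ns(1000); /* RX: subtract latency */
	printf("%lld\n", (long long)hwtstamp);
	return 0;
}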
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 5418c69a7463..b4f03748adc0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -4100,6 +4100,8 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) | |||
4100 | struct ixgbe_hw *hw = &adapter->hw; | 4100 | struct ixgbe_hw *hw = &adapter->hw; |
4101 | u32 vlnctrl, i; | 4101 | u32 vlnctrl, i; |
4102 | 4102 | ||
4103 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); | ||
4104 | |||
4103 | switch (hw->mac.type) { | 4105 | switch (hw->mac.type) { |
4104 | case ixgbe_mac_82599EB: | 4106 | case ixgbe_mac_82599EB: |
4105 | case ixgbe_mac_X540: | 4107 | case ixgbe_mac_X540: |
@@ -4112,8 +4114,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) | |||
4112 | /* fall through */ | 4114 | /* fall through */ |
4113 | case ixgbe_mac_82598EB: | 4115 | case ixgbe_mac_82598EB: |
4114 | /* legacy case, we can just disable VLAN filtering */ | 4116 | /* legacy case, we can just disable VLAN filtering */ |
4115 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); | 4117 | vlnctrl &= ~IXGBE_VLNCTRL_VFE; |
4116 | vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); | ||
4117 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); | 4118 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); |
4118 | return; | 4119 | return; |
4119 | } | 4120 | } |
@@ -4125,6 +4126,10 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) | |||
4125 | /* Set flag so we don't redo unnecessary work */ | 4126 | /* Set flag so we don't redo unnecessary work */ |
4126 | adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC; | 4127 | adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC; |
4127 | 4128 | ||
4129 | /* For VMDq and SR-IOV we must leave VLAN filtering enabled */ | ||
4130 | vlnctrl |= IXGBE_VLNCTRL_VFE; | ||
4131 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); | ||
4132 | |||
4128 | /* Add PF to all active pools */ | 4133 | /* Add PF to all active pools */ |
4129 | for (i = IXGBE_VLVF_ENTRIES; --i;) { | 4134 | for (i = IXGBE_VLVF_ENTRIES; --i;) { |
4130 | u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32); | 4135 | u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32); |
@@ -4191,6 +4196,11 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter) | |||
4191 | struct ixgbe_hw *hw = &adapter->hw; | 4196 | struct ixgbe_hw *hw = &adapter->hw; |
4192 | u32 vlnctrl, i; | 4197 | u32 vlnctrl, i; |
4193 | 4198 | ||
4199 | /* Set VLAN filtering to enabled */ | ||
4200 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); | ||
4201 | vlnctrl |= IXGBE_VLNCTRL_VFE; | ||
4202 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); | ||
4203 | |||
4194 | switch (hw->mac.type) { | 4204 | switch (hw->mac.type) { |
4195 | case ixgbe_mac_82599EB: | 4205 | case ixgbe_mac_82599EB: |
4196 | case ixgbe_mac_X540: | 4206 | case ixgbe_mac_X540: |
@@ -4202,10 +4212,6 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter) | |||
4202 | break; | 4212 | break; |
4203 | /* fall through */ | 4213 | /* fall through */ |
4204 | case ixgbe_mac_82598EB: | 4214 | case ixgbe_mac_82598EB: |
4205 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); | ||
4206 | vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; | ||
4207 | vlnctrl |= IXGBE_VLNCTRL_VFE; | ||
4208 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); | ||
4209 | return; | 4215 | return; |
4210 | } | 4216 | } |
4211 | 4217 | ||
@@ -8390,12 +8396,14 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter, | |||
8390 | struct tcf_exts *exts, u64 *action, u8 *queue) | 8396 | struct tcf_exts *exts, u64 *action, u8 *queue) |
8391 | { | 8397 | { |
8392 | const struct tc_action *a; | 8398 | const struct tc_action *a; |
8399 | LIST_HEAD(actions); | ||
8393 | int err; | 8400 | int err; |
8394 | 8401 | ||
8395 | if (tc_no_actions(exts)) | 8402 | if (tc_no_actions(exts)) |
8396 | return -EINVAL; | 8403 | return -EINVAL; |
8397 | 8404 | ||
8398 | tc_for_each_action(a, exts) { | 8405 | tcf_exts_to_list(exts, &actions); |
8406 | list_for_each_entry(a, &actions, list) { | ||
8399 | 8407 | ||
8400 | /* Drop action */ | 8408 | /* Drop action */ |
8401 | if (is_tcf_gact_shot(a)) { | 8409 | if (is_tcf_gact_shot(a)) { |
@@ -9517,6 +9525,7 @@ skip_sriov: | |||
9517 | 9525 | ||
9518 | /* copy netdev features into list of user selectable features */ | 9526 | /* copy netdev features into list of user selectable features */ |
9519 | netdev->hw_features |= netdev->features | | 9527 | netdev->hw_features |= netdev->features | |
9528 | NETIF_F_HW_VLAN_CTAG_FILTER | | ||
9520 | NETIF_F_HW_VLAN_CTAG_RX | | 9529 | NETIF_F_HW_VLAN_CTAG_RX | |
9521 | NETIF_F_HW_VLAN_CTAG_TX | | 9530 | NETIF_F_HW_VLAN_CTAG_TX | |
9522 | NETIF_F_RXALL | | 9531 | NETIF_F_RXALL | |
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index b57ae3afb994..f1609542adf1 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c | |||
@@ -245,12 +245,16 @@ static int mtk_phy_connect(struct mtk_mac *mac) | |||
245 | case PHY_INTERFACE_MODE_MII: | 245 | case PHY_INTERFACE_MODE_MII: |
246 | ge_mode = 1; | 246 | ge_mode = 1; |
247 | break; | 247 | break; |
248 | case PHY_INTERFACE_MODE_RMII: | 248 | case PHY_INTERFACE_MODE_REVMII: |
249 | ge_mode = 2; | 249 | ge_mode = 2; |
250 | break; | 250 | break; |
251 | case PHY_INTERFACE_MODE_RMII: | ||
252 | if (!mac->id) | ||
253 | goto err_phy; | ||
254 | ge_mode = 3; | ||
255 | break; | ||
251 | default: | 256 | default: |
252 | dev_err(eth->dev, "invalid phy_mode\n"); | 257 | goto err_phy; |
253 | return -1; | ||
254 | } | 258 | } |
255 | 259 | ||
256 | /* put the gmac into the right mode */ | 260 | /* put the gmac into the right mode */ |
@@ -263,13 +267,25 @@ static int mtk_phy_connect(struct mtk_mac *mac) | |||
263 | mac->phy_dev->autoneg = AUTONEG_ENABLE; | 267 | mac->phy_dev->autoneg = AUTONEG_ENABLE; |
264 | mac->phy_dev->speed = 0; | 268 | mac->phy_dev->speed = 0; |
265 | mac->phy_dev->duplex = 0; | 269 | mac->phy_dev->duplex = 0; |
270 | |||
271 | if (of_phy_is_fixed_link(mac->of_node)) | ||
272 | mac->phy_dev->supported |= | ||
273 | SUPPORTED_Pause | SUPPORTED_Asym_Pause; | ||
274 | |||
266 | mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause | | 275 | mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause | |
267 | SUPPORTED_Asym_Pause; | 276 | SUPPORTED_Asym_Pause; |
268 | mac->phy_dev->advertising = mac->phy_dev->supported | | 277 | mac->phy_dev->advertising = mac->phy_dev->supported | |
269 | ADVERTISED_Autoneg; | 278 | ADVERTISED_Autoneg; |
270 | phy_start_aneg(mac->phy_dev); | 279 | phy_start_aneg(mac->phy_dev); |
271 | 280 | ||
281 | of_node_put(np); | ||
282 | |||
272 | return 0; | 283 | return 0; |
284 | |||
285 | err_phy: | ||
286 | of_node_put(np); | ||
287 | dev_err(eth->dev, "invalid phy_mode\n"); | ||
288 | return -EINVAL; | ||
273 | } | 289 | } |
274 | 290 | ||
275 | static int mtk_mdio_init(struct mtk_eth *eth) | 291 | static int mtk_mdio_init(struct mtk_eth *eth) |
@@ -542,15 +558,15 @@ static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring, | |||
542 | return &ring->buf[idx]; | 558 | return &ring->buf[idx]; |
543 | } | 559 | } |
544 | 560 | ||
545 | static void mtk_tx_unmap(struct device *dev, struct mtk_tx_buf *tx_buf) | 561 | static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf) |
546 | { | 562 | { |
547 | if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { | 563 | if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { |
548 | dma_unmap_single(dev, | 564 | dma_unmap_single(eth->dev, |
549 | dma_unmap_addr(tx_buf, dma_addr0), | 565 | dma_unmap_addr(tx_buf, dma_addr0), |
550 | dma_unmap_len(tx_buf, dma_len0), | 566 | dma_unmap_len(tx_buf, dma_len0), |
551 | DMA_TO_DEVICE); | 567 | DMA_TO_DEVICE); |
552 | } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) { | 568 | } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) { |
553 | dma_unmap_page(dev, | 569 | dma_unmap_page(eth->dev, |
554 | dma_unmap_addr(tx_buf, dma_addr0), | 570 | dma_unmap_addr(tx_buf, dma_addr0), |
555 | dma_unmap_len(tx_buf, dma_len0), | 571 | dma_unmap_len(tx_buf, dma_len0), |
556 | DMA_TO_DEVICE); | 572 | DMA_TO_DEVICE); |
@@ -595,9 +611,9 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, | |||
595 | if (skb_vlan_tag_present(skb)) | 611 | if (skb_vlan_tag_present(skb)) |
596 | txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb); | 612 | txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb); |
597 | 613 | ||
598 | mapped_addr = dma_map_single(&dev->dev, skb->data, | 614 | mapped_addr = dma_map_single(eth->dev, skb->data, |
599 | skb_headlen(skb), DMA_TO_DEVICE); | 615 | skb_headlen(skb), DMA_TO_DEVICE); |
600 | if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) | 616 | if (unlikely(dma_mapping_error(eth->dev, mapped_addr))) |
601 | return -ENOMEM; | 617 | return -ENOMEM; |
602 | 618 | ||
603 | WRITE_ONCE(itxd->txd1, mapped_addr); | 619 | WRITE_ONCE(itxd->txd1, mapped_addr); |
@@ -623,10 +639,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, | |||
623 | 639 | ||
624 | n_desc++; | 640 | n_desc++; |
625 | frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN); | 641 | frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN); |
626 | mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset, | 642 | mapped_addr = skb_frag_dma_map(eth->dev, frag, offset, |
627 | frag_map_size, | 643 | frag_map_size, |
628 | DMA_TO_DEVICE); | 644 | DMA_TO_DEVICE); |
629 | if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) | 645 | if (unlikely(dma_mapping_error(eth->dev, mapped_addr))) |
630 | goto err_dma; | 646 | goto err_dma; |
631 | 647 | ||
632 | if (i == nr_frags - 1 && | 648 | if (i == nr_frags - 1 && |
@@ -679,7 +695,7 @@ err_dma: | |||
679 | tx_buf = mtk_desc_to_tx_buf(ring, itxd); | 695 | tx_buf = mtk_desc_to_tx_buf(ring, itxd); |
680 | 696 | ||
681 | /* unmap dma */ | 697 | /* unmap dma */ |
682 | mtk_tx_unmap(&dev->dev, tx_buf); | 698 | mtk_tx_unmap(eth, tx_buf); |
683 | 699 | ||
684 | itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; | 700 | itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; |
685 | itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2); | 701 | itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2); |
@@ -836,11 +852,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, | |||
836 | netdev->stats.rx_dropped++; | 852 | netdev->stats.rx_dropped++; |
837 | goto release_desc; | 853 | goto release_desc; |
838 | } | 854 | } |
839 | dma_addr = dma_map_single(ð->netdev[mac]->dev, | 855 | dma_addr = dma_map_single(eth->dev, |
840 | new_data + NET_SKB_PAD, | 856 | new_data + NET_SKB_PAD, |
841 | ring->buf_size, | 857 | ring->buf_size, |
842 | DMA_FROM_DEVICE); | 858 | DMA_FROM_DEVICE); |
843 | if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) { | 859 | if (unlikely(dma_mapping_error(eth->dev, dma_addr))) { |
844 | skb_free_frag(new_data); | 860 | skb_free_frag(new_data); |
845 | netdev->stats.rx_dropped++; | 861 | netdev->stats.rx_dropped++; |
846 | goto release_desc; | 862 | goto release_desc; |
@@ -855,7 +871,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, | |||
855 | } | 871 | } |
856 | skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); | 872 | skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); |
857 | 873 | ||
858 | dma_unmap_single(&netdev->dev, trxd.rxd1, | 874 | dma_unmap_single(eth->dev, trxd.rxd1, |
859 | ring->buf_size, DMA_FROM_DEVICE); | 875 | ring->buf_size, DMA_FROM_DEVICE); |
860 | pktlen = RX_DMA_GET_PLEN0(trxd.rxd2); | 876 | pktlen = RX_DMA_GET_PLEN0(trxd.rxd2); |
861 | skb->dev = netdev; | 877 | skb->dev = netdev; |
@@ -937,7 +953,7 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget) | |||
937 | done[mac]++; | 953 | done[mac]++; |
938 | budget--; | 954 | budget--; |
939 | } | 955 | } |
940 | mtk_tx_unmap(eth->dev, tx_buf); | 956 | mtk_tx_unmap(eth, tx_buf); |
941 | 957 | ||
942 | ring->last_free = desc; | 958 | ring->last_free = desc; |
943 | atomic_inc(&ring->free_count); | 959 | atomic_inc(&ring->free_count); |
@@ -1092,7 +1108,7 @@ static void mtk_tx_clean(struct mtk_eth *eth) | |||
1092 | 1108 | ||
1093 | if (ring->buf) { | 1109 | if (ring->buf) { |
1094 | for (i = 0; i < MTK_DMA_SIZE; i++) | 1110 | for (i = 0; i < MTK_DMA_SIZE; i++) |
1095 | mtk_tx_unmap(eth->dev, &ring->buf[i]); | 1111 | mtk_tx_unmap(eth, &ring->buf[i]); |
1096 | kfree(ring->buf); | 1112 | kfree(ring->buf); |
1097 | ring->buf = NULL; | 1113 | ring->buf = NULL; |
1098 | } | 1114 | } |
@@ -1751,6 +1767,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) | |||
1751 | goto free_netdev; | 1767 | goto free_netdev; |
1752 | } | 1768 | } |
1753 | spin_lock_init(&mac->hw_stats->stats_lock); | 1769 | spin_lock_init(&mac->hw_stats->stats_lock); |
1770 | u64_stats_init(&mac->hw_stats->syncp); | ||
1754 | mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET; | 1771 | mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET; |
1755 | 1772 | ||
1756 | SET_NETDEV_DEV(eth->netdev[id], eth->dev); | 1773 | SET_NETDEV_DEV(eth->netdev[id], eth->dev); |
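Editor's note: the mtk_eth_soc hunks above make every dma_map_*/dma_unmap_* call use eth->dev, the device that actually owns the DMA ops, instead of mixing in &netdev->dev. A toy sketch of the invariant they restore (map/unmap stand in for the DMA API):

#include <stdio.h>

struct device { const char *name; };

static void *map(struct device *dev, void *buf)
{
	printf("map on %s\n", dev->name);
	return buf;
}

static void unmap(struct device *dev, void *addr)
{
	printf("unmap on %s\n", dev->name); /* must be the mapping device */
	(void)addr;
}

int main(void)
{
	struct device dma_dev = { "eth->dev" }; /* the DMA-capable device */
	char buf[64];

	void *addr = map(&dma_dev, buf);
	unmap(&dma_dev, addr); /* not &netdev->dev, which has no DMA ops */
	return 0;
}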
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 0f19b01e3fff..dc8b1cb0fdc8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |||
@@ -318,6 +318,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | |||
318 | u32 *action, u32 *flow_tag) | 318 | u32 *action, u32 *flow_tag) |
319 | { | 319 | { |
320 | const struct tc_action *a; | 320 | const struct tc_action *a; |
321 | LIST_HEAD(actions); | ||
321 | 322 | ||
322 | if (tc_no_actions(exts)) | 323 | if (tc_no_actions(exts)) |
323 | return -EINVAL; | 324 | return -EINVAL; |
@@ -325,7 +326,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | |||
325 | *flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; | 326 | *flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; |
326 | *action = 0; | 327 | *action = 0; |
327 | 328 | ||
328 | tc_for_each_action(a, exts) { | 329 | tcf_exts_to_list(exts, &actions); |
330 | list_for_each_entry(a, &actions, list) { | ||
329 | /* Only support a single action per rule */ | 331 | /* Only support a single action per rule */ |
330 | if (*action) | 332 | if (*action) |
331 | return -EINVAL; | 333 | return -EINVAL; |
@@ -362,13 +364,15 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | |||
362 | u32 *action, u32 *dest_vport) | 364 | u32 *action, u32 *dest_vport) |
363 | { | 365 | { |
364 | const struct tc_action *a; | 366 | const struct tc_action *a; |
367 | LIST_HEAD(actions); | ||
365 | 368 | ||
366 | if (tc_no_actions(exts)) | 369 | if (tc_no_actions(exts)) |
367 | return -EINVAL; | 370 | return -EINVAL; |
368 | 371 | ||
369 | *action = 0; | 372 | *action = 0; |
370 | 373 | ||
371 | tc_for_each_action(a, exts) { | 374 | tcf_exts_to_list(exts, &actions); |
375 | list_for_each_entry(a, &actions, list) { | ||
372 | /* Only support a single action per rule */ | 376 | /* Only support a single action per rule */ |
373 | if (*action) | 377 | if (*action) |
374 | return -EINVAL; | 378 | return -EINVAL; |
@@ -503,6 +507,7 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv, | |||
503 | struct mlx5e_tc_flow *flow; | 507 | struct mlx5e_tc_flow *flow; |
504 | struct tc_action *a; | 508 | struct tc_action *a; |
505 | struct mlx5_fc *counter; | 509 | struct mlx5_fc *counter; |
510 | LIST_HEAD(actions); | ||
506 | u64 bytes; | 511 | u64 bytes; |
507 | u64 packets; | 512 | u64 packets; |
508 | u64 lastuse; | 513 | u64 lastuse; |
@@ -518,7 +523,8 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv, | |||
518 | 523 | ||
519 | mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); | 524 | mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); |
520 | 525 | ||
521 | tc_for_each_action(a, f->exts) | 526 | tcf_exts_to_list(f->exts, &actions); |
527 | list_for_each_entry(a, &actions, list) | ||
522 | tcf_action_stats_update(a, bytes, packets, lastuse); | 528 | tcf_action_stats_update(a, bytes, packets, lastuse); |
523 | 529 | ||
524 | return 0; | 530 | return 0; |
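Editor's note: the mlx5 hunks above (and the matching ixgbe and mlxsw spectrum ones) migrate from the tc_for_each_action() macro to materializing the actions with tcf_exts_to_list() and walking them with list_for_each_entry(). A much-simplified userspace sketch of that materialize-then-iterate shape (plain singly-linked nodes, not the kernel's list_head):

#include <stdio.h>

struct action { int id; struct action *next; };

/* Rough analogue of tcf_exts_to_list(): link the actions into a list
 * the caller can walk with a generic iterator. */
static struct action *exts_to_list(struct action *arr, int n)
{
	for (int i = 0; i + 1 < n; i++)
		arr[i].next = &arr[i + 1];
	if (n)
		arr[n - 1].next = NULL;
	return n ? arr : NULL;
}

int main(void)
{
	struct action acts[3] = { { .id = 1 }, { .id = 2 }, { .id = 3 } };

	for (struct action *a = exts_to_list(acts, 3); a; a = a->next)
		printf("action %d\n", a->id);
	return 0;
}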
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 7ca9201f7dcb..1721098eef13 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h | |||
@@ -3383,6 +3383,15 @@ MLXSW_ITEM32(reg, ritr, ipv4_fe, 0x04, 29, 1); | |||
3383 | */ | 3383 | */ |
3384 | MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1); | 3384 | MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1); |
3385 | 3385 | ||
3386 | /* reg_ritr_lb_en | ||
3387 | * Loop-back filter enable for unicast packets. | ||
3388 | * If the flag is set then loop-back filter for unicast packets is | ||
3389 | * implemented on the RIF. Multicast packets are always subject to | ||
3390 | * loop-back filtering. | ||
3391 | * Access: RW | ||
3392 | */ | ||
3393 | MLXSW_ITEM32(reg, ritr, lb_en, 0x04, 24, 1); | ||
3394 | |||
3386 | /* reg_ritr_virtual_router | 3395 | /* reg_ritr_virtual_router |
3387 | * Virtual router ID associated with the router interface. | 3396 | * Virtual router ID associated with the router interface. |
3388 | * Access: RW | 3397 | * Access: RW |
@@ -3484,6 +3493,7 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable, | |||
3484 | mlxsw_reg_ritr_op_set(payload, op); | 3493 | mlxsw_reg_ritr_op_set(payload, op); |
3485 | mlxsw_reg_ritr_rif_set(payload, rif); | 3494 | mlxsw_reg_ritr_rif_set(payload, rif); |
3486 | mlxsw_reg_ritr_ipv4_fe_set(payload, 1); | 3495 | mlxsw_reg_ritr_ipv4_fe_set(payload, 1); |
3496 | mlxsw_reg_ritr_lb_en_set(payload, 1); | ||
3487 | mlxsw_reg_ritr_mtu_set(payload, mtu); | 3497 | mlxsw_reg_ritr_mtu_set(payload, mtu); |
3488 | mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac); | 3498 | mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac); |
3489 | } | 3499 | } |
@@ -4000,6 +4010,7 @@ static inline void mlxsw_reg_ralue_pack(char *payload, | |||
4000 | { | 4010 | { |
4001 | MLXSW_REG_ZERO(ralue, payload); | 4011 | MLXSW_REG_ZERO(ralue, payload); |
4002 | mlxsw_reg_ralue_protocol_set(payload, protocol); | 4012 | mlxsw_reg_ralue_protocol_set(payload, protocol); |
4013 | mlxsw_reg_ralue_op_set(payload, op); | ||
4003 | mlxsw_reg_ralue_virtual_router_set(payload, virtual_router); | 4014 | mlxsw_reg_ralue_virtual_router_set(payload, virtual_router); |
4004 | mlxsw_reg_ralue_prefix_len_set(payload, prefix_len); | 4015 | mlxsw_reg_ralue_prefix_len_set(payload, prefix_len); |
4005 | mlxsw_reg_ralue_entry_type_set(payload, | 4016 | mlxsw_reg_ralue_entry_type_set(payload, |
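Editor's note: the reg.h hunk above fixes mlxsw_reg_ralue_pack(), which accepted an op argument but never encoded it into the payload. A toy reproduction; with -Wextra (whose -Wunused-parameter is not part of normal kernel builds) the buggy variant is flagged at compile time:

#include <stdio.h>

static void ralue_pack_buggy(unsigned char *payload, int op)
{				/* gcc -Wextra: unused parameter 'op' */
	payload[0] = 0;
}

static void ralue_pack_fixed(unsigned char *payload, int op)
{
	payload[0] = (unsigned char)op; /* op is now written, as in the fix */
}

int main(void)
{
	unsigned char p[1];

	ralue_pack_buggy(p, 3);
	ralue_pack_fixed(p, 3);
	printf("%d\n", p[0]);
	return 0;
}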
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index c3e61500819d..1f8168906811 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c | |||
@@ -942,8 +942,8 @@ static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport) | |||
942 | kfree(mlxsw_sp_vport); | 942 | kfree(mlxsw_sp_vport); |
943 | } | 943 | } |
944 | 944 | ||
945 | int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, | 945 | static int mlxsw_sp_port_add_vid(struct net_device *dev, |
946 | u16 vid) | 946 | __be16 __always_unused proto, u16 vid) |
947 | { | 947 | { |
948 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); | 948 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); |
949 | struct mlxsw_sp_port *mlxsw_sp_vport; | 949 | struct mlxsw_sp_port *mlxsw_sp_vport; |
@@ -956,16 +956,12 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, | |||
956 | if (!vid) | 956 | if (!vid) |
957 | return 0; | 957 | return 0; |
958 | 958 | ||
959 | if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) { | 959 | if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) |
960 | netdev_warn(dev, "VID=%d already configured\n", vid); | ||
961 | return 0; | 960 | return 0; |
962 | } | ||
963 | 961 | ||
964 | mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid); | 962 | mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid); |
965 | if (!mlxsw_sp_vport) { | 963 | if (!mlxsw_sp_vport) |
966 | netdev_err(dev, "Failed to create vPort for VID=%d\n", vid); | ||
967 | return -ENOMEM; | 964 | return -ENOMEM; |
968 | } | ||
969 | 965 | ||
970 | /* When adding the first VLAN interface on a bridged port we need to | 966 | /* When adding the first VLAN interface on a bridged port we need to |
971 | * transition all the active 802.1Q bridge VLANs to use explicit | 967 | * transition all the active 802.1Q bridge VLANs to use explicit |
@@ -973,24 +969,17 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, | |||
973 | */ | 969 | */ |
974 | if (list_is_singular(&mlxsw_sp_port->vports_list)) { | 970 | if (list_is_singular(&mlxsw_sp_port->vports_list)) { |
975 | err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port); | 971 | err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port); |
976 | if (err) { | 972 | if (err) |
977 | netdev_err(dev, "Failed to set to Virtual mode\n"); | ||
978 | goto err_port_vp_mode_trans; | 973 | goto err_port_vp_mode_trans; |
979 | } | ||
980 | } | 974 | } |
981 | 975 | ||
982 | err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); | 976 | err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); |
983 | if (err) { | 977 | if (err) |
984 | netdev_err(dev, "Failed to disable learning for VID=%d\n", vid); | ||
985 | goto err_port_vid_learning_set; | 978 | goto err_port_vid_learning_set; |
986 | } | ||
987 | 979 | ||
988 | err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged); | 980 | err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged); |
989 | if (err) { | 981 | if (err) |
990 | netdev_err(dev, "Failed to set VLAN membership for VID=%d\n", | ||
991 | vid); | ||
992 | goto err_port_add_vid; | 982 | goto err_port_add_vid; |
993 | } | ||
994 | 983 | ||
995 | return 0; | 984 | return 0; |
996 | 985 | ||
@@ -1010,7 +999,6 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev, | |||
1010 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); | 999 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); |
1011 | struct mlxsw_sp_port *mlxsw_sp_vport; | 1000 | struct mlxsw_sp_port *mlxsw_sp_vport; |
1012 | struct mlxsw_sp_fid *f; | 1001 | struct mlxsw_sp_fid *f; |
1013 | int err; | ||
1014 | 1002 | ||
1015 | /* VLAN 0 is removed from HW filter when device goes down, but | 1003 | /* VLAN 0 is removed from HW filter when device goes down, but |
1016 | * it is reserved in our case, so simply return. | 1004 | * it is reserved in our case, so simply return. |
@@ -1019,23 +1007,12 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev, | |||
1019 | return 0; | 1007 | return 0; |
1020 | 1008 | ||
1021 | mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); | 1009 | mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); |
1022 | if (!mlxsw_sp_vport) { | 1010 | if (WARN_ON(!mlxsw_sp_vport)) |
1023 | netdev_warn(dev, "VID=%d does not exist\n", vid); | ||
1024 | return 0; | 1011 | return 0; |
1025 | } | ||
1026 | 1012 | ||
1027 | err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false); | 1013 | mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false); |
1028 | if (err) { | ||
1029 | netdev_err(dev, "Failed to set VLAN membership for VID=%d\n", | ||
1030 | vid); | ||
1031 | return err; | ||
1032 | } | ||
1033 | 1014 | ||
1034 | err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); | 1015 | mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); |
1035 | if (err) { | ||
1036 | netdev_err(dev, "Failed to enable learning for VID=%d\n", vid); | ||
1037 | return err; | ||
1038 | } | ||
1039 | 1016 | ||
1040 | /* Drop FID reference. If this was the last reference the | 1017 | /* Drop FID reference. If this was the last reference the |
1041 | * resources will be freed. | 1018 | * resources will be freed. |
@@ -1048,13 +1025,8 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev, | |||
1048 | * transition all active 802.1Q bridge VLANs to use VID to FID | 1025 | * transition all active 802.1Q bridge VLANs to use VID to FID |
1049 | * mappings and set port's mode to VLAN mode. | 1026 | * mappings and set port's mode to VLAN mode. |
1050 | */ | 1027 | */ |
1051 | if (list_is_singular(&mlxsw_sp_port->vports_list)) { | 1028 | if (list_is_singular(&mlxsw_sp_port->vports_list)) |
1052 | err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); | 1029 | mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); |
1053 | if (err) { | ||
1054 | netdev_err(dev, "Failed to set to VLAN mode\n"); | ||
1055 | return err; | ||
1056 | } | ||
1057 | } | ||
1058 | 1030 | ||
1059 | mlxsw_sp_port_vport_destroy(mlxsw_sp_vport); | 1031 | mlxsw_sp_port_vport_destroy(mlxsw_sp_vport); |
1060 | 1032 | ||
@@ -1149,6 +1121,7 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, | |||
1149 | bool ingress) | 1121 | bool ingress) |
1150 | { | 1122 | { |
1151 | const struct tc_action *a; | 1123 | const struct tc_action *a; |
1124 | LIST_HEAD(actions); | ||
1152 | int err; | 1125 | int err; |
1153 | 1126 | ||
1154 | if (!tc_single_action(cls->exts)) { | 1127 | if (!tc_single_action(cls->exts)) { |
@@ -1156,7 +1129,8 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, | |||
1156 | return -ENOTSUPP; | 1129 | return -ENOTSUPP; |
1157 | } | 1130 | } |
1158 | 1131 | ||
1159 | tc_for_each_action(a, cls->exts) { | 1132 | tcf_exts_to_list(cls->exts, &actions); |
1133 | list_for_each_entry(a, &actions, list) { | ||
1160 | if (!is_tcf_mirred_mirror(a) || protocol != htons(ETH_P_ALL)) | 1134 | if (!is_tcf_mirred_mirror(a) || protocol != htons(ETH_P_ALL)) |
1161 | return -ENOTSUPP; | 1135 | return -ENOTSUPP; |
1162 | 1136 | ||
@@ -2076,6 +2050,18 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) | |||
2076 | return 0; | 2050 | return 0; |
2077 | } | 2051 | } |
2078 | 2052 | ||
2053 | static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port) | ||
2054 | { | ||
2055 | mlxsw_sp_port->pvid = 1; | ||
2056 | |||
2057 | return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1); | ||
2058 | } | ||
2059 | |||
2060 | static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port) | ||
2061 | { | ||
2062 | return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1); | ||
2063 | } | ||
2064 | |||
2079 | static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, | 2065 | static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, |
2080 | bool split, u8 module, u8 width, u8 lane) | 2066 | bool split, u8 module, u8 width, u8 lane) |
2081 | { | 2067 | { |
@@ -2191,7 +2177,15 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, | |||
2191 | goto err_port_dcb_init; | 2177 | goto err_port_dcb_init; |
2192 | } | 2178 | } |
2193 | 2179 | ||
2180 | err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port); | ||
2181 | if (err) { | ||
2182 | dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n", | ||
2183 | mlxsw_sp_port->local_port); | ||
2184 | goto err_port_pvid_vport_create; | ||
2185 | } | ||
2186 | |||
2194 | mlxsw_sp_port_switchdev_init(mlxsw_sp_port); | 2187 | mlxsw_sp_port_switchdev_init(mlxsw_sp_port); |
2188 | mlxsw_sp->ports[local_port] = mlxsw_sp_port; | ||
2195 | err = register_netdev(dev); | 2189 | err = register_netdev(dev); |
2196 | if (err) { | 2190 | if (err) { |
2197 | dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", | 2191 | dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", |
@@ -2208,24 +2202,23 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, | |||
2208 | goto err_core_port_init; | 2202 | goto err_core_port_init; |
2209 | } | 2203 | } |
2210 | 2204 | ||
2211 | err = mlxsw_sp_port_vlan_init(mlxsw_sp_port); | ||
2212 | if (err) | ||
2213 | goto err_port_vlan_init; | ||
2214 | |||
2215 | mlxsw_sp->ports[local_port] = mlxsw_sp_port; | ||
2216 | return 0; | 2205 | return 0; |
2217 | 2206 | ||
2218 | err_port_vlan_init: | ||
2219 | mlxsw_core_port_fini(&mlxsw_sp_port->core_port); | ||
2220 | err_core_port_init: | 2207 | err_core_port_init: |
2221 | unregister_netdev(dev); | 2208 | unregister_netdev(dev); |
2222 | err_register_netdev: | 2209 | err_register_netdev: |
2210 | mlxsw_sp->ports[local_port] = NULL; | ||
2211 | mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); | ||
2212 | mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port); | ||
2213 | err_port_pvid_vport_create: | ||
2214 | mlxsw_sp_port_dcb_fini(mlxsw_sp_port); | ||
2223 | err_port_dcb_init: | 2215 | err_port_dcb_init: |
2224 | err_port_ets_init: | 2216 | err_port_ets_init: |
2225 | err_port_buffers_init: | 2217 | err_port_buffers_init: |
2226 | err_port_admin_status_set: | 2218 | err_port_admin_status_set: |
2227 | err_port_mtu_set: | 2219 | err_port_mtu_set: |
2228 | err_port_speed_by_width_set: | 2220 | err_port_speed_by_width_set: |
2221 | mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); | ||
2229 | err_port_swid_set: | 2222 | err_port_swid_set: |
2230 | err_port_system_port_mapping_set: | 2223 | err_port_system_port_mapping_set: |
2231 | err_dev_addr_init: | 2224 | err_dev_addr_init: |
@@ -2245,12 +2238,12 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) | |||
2245 | 2238 | ||
2246 | if (!mlxsw_sp_port) | 2239 | if (!mlxsw_sp_port) |
2247 | return; | 2240 | return; |
2248 | mlxsw_sp->ports[local_port] = NULL; | ||
2249 | mlxsw_core_port_fini(&mlxsw_sp_port->core_port); | 2241 | mlxsw_core_port_fini(&mlxsw_sp_port->core_port); |
2250 | unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ | 2242 | unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ |
2251 | mlxsw_sp_port_dcb_fini(mlxsw_sp_port); | 2243 | mlxsw_sp->ports[local_port] = NULL; |
2252 | mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1); | ||
2253 | mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); | 2244 | mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); |
2245 | mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port); | ||
2246 | mlxsw_sp_port_dcb_fini(mlxsw_sp_port); | ||
2254 | mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); | 2247 | mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); |
2255 | mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port); | 2248 | mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port); |
2256 | free_percpu(mlxsw_sp_port->pcpu_stats); | 2249 | free_percpu(mlxsw_sp_port->pcpu_stats); |
@@ -2662,6 +2655,26 @@ static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = { | |||
2662 | { | 2655 | { |
2663 | .func = mlxsw_sp_rx_listener_func, | 2656 | .func = mlxsw_sp_rx_listener_func, |
2664 | .local_port = MLXSW_PORT_DONT_CARE, | 2657 | .local_port = MLXSW_PORT_DONT_CARE, |
2658 | .trap_id = MLXSW_TRAP_ID_MTUERROR, | ||
2659 | }, | ||
2660 | { | ||
2661 | .func = mlxsw_sp_rx_listener_func, | ||
2662 | .local_port = MLXSW_PORT_DONT_CARE, | ||
2663 | .trap_id = MLXSW_TRAP_ID_TTLERROR, | ||
2664 | }, | ||
2665 | { | ||
2666 | .func = mlxsw_sp_rx_listener_func, | ||
2667 | .local_port = MLXSW_PORT_DONT_CARE, | ||
2668 | .trap_id = MLXSW_TRAP_ID_LBERROR, | ||
2669 | }, | ||
2670 | { | ||
2671 | .func = mlxsw_sp_rx_listener_func, | ||
2672 | .local_port = MLXSW_PORT_DONT_CARE, | ||
2673 | .trap_id = MLXSW_TRAP_ID_OSPF, | ||
2674 | }, | ||
2675 | { | ||
2676 | .func = mlxsw_sp_rx_listener_func, | ||
2677 | .local_port = MLXSW_PORT_DONT_CARE, | ||
2665 | .trap_id = MLXSW_TRAP_ID_IP2ME, | 2678 | .trap_id = MLXSW_TRAP_ID_IP2ME, |
2666 | }, | 2679 | }, |
2667 | { | 2680 | { |
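
The reworked mlxsw_sp_port_create() error path above follows the usual kernel unwind idiom: a failing setup step jumps to a label named after itself, and that label undoes the previous step before falling through to the labels below it, so teardown always runs in reverse order of setup (mlxsw_sp_port_remove() mirrors the same order). A minimal userspace sketch of the idiom, with purely illustrative names:

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-ins for the driver's setup steps (swid set, DCB init,
     * PVID vPort create, ...). step_c fails to exercise the unwind. */
    static int step_a(void) { puts("a up"); return 0; }
    static void undo_a(void) { puts("a down"); }
    static int step_b(void) { puts("b up"); return 0; }
    static void undo_b(void) { puts("b down"); }
    static int step_c(void) { return -1; }

    static int port_create(void)
    {
            int err;

            err = step_a();
            if (err)
                    return err;     /* nothing to unwind yet */
            err = step_b();
            if (err)
                    goto err_step_b;
            err = step_c();
            if (err)
                    goto err_step_c;
            return 0;

    err_step_c:
            undo_b();               /* label named for the failing step... */
    err_step_b:
            undo_a();               /* ...undoes earlier steps, in reverse */
            return err;
    }

    int main(void)
    {
            return port_create() ? EXIT_FAILURE : EXIT_SUCCESS;
    }
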
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index f69aa37d1521..ab3feb81bd43 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h | |||
@@ -536,8 +536,6 @@ int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port, | |||
536 | u16 vid); | 536 | u16 vid); |
537 | int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, | 537 | int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, |
538 | u16 vid_end, bool is_member, bool untagged); | 538 | u16 vid_end, bool is_member, bool untagged); |
539 | int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, | ||
540 | u16 vid); | ||
541 | int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid, | 539 | int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid, |
542 | bool set); | 540 | bool set); |
543 | void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port); | 541 | void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port); |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 074cdda7b6f3..237418a0e6e0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | |||
@@ -330,7 +330,7 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = { | |||
330 | MLXSW_SP_CPU_PORT_SB_CM, | 330 | MLXSW_SP_CPU_PORT_SB_CM, |
331 | MLXSW_SP_CPU_PORT_SB_CM, | 331 | MLXSW_SP_CPU_PORT_SB_CM, |
332 | MLXSW_SP_CPU_PORT_SB_CM, | 332 | MLXSW_SP_CPU_PORT_SB_CM, |
333 | MLXSW_SP_CPU_PORT_SB_CM, | 333 | MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 0, 0), |
334 | MLXSW_SP_CPU_PORT_SB_CM, | 334 | MLXSW_SP_CPU_PORT_SB_CM, |
335 | MLXSW_SP_CPU_PORT_SB_CM, | 335 | MLXSW_SP_CPU_PORT_SB_CM, |
336 | MLXSW_SP_CPU_PORT_SB_CM, | 336 | MLXSW_SP_CPU_PORT_SB_CM, |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c index 01cfb7512827..b6ed7f7c531e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c | |||
@@ -341,6 +341,8 @@ static int mlxsw_sp_port_pfc_set(struct mlxsw_sp_port *mlxsw_sp_port, | |||
341 | char pfcc_pl[MLXSW_REG_PFCC_LEN]; | 341 | char pfcc_pl[MLXSW_REG_PFCC_LEN]; |
342 | 342 | ||
343 | mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port); | 343 | mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port); |
344 | mlxsw_reg_pfcc_pprx_set(pfcc_pl, mlxsw_sp_port->link.rx_pause); | ||
345 | mlxsw_reg_pfcc_pptx_set(pfcc_pl, mlxsw_sp_port->link.tx_pause); | ||
344 | mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc->pfc_en); | 346 | mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc->pfc_en); |
345 | 347 | ||
346 | return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc), | 348 | return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc), |
@@ -351,17 +353,17 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev, | |||
351 | struct ieee_pfc *pfc) | 353 | struct ieee_pfc *pfc) |
352 | { | 354 | { |
353 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); | 355 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); |
356 | bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); | ||
354 | int err; | 357 | int err; |
355 | 358 | ||
356 | if ((mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) && | 359 | if (pause_en && pfc->pfc_en) { |
357 | pfc->pfc_en) { | ||
358 | netdev_err(dev, "PAUSE frames already enabled on port\n"); | 360 | netdev_err(dev, "PAUSE frames already enabled on port\n"); |
359 | return -EINVAL; | 361 | return -EINVAL; |
360 | } | 362 | } |
361 | 363 | ||
362 | err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, | 364 | err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, |
363 | mlxsw_sp_port->dcb.ets->prio_tc, | 365 | mlxsw_sp_port->dcb.ets->prio_tc, |
364 | false, pfc); | 366 | pause_en, pfc); |
365 | if (err) { | 367 | if (err) { |
366 | netdev_err(dev, "Failed to configure port's headroom for PFC\n"); | 368 | netdev_err(dev, "Failed to configure port's headroom for PFC\n"); |
367 | return err; | 369 | return err; |
@@ -380,7 +382,7 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev, | |||
380 | 382 | ||
381 | err_port_pfc_set: | 383 | err_port_pfc_set: |
382 | __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, | 384 | __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, |
383 | mlxsw_sp_port->dcb.ets->prio_tc, false, | 385 | mlxsw_sp_port->dcb.ets->prio_tc, pause_en, |
384 | mlxsw_sp_port->dcb.pfc); | 386 | mlxsw_sp_port->dcb.pfc); |
385 | return err; | 387 | return err; |
386 | } | 388 | } |
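
The setpfc hunk above snapshots the port's PAUSE setting once via mlxsw_sp_port_is_pause_en() and feeds that same value to both the headroom update and the rollback in the error path; the old code hard-coded false, so a failed PFC update silently dropped PAUSE headroom. A small standalone model of the snapshot-and-reuse pattern (types and names are illustrative, not from the driver):

    #include <stdio.h>

    struct port { int pause_en; int headroom_pause; };

    static int headroom_set(struct port *p, int pause_en, int fail)
    {
            if (fail)
                    return -1;
            p->headroom_pause = pause_en;
            return 0;
    }

    static int set_pfc(struct port *p, int fail)
    {
            int pause_en = p->pause_en;     /* read the current state once */

            if (headroom_set(p, pause_en, fail)) {
                    /* rollback reuses the snapshot, keeping PAUSE intact */
                    headroom_set(p, pause_en, 0);
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            struct port p = { .pause_en = 1, .headroom_pause = 1 };

            set_pfc(&p, 1);         /* simulate a failed update */
            printf("pause headroom after failure: %d\n", p.headroom_pause);
            return 0;
    }
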
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 81418d629231..90bb93b037ec 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | |||
@@ -1651,9 +1651,10 @@ static void mlxsw_sp_router_fib4_add_info_destroy(void const *data) | |||
1651 | const struct mlxsw_sp_router_fib4_add_info *info = data; | 1651 | const struct mlxsw_sp_router_fib4_add_info *info = data; |
1652 | struct mlxsw_sp_fib_entry *fib_entry = info->fib_entry; | 1652 | struct mlxsw_sp_fib_entry *fib_entry = info->fib_entry; |
1653 | struct mlxsw_sp *mlxsw_sp = info->mlxsw_sp; | 1653 | struct mlxsw_sp *mlxsw_sp = info->mlxsw_sp; |
1654 | struct mlxsw_sp_vr *vr = fib_entry->vr; | ||
1654 | 1655 | ||
1655 | mlxsw_sp_fib_entry_destroy(fib_entry); | 1656 | mlxsw_sp_fib_entry_destroy(fib_entry); |
1656 | mlxsw_sp_vr_put(mlxsw_sp, fib_entry->vr); | 1657 | mlxsw_sp_vr_put(mlxsw_sp, vr); |
1657 | kfree(info); | 1658 | kfree(info); |
1658 | } | 1659 | } |
1659 | 1660 | ||
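
The spectrum_router.c change is a straightforward use-after-free fix: fib_entry->vr was dereferenced after mlxsw_sp_fib_entry_destroy() had freed the entry, so the pointer is now saved in a local first. The shape of the bug and the fix, as a self-contained sketch (the types here are made up for illustration):

    #include <stdlib.h>

    struct vr { int refcount; };
    struct fib_entry { struct vr *vr; };

    static void vr_put(struct vr *vr) { vr->refcount--; }

    static void destroy_entry(struct fib_entry *e)
    {
            struct vr *vr = e->vr;  /* snapshot before the entry is freed */

            free(e);                /* reading e->vr after this is a UAF */
            vr_put(vr);
    }

    int main(void)
    {
            struct vr vr = { .refcount = 1 };
            struct fib_entry *e = malloc(sizeof(*e));

            if (!e)
                    return 1;
            e->vr = &vr;
            destroy_entry(e);
            return vr.refcount;     /* exits 0 when the ref was dropped */
    }
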
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index a1ad5e6bdfa8..d1b59cdfacc1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | |||
@@ -450,6 +450,8 @@ void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f) | |||
450 | 450 | ||
451 | kfree(f); | 451 | kfree(f); |
452 | 452 | ||
453 | mlxsw_sp_fid_map(mlxsw_sp, fid, false); | ||
454 | |||
453 | mlxsw_sp_fid_op(mlxsw_sp, fid, false); | 455 | mlxsw_sp_fid_op(mlxsw_sp, fid, false); |
454 | } | 456 | } |
455 | 457 | ||
@@ -997,13 +999,13 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev, | |||
997 | } | 999 | } |
998 | 1000 | ||
999 | static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, | 1001 | static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, |
1000 | u16 vid_begin, u16 vid_end, bool init) | 1002 | u16 vid_begin, u16 vid_end) |
1001 | { | 1003 | { |
1002 | struct net_device *dev = mlxsw_sp_port->dev; | 1004 | struct net_device *dev = mlxsw_sp_port->dev; |
1003 | u16 vid, pvid; | 1005 | u16 vid, pvid; |
1004 | int err; | 1006 | int err; |
1005 | 1007 | ||
1006 | if (!init && !mlxsw_sp_port->bridged) | 1008 | if (!mlxsw_sp_port->bridged) |
1007 | return -EINVAL; | 1009 | return -EINVAL; |
1008 | 1010 | ||
1009 | err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, | 1011 | err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, |
@@ -1014,9 +1016,6 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, | |||
1014 | return err; | 1016 | return err; |
1015 | } | 1017 | } |
1016 | 1018 | ||
1017 | if (init) | ||
1018 | goto out; | ||
1019 | |||
1020 | pvid = mlxsw_sp_port->pvid; | 1019 | pvid = mlxsw_sp_port->pvid; |
1021 | if (pvid >= vid_begin && pvid <= vid_end) { | 1020 | if (pvid >= vid_begin && pvid <= vid_end) { |
1022 | err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0); | 1021 | err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0); |
@@ -1028,7 +1027,6 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, | |||
1028 | 1027 | ||
1029 | mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end); | 1028 | mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end); |
1030 | 1029 | ||
1031 | out: | ||
1032 | /* Changing activity bits only if HW operation succeeded */ | 1030 | /* Changing activity bits only if HW operation succeeded */
1033 | for (vid = vid_begin; vid <= vid_end; vid++) | 1031 | for (vid = vid_begin; vid <= vid_end; vid++) |
1034 | clear_bit(vid, mlxsw_sp_port->active_vlans); | 1032 | clear_bit(vid, mlxsw_sp_port->active_vlans); |
@@ -1039,8 +1037,8 @@ out: | |||
1039 | static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, | 1037 | static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, |
1040 | const struct switchdev_obj_port_vlan *vlan) | 1038 | const struct switchdev_obj_port_vlan *vlan) |
1041 | { | 1039 | { |
1042 | return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, | 1040 | return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vlan->vid_begin, |
1043 | vlan->vid_begin, vlan->vid_end, false); | 1041 | vlan->vid_end); |
1044 | } | 1042 | } |
1045 | 1043 | ||
1046 | void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port) | 1044 | void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port) |
@@ -1048,7 +1046,7 @@ void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port) | |||
1048 | u16 vid; | 1046 | u16 vid; |
1049 | 1047 | ||
1050 | for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) | 1048 | for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) |
1051 | __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid, false); | 1049 | __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid); |
1052 | } | 1050 | } |
1053 | 1051 | ||
1054 | static int | 1052 | static int |
@@ -1546,32 +1544,6 @@ void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp) | |||
1546 | mlxsw_sp_fdb_fini(mlxsw_sp); | 1544 | mlxsw_sp_fdb_fini(mlxsw_sp); |
1547 | } | 1545 | } |
1548 | 1546 | ||
1549 | int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port) | ||
1550 | { | ||
1551 | struct net_device *dev = mlxsw_sp_port->dev; | ||
1552 | int err; | ||
1553 | |||
1554 | /* Allow only untagged packets to ingress and tag them internally | ||
1555 | * with VID 1. | ||
1556 | */ | ||
1557 | mlxsw_sp_port->pvid = 1; | ||
1558 | err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID - 1, | ||
1559 | true); | ||
1560 | if (err) { | ||
1561 | netdev_err(dev, "Unable to init VLANs\n"); | ||
1562 | return err; | ||
1563 | } | ||
1564 | |||
1565 | /* Add implicit VLAN interface in the device, so that untagged | ||
1566 | * packets will be classified to the default vFID. | ||
1567 | */ | ||
1568 | err = mlxsw_sp_port_add_vid(dev, 0, 1); | ||
1569 | if (err) | ||
1570 | netdev_err(dev, "Failed to configure default vFID\n"); | ||
1571 | |||
1572 | return err; | ||
1573 | } | ||
1574 | |||
1575 | void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port) | 1547 | void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port) |
1576 | { | 1548 | { |
1577 | mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops; | 1549 | mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops; |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 470d7696e9fe..ed8e30186400 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h | |||
@@ -56,6 +56,10 @@ enum { | |||
56 | MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34, | 56 | MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34, |
57 | MLXSW_TRAP_ID_ARPBC = 0x50, | 57 | MLXSW_TRAP_ID_ARPBC = 0x50, |
58 | MLXSW_TRAP_ID_ARPUC = 0x51, | 58 | MLXSW_TRAP_ID_ARPUC = 0x51, |
59 | MLXSW_TRAP_ID_MTUERROR = 0x52, | ||
60 | MLXSW_TRAP_ID_TTLERROR = 0x53, | ||
61 | MLXSW_TRAP_ID_LBERROR = 0x54, | ||
62 | MLXSW_TRAP_ID_OSPF = 0x55, | ||
59 | MLXSW_TRAP_ID_IP2ME = 0x5F, | 63 | MLXSW_TRAP_ID_IP2ME = 0x5F, |
60 | MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70, | 64 | MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70, |
61 | MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90, | 65 | MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90, |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index d0dc28f93c0e..226cb08cc055 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c | |||
@@ -52,40 +52,94 @@ static bool qed_dcbx_app_ethtype(u32 app_info_bitmap) | |||
52 | DCBX_APP_SF_ETHTYPE); | 52 | DCBX_APP_SF_ETHTYPE); |
53 | } | 53 | } |
54 | 54 | ||
55 | static bool qed_dcbx_ieee_app_ethtype(u32 app_info_bitmap) | ||
56 | { | ||
57 | u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE); | ||
58 | |||
59 | /* Old MFW */ | ||
60 | if (mfw_val == DCBX_APP_SF_IEEE_RESERVED) | ||
61 | return qed_dcbx_app_ethtype(app_info_bitmap); | ||
62 | |||
63 | return !!(mfw_val == DCBX_APP_SF_IEEE_ETHTYPE); | ||
64 | } | ||
65 | |||
55 | static bool qed_dcbx_app_port(u32 app_info_bitmap) | 66 | static bool qed_dcbx_app_port(u32 app_info_bitmap) |
56 | { | 67 | { |
57 | return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) == | 68 | return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) == |
58 | DCBX_APP_SF_PORT); | 69 | DCBX_APP_SF_PORT); |
59 | } | 70 | } |
60 | 71 | ||
61 | static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id) | 72 | static bool qed_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type) |
62 | { | 73 | { |
63 | return !!(qed_dcbx_app_ethtype(app_info_bitmap) && | 74 | u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE); |
64 | proto_id == QED_ETH_TYPE_DEFAULT); | 75 | |
76 | /* Old MFW */ | ||
77 | if (mfw_val == DCBX_APP_SF_IEEE_RESERVED) | ||
78 | return qed_dcbx_app_port(app_info_bitmap); | ||
79 | |||
80 | return !!(mfw_val == type || mfw_val == DCBX_APP_SF_IEEE_TCP_UDP_PORT); | ||
65 | } | 81 | } |
66 | 82 | ||
67 | static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id) | 83 | static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) |
68 | { | 84 | { |
69 | return !!(qed_dcbx_app_port(app_info_bitmap) && | 85 | bool ethtype; |
70 | proto_id == QED_TCP_PORT_ISCSI); | 86 | |
87 | if (ieee) | ||
88 | ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap); | ||
89 | else | ||
90 | ethtype = qed_dcbx_app_ethtype(app_info_bitmap); | ||
91 | |||
92 | return !!(ethtype && (proto_id == QED_ETH_TYPE_DEFAULT)); | ||
71 | } | 93 | } |
72 | 94 | ||
73 | static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id) | 95 | static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) |
74 | { | 96 | { |
75 | return !!(qed_dcbx_app_ethtype(app_info_bitmap) && | 97 | bool port; |
76 | proto_id == QED_ETH_TYPE_FCOE); | 98 | |
99 | if (ieee) | ||
100 | port = qed_dcbx_ieee_app_port(app_info_bitmap, | ||
101 | DCBX_APP_SF_IEEE_TCP_PORT); | ||
102 | else | ||
103 | port = qed_dcbx_app_port(app_info_bitmap); | ||
104 | |||
105 | return !!(port && (proto_id == QED_TCP_PORT_ISCSI)); | ||
77 | } | 106 | } |
78 | 107 | ||
79 | static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id) | 108 | static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) |
80 | { | 109 | { |
81 | return !!(qed_dcbx_app_ethtype(app_info_bitmap) && | 110 | bool ethtype; |
82 | proto_id == QED_ETH_TYPE_ROCE); | 111 | |
112 | if (ieee) | ||
113 | ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap); | ||
114 | else | ||
115 | ethtype = qed_dcbx_app_ethtype(app_info_bitmap); | ||
116 | |||
117 | return !!(ethtype && (proto_id == QED_ETH_TYPE_FCOE)); | ||
83 | } | 118 | } |
84 | 119 | ||
85 | static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id) | 120 | static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) |
86 | { | 121 | { |
87 | return !!(qed_dcbx_app_port(app_info_bitmap) && | 122 | bool ethtype; |
88 | proto_id == QED_UDP_PORT_TYPE_ROCE_V2); | 123 | |
124 | if (ieee) | ||
125 | ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap); | ||
126 | else | ||
127 | ethtype = qed_dcbx_app_ethtype(app_info_bitmap); | ||
128 | |||
129 | return !!(ethtype && (proto_id == QED_ETH_TYPE_ROCE)); | ||
130 | } | ||
131 | |||
132 | static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) | ||
133 | { | ||
134 | bool port; | ||
135 | |||
136 | if (ieee) | ||
137 | port = qed_dcbx_ieee_app_port(app_info_bitmap, | ||
138 | DCBX_APP_SF_IEEE_UDP_PORT); | ||
139 | else | ||
140 | port = qed_dcbx_app_port(app_info_bitmap); | ||
141 | |||
142 | return !!(port && (proto_id == QED_UDP_PORT_TYPE_ROCE_V2)); | ||
89 | } | 143 | } |
90 | 144 | ||
91 | static void | 145 | static void |
@@ -164,17 +218,17 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, | |||
164 | static bool | 218 | static bool |
165 | qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn, | 219 | qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn, |
166 | u32 app_prio_bitmap, | 220 | u32 app_prio_bitmap, |
167 | u16 id, enum dcbx_protocol_type *type) | 221 | u16 id, enum dcbx_protocol_type *type, bool ieee) |
168 | { | 222 | { |
169 | if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id)) { | 223 | if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id, ieee)) { |
170 | *type = DCBX_PROTOCOL_FCOE; | 224 | *type = DCBX_PROTOCOL_FCOE; |
171 | } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id)) { | 225 | } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id, ieee)) { |
172 | *type = DCBX_PROTOCOL_ROCE; | 226 | *type = DCBX_PROTOCOL_ROCE; |
173 | } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id)) { | 227 | } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id, ieee)) { |
174 | *type = DCBX_PROTOCOL_ISCSI; | 228 | *type = DCBX_PROTOCOL_ISCSI; |
175 | } else if (qed_dcbx_default_tlv(app_prio_bitmap, id)) { | 229 | } else if (qed_dcbx_default_tlv(app_prio_bitmap, id, ieee)) { |
176 | *type = DCBX_PROTOCOL_ETH; | 230 | *type = DCBX_PROTOCOL_ETH; |
177 | } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id)) { | 231 | } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id, ieee)) { |
178 | *type = DCBX_PROTOCOL_ROCE_V2; | 232 | *type = DCBX_PROTOCOL_ROCE_V2; |
179 | } else { | 233 | } else { |
180 | *type = DCBX_MAX_PROTOCOL_TYPE; | 234 | *type = DCBX_MAX_PROTOCOL_TYPE; |
@@ -194,17 +248,18 @@ static int | |||
194 | qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, | 248 | qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, |
195 | struct qed_dcbx_results *p_data, | 249 | struct qed_dcbx_results *p_data, |
196 | struct dcbx_app_priority_entry *p_tbl, | 250 | struct dcbx_app_priority_entry *p_tbl, |
197 | u32 pri_tc_tbl, int count, bool dcbx_enabled) | 251 | u32 pri_tc_tbl, int count, u8 dcbx_version) |
198 | { | 252 | { |
199 | u8 tc, priority_map; | 253 | u8 tc, priority_map; |
200 | enum dcbx_protocol_type type; | 254 | enum dcbx_protocol_type type; |
255 | bool enable, ieee; | ||
201 | u16 protocol_id; | 256 | u16 protocol_id; |
202 | int priority; | 257 | int priority; |
203 | bool enable; | ||
204 | int i; | 258 | int i; |
205 | 259 | ||
206 | DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count); | 260 | DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count); |
207 | 261 | ||
262 | ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE); | ||
208 | /* Parse APP TLV */ | 263 | /* Parse APP TLV */ |
209 | for (i = 0; i < count; i++) { | 264 | for (i = 0; i < count; i++) { |
210 | protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry, | 265 | protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry, |
@@ -219,7 +274,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, | |||
219 | 274 | ||
220 | tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority); | 275 | tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority); |
221 | if (qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, | 276 | if (qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, |
222 | protocol_id, &type)) { | 277 | protocol_id, &type, ieee)) { |
223 | /* ETH always has the enable bit reset, as it gets | 278 | /* ETH always has the enable bit reset, as it gets
224 | * vlan information per packet. For other protocols, | 279 | * vlan information per packet. For other protocols, |
225 | * should be set according to the dcbx_enabled | 280 | * should be set according to the dcbx_enabled |
@@ -275,15 +330,12 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn) | |||
275 | struct dcbx_ets_feature *p_ets; | 330 | struct dcbx_ets_feature *p_ets; |
276 | struct qed_hw_info *p_info; | 331 | struct qed_hw_info *p_info; |
277 | u32 pri_tc_tbl, flags; | 332 | u32 pri_tc_tbl, flags; |
278 | bool dcbx_enabled; | 333 | u8 dcbx_version; |
279 | int num_entries; | 334 | int num_entries; |
280 | int rc = 0; | 335 | int rc = 0; |
281 | 336 | ||
282 | /* If DCBx version is non-zero, then negotiation was | ||
283 | * successfully performed | ||
284 | */ | ||
285 | flags = p_hwfn->p_dcbx_info->operational.flags; | 337 | flags = p_hwfn->p_dcbx_info->operational.flags; |
286 | dcbx_enabled = !!QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION); | 338 | dcbx_version = QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION); |
287 | 339 | ||
288 | p_app = &p_hwfn->p_dcbx_info->operational.features.app; | 340 | p_app = &p_hwfn->p_dcbx_info->operational.features.app; |
289 | p_tbl = p_app->app_pri_tbl; | 341 | p_tbl = p_app->app_pri_tbl; |
@@ -295,13 +347,13 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn) | |||
295 | num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES); | 347 | num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES); |
296 | 348 | ||
297 | rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl, | 349 | rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl, |
298 | num_entries, dcbx_enabled); | 350 | num_entries, dcbx_version); |
299 | if (rc) | 351 | if (rc) |
300 | return rc; | 352 | return rc; |
301 | 353 | ||
302 | p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS); | 354 | p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS); |
303 | data.pf_id = p_hwfn->rel_pf_id; | 355 | data.pf_id = p_hwfn->rel_pf_id; |
304 | data.dcbx_enabled = dcbx_enabled; | 356 | data.dcbx_enabled = !!dcbx_version; |
305 | 357 | ||
306 | qed_dcbx_dp_protocol(p_hwfn, &data); | 358 | qed_dcbx_dp_protocol(p_hwfn, &data); |
307 | 359 | ||
@@ -400,7 +452,7 @@ static void | |||
400 | qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn, | 452 | qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn, |
401 | struct dcbx_app_priority_feature *p_app, | 453 | struct dcbx_app_priority_feature *p_app, |
402 | struct dcbx_app_priority_entry *p_tbl, | 454 | struct dcbx_app_priority_entry *p_tbl, |
403 | struct qed_dcbx_params *p_params) | 455 | struct qed_dcbx_params *p_params, bool ieee) |
404 | { | 456 | { |
405 | struct qed_app_entry *entry; | 457 | struct qed_app_entry *entry; |
406 | u8 pri_map; | 458 | u8 pri_map; |
@@ -414,15 +466,46 @@ qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn, | |||
414 | DCBX_APP_NUM_ENTRIES); | 466 | DCBX_APP_NUM_ENTRIES); |
415 | for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { | 467 | for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { |
416 | entry = &p_params->app_entry[i]; | 468 | entry = &p_params->app_entry[i]; |
417 | entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry, | 469 | if (ieee) { |
418 | DCBX_APP_SF)); | 470 | u8 sf_ieee; |
471 | u32 val; | ||
472 | |||
473 | sf_ieee = QED_MFW_GET_FIELD(p_tbl[i].entry, | ||
474 | DCBX_APP_SF_IEEE); | ||
475 | switch (sf_ieee) { | ||
476 | case DCBX_APP_SF_IEEE_RESERVED: | ||
477 | /* Old MFW */ | ||
478 | val = QED_MFW_GET_FIELD(p_tbl[i].entry, | ||
479 | DCBX_APP_SF); | ||
480 | entry->sf_ieee = val ? | ||
481 | QED_DCBX_SF_IEEE_TCP_UDP_PORT : | ||
482 | QED_DCBX_SF_IEEE_ETHTYPE; | ||
483 | break; | ||
484 | case DCBX_APP_SF_IEEE_ETHTYPE: | ||
485 | entry->sf_ieee = QED_DCBX_SF_IEEE_ETHTYPE; | ||
486 | break; | ||
487 | case DCBX_APP_SF_IEEE_TCP_PORT: | ||
488 | entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_PORT; | ||
489 | break; | ||
490 | case DCBX_APP_SF_IEEE_UDP_PORT: | ||
491 | entry->sf_ieee = QED_DCBX_SF_IEEE_UDP_PORT; | ||
492 | break; | ||
493 | case DCBX_APP_SF_IEEE_TCP_UDP_PORT: | ||
494 | entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_UDP_PORT; | ||
495 | break; | ||
496 | } | ||
497 | } else { | ||
498 | entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry, | ||
499 | DCBX_APP_SF)); | ||
500 | } | ||
501 | |||
419 | pri_map = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP); | 502 | pri_map = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP); |
420 | entry->prio = ffs(pri_map) - 1; | 503 | entry->prio = ffs(pri_map) - 1; |
421 | entry->proto_id = QED_MFW_GET_FIELD(p_tbl[i].entry, | 504 | entry->proto_id = QED_MFW_GET_FIELD(p_tbl[i].entry, |
422 | DCBX_APP_PROTOCOL_ID); | 505 | DCBX_APP_PROTOCOL_ID); |
423 | qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, | 506 | qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, |
424 | entry->proto_id, | 507 | entry->proto_id, |
425 | &entry->proto_type); | 508 | &entry->proto_type, ieee); |
426 | } | 509 | } |
427 | 510 | ||
428 | DP_VERBOSE(p_hwfn, QED_MSG_DCB, | 511 | DP_VERBOSE(p_hwfn, QED_MSG_DCB, |
@@ -483,7 +566,7 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn, | |||
483 | bw_map[1] = be32_to_cpu(p_ets->tc_bw_tbl[1]); | 566 | bw_map[1] = be32_to_cpu(p_ets->tc_bw_tbl[1]); |
484 | tsa_map[0] = be32_to_cpu(p_ets->tc_tsa_tbl[0]); | 567 | tsa_map[0] = be32_to_cpu(p_ets->tc_tsa_tbl[0]); |
485 | tsa_map[1] = be32_to_cpu(p_ets->tc_tsa_tbl[1]); | 568 | tsa_map[1] = be32_to_cpu(p_ets->tc_tsa_tbl[1]); |
486 | pri_map = be32_to_cpu(p_ets->pri_tc_tbl[0]); | 569 | pri_map = p_ets->pri_tc_tbl[0]; |
487 | for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) { | 570 | for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) { |
488 | p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i]; | 571 | p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i]; |
489 | p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i]; | 572 | p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i]; |
@@ -500,9 +583,9 @@ qed_dcbx_get_common_params(struct qed_hwfn *p_hwfn, | |||
500 | struct dcbx_app_priority_feature *p_app, | 583 | struct dcbx_app_priority_feature *p_app, |
501 | struct dcbx_app_priority_entry *p_tbl, | 584 | struct dcbx_app_priority_entry *p_tbl, |
502 | struct dcbx_ets_feature *p_ets, | 585 | struct dcbx_ets_feature *p_ets, |
503 | u32 pfc, struct qed_dcbx_params *p_params) | 586 | u32 pfc, struct qed_dcbx_params *p_params, bool ieee) |
504 | { | 587 | { |
505 | qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params); | 588 | qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params, ieee); |
506 | qed_dcbx_get_ets_data(p_hwfn, p_ets, p_params); | 589 | qed_dcbx_get_ets_data(p_hwfn, p_ets, p_params); |
507 | qed_dcbx_get_pfc_data(p_hwfn, pfc, p_params); | 590 | qed_dcbx_get_pfc_data(p_hwfn, pfc, p_params); |
508 | } | 591 | } |
@@ -516,7 +599,7 @@ qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn, | |||
516 | p_feat = &p_hwfn->p_dcbx_info->local_admin.features; | 599 | p_feat = &p_hwfn->p_dcbx_info->local_admin.features; |
517 | qed_dcbx_get_common_params(p_hwfn, &p_feat->app, | 600 | qed_dcbx_get_common_params(p_hwfn, &p_feat->app, |
518 | p_feat->app.app_pri_tbl, &p_feat->ets, | 601 | p_feat->app.app_pri_tbl, &p_feat->ets, |
519 | p_feat->pfc, ¶ms->local.params); | 602 | p_feat->pfc, ¶ms->local.params, false); |
520 | params->local.valid = true; | 603 | params->local.valid = true; |
521 | } | 604 | } |
522 | 605 | ||
@@ -529,7 +612,7 @@ qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn, | |||
529 | p_feat = &p_hwfn->p_dcbx_info->remote.features; | 612 | p_feat = &p_hwfn->p_dcbx_info->remote.features; |
530 | qed_dcbx_get_common_params(p_hwfn, &p_feat->app, | 613 | qed_dcbx_get_common_params(p_hwfn, &p_feat->app, |
531 | p_feat->app.app_pri_tbl, &p_feat->ets, | 614 | p_feat->app.app_pri_tbl, &p_feat->ets, |
532 | p_feat->pfc, ¶ms->remote.params); | 615 | p_feat->pfc, ¶ms->remote.params, false); |
533 | params->remote.valid = true; | 616 | params->remote.valid = true; |
534 | } | 617 | } |
535 | 618 | ||
@@ -574,7 +657,8 @@ qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn, | |||
574 | 657 | ||
575 | qed_dcbx_get_common_params(p_hwfn, &p_feat->app, | 658 | qed_dcbx_get_common_params(p_hwfn, &p_feat->app, |
576 | p_feat->app.app_pri_tbl, &p_feat->ets, | 659 | p_feat->app.app_pri_tbl, &p_feat->ets, |
577 | p_feat->pfc, ¶ms->operational.params); | 660 | p_feat->pfc, ¶ms->operational.params, |
661 | p_operational->ieee); | ||
578 | qed_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio, p_results); | 662 | qed_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio, p_results); |
579 | err = QED_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR); | 663 | err = QED_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR); |
580 | p_operational->err = err; | 664 | p_operational->err = err; |
@@ -944,7 +1028,6 @@ qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn, | |||
944 | val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4)); | 1028 | val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4)); |
945 | p_ets->pri_tc_tbl[0] |= val; | 1029 | p_ets->pri_tc_tbl[0] |= val; |
946 | } | 1030 | } |
947 | p_ets->pri_tc_tbl[0] = cpu_to_be32(p_ets->pri_tc_tbl[0]); | ||
948 | for (i = 0; i < 2; i++) { | 1031 | for (i = 0; i < 2; i++) { |
949 | p_ets->tc_bw_tbl[i] = cpu_to_be32(p_ets->tc_bw_tbl[i]); | 1032 | p_ets->tc_bw_tbl[i] = cpu_to_be32(p_ets->tc_bw_tbl[i]); |
950 | p_ets->tc_tsa_tbl[i] = cpu_to_be32(p_ets->tc_tsa_tbl[i]); | 1033 | p_ets->tc_tsa_tbl[i] = cpu_to_be32(p_ets->tc_tsa_tbl[i]); |
@@ -954,7 +1037,7 @@ qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn, | |||
954 | static void | 1037 | static void |
955 | qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn, | 1038 | qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn, |
956 | struct dcbx_app_priority_feature *p_app, | 1039 | struct dcbx_app_priority_feature *p_app, |
957 | struct qed_dcbx_params *p_params) | 1040 | struct qed_dcbx_params *p_params, bool ieee) |
958 | { | 1041 | { |
959 | u32 *entry; | 1042 | u32 *entry; |
960 | int i; | 1043 | int i; |
@@ -975,12 +1058,36 @@ qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn, | |||
975 | 1058 | ||
976 | for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { | 1059 | for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { |
977 | entry = &p_app->app_pri_tbl[i].entry; | 1060 | entry = &p_app->app_pri_tbl[i].entry; |
978 | *entry &= ~DCBX_APP_SF_MASK; | 1061 | if (ieee) { |
979 | if (p_params->app_entry[i].ethtype) | 1062 | *entry &= ~DCBX_APP_SF_IEEE_MASK; |
980 | *entry |= ((u32)DCBX_APP_SF_ETHTYPE << | 1063 | switch (p_params->app_entry[i].sf_ieee) { |
981 | DCBX_APP_SF_SHIFT); | 1064 | case QED_DCBX_SF_IEEE_ETHTYPE: |
982 | else | 1065 | *entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE << |
983 | *entry |= ((u32)DCBX_APP_SF_PORT << DCBX_APP_SF_SHIFT); | 1066 | DCBX_APP_SF_IEEE_SHIFT); |
1067 | break; | ||
1068 | case QED_DCBX_SF_IEEE_TCP_PORT: | ||
1069 | *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT << | ||
1070 | DCBX_APP_SF_IEEE_SHIFT); | ||
1071 | break; | ||
1072 | case QED_DCBX_SF_IEEE_UDP_PORT: | ||
1073 | *entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT << | ||
1074 | DCBX_APP_SF_IEEE_SHIFT); | ||
1075 | break; | ||
1076 | case QED_DCBX_SF_IEEE_TCP_UDP_PORT: | ||
1077 | *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT << | ||
1078 | DCBX_APP_SF_IEEE_SHIFT); | ||
1079 | break; | ||
1080 | } | ||
1081 | } else { | ||
1082 | *entry &= ~DCBX_APP_SF_MASK; | ||
1083 | if (p_params->app_entry[i].ethtype) | ||
1084 | *entry |= ((u32)DCBX_APP_SF_ETHTYPE << | ||
1085 | DCBX_APP_SF_SHIFT); | ||
1086 | else | ||
1087 | *entry |= ((u32)DCBX_APP_SF_PORT << | ||
1088 | DCBX_APP_SF_SHIFT); | ||
1089 | } | ||
1090 | |||
984 | *entry &= ~DCBX_APP_PROTOCOL_ID_MASK; | 1091 | *entry &= ~DCBX_APP_PROTOCOL_ID_MASK; |
985 | *entry |= ((u32)p_params->app_entry[i].proto_id << | 1092 | *entry |= ((u32)p_params->app_entry[i].proto_id << |
986 | DCBX_APP_PROTOCOL_ID_SHIFT); | 1093 | DCBX_APP_PROTOCOL_ID_SHIFT); |
@@ -995,15 +1102,19 @@ qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn, | |||
995 | struct dcbx_local_params *local_admin, | 1102 | struct dcbx_local_params *local_admin, |
996 | struct qed_dcbx_set *params) | 1103 | struct qed_dcbx_set *params) |
997 | { | 1104 | { |
1105 | bool ieee = false; | ||
1106 | |||
998 | local_admin->flags = 0; | 1107 | local_admin->flags = 0; |
999 | memcpy(&local_admin->features, | 1108 | memcpy(&local_admin->features, |
1000 | &p_hwfn->p_dcbx_info->operational.features, | 1109 | &p_hwfn->p_dcbx_info->operational.features, |
1001 | sizeof(local_admin->features)); | 1110 | sizeof(local_admin->features)); |
1002 | 1111 | ||
1003 | if (params->enabled) | 1112 | if (params->enabled) { |
1004 | local_admin->config = params->ver_num; | 1113 | local_admin->config = params->ver_num; |
1005 | else | 1114 | ieee = !!(params->ver_num & DCBX_CONFIG_VERSION_IEEE); |
1115 | } else { | ||
1006 | local_admin->config = DCBX_CONFIG_VERSION_DISABLED; | 1116 | local_admin->config = DCBX_CONFIG_VERSION_DISABLED; |
1117 | } | ||
1007 | 1118 | ||
1008 | if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG) | 1119 | if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG) |
1009 | qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc, | 1120 | qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc, |
@@ -1015,7 +1126,7 @@ qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn, | |||
1015 | 1126 | ||
1016 | if (params->override_flags & QED_DCBX_OVERRIDE_APP_CFG) | 1127 | if (params->override_flags & QED_DCBX_OVERRIDE_APP_CFG) |
1017 | qed_dcbx_set_app_data(p_hwfn, &local_admin->features.app, | 1128 | qed_dcbx_set_app_data(p_hwfn, &local_admin->features.app, |
1018 | ¶ms->config.params); | 1129 | ¶ms->config.params, ieee); |
1019 | } | 1130 | } |
1020 | 1131 | ||
1021 | int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, | 1132 | int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, |
@@ -1596,8 +1707,10 @@ static int qed_dcbnl_setapp(struct qed_dev *cdev, | |||
1596 | if ((entry->ethtype == ethtype) && (entry->proto_id == idval)) | 1707 | if ((entry->ethtype == ethtype) && (entry->proto_id == idval)) |
1597 | break; | 1708 | break; |
1598 | /* First empty slot */ | 1709 | /* First empty slot */ |
1599 | if (!entry->proto_id) | 1710 | if (!entry->proto_id) { |
1711 | dcbx_set.config.params.num_app_entries++; | ||
1600 | break; | 1712 | break; |
1713 | } | ||
1601 | } | 1714 | } |
1602 | 1715 | ||
1603 | if (i == QED_DCBX_MAX_APP_PROTOCOL) { | 1716 | if (i == QED_DCBX_MAX_APP_PROTOCOL) { |
@@ -2117,8 +2230,10 @@ int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app) | |||
2117 | (entry->proto_id == app->protocol)) | 2230 | (entry->proto_id == app->protocol)) |
2118 | break; | 2231 | break; |
2119 | /* First empty slot */ | 2232 | /* First empty slot */ |
2120 | if (!entry->proto_id) | 2233 | if (!entry->proto_id) { |
2234 | dcbx_set.config.params.num_app_entries++; | ||
2121 | break; | 2235 | break; |
2236 | } | ||
2122 | } | 2237 | } |
2123 | 2238 | ||
2124 | if (i == QED_DCBX_MAX_APP_PROTOCOL) { | 2239 | if (i == QED_DCBX_MAX_APP_PROTOCOL) { |
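
The qed DCBX rework keys every app-TLV classifier on whether the negotiated mode is IEEE: in IEEE mode the new four-bit DCBX_APP_SF_IEEE field selects ethtype/TCP/UDP/TCP+UDP matching, and a reserved value (old MFW images) falls back to the legacy one-bit selection field. A compilable sketch of that lookup-with-fallback, using illustrative mask/shift values and a GET_FIELD helper modeled on QED_MFW_GET_FIELD (the qed_dcbx_ieee_app_port() variant additionally accepts TCP_UDP_PORT as a match, omitted here for brevity):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative field layout; only the shape matches the driver. */
    #define APP_SF_MASK        0x00000300u
    #define APP_SF_SHIFT       8
    #define APP_SF_IEEE_MASK   0x0000f000u
    #define APP_SF_IEEE_SHIFT  12

    #define GET_FIELD(val, name) (((val) & name##_MASK) >> name##_SHIFT)

    enum sf_ieee { SF_IEEE_RESERVED, SF_IEEE_ETHTYPE, SF_IEEE_TCP_PORT,
                   SF_IEEE_UDP_PORT, SF_IEEE_TCP_UDP_PORT };

    static bool app_is_ethtype(uint32_t entry, bool ieee)
    {
            uint32_t sf = GET_FIELD(entry, APP_SF_IEEE);

            /* Old MFW leaves the IEEE field reserved: fall back to the
             * legacy one-bit field, where 0 means "match on ethertype". */
            if (!ieee || sf == SF_IEEE_RESERVED)
                    return GET_FIELD(entry, APP_SF) == 0;
            return sf == SF_IEEE_ETHTYPE;
    }

    int main(void)
    {
            uint32_t entry = (uint32_t)SF_IEEE_ETHTYPE << APP_SF_IEEE_SHIFT;

            printf("%d\n", app_is_ethtype(entry, true));    /* prints 1 */
            return 0;
    }
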
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 592784019994..6f9d3b831a2a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h | |||
@@ -6850,6 +6850,14 @@ struct dcbx_app_priority_entry { | |||
6850 | #define DCBX_APP_SF_SHIFT 8 | 6850 | #define DCBX_APP_SF_SHIFT 8 |
6851 | #define DCBX_APP_SF_ETHTYPE 0 | 6851 | #define DCBX_APP_SF_ETHTYPE 0 |
6852 | #define DCBX_APP_SF_PORT 1 | 6852 | #define DCBX_APP_SF_PORT 1 |
6853 | #define DCBX_APP_SF_IEEE_MASK 0x0000f000 | ||
6854 | #define DCBX_APP_SF_IEEE_SHIFT 12 | ||
6855 | #define DCBX_APP_SF_IEEE_RESERVED 0 | ||
6856 | #define DCBX_APP_SF_IEEE_ETHTYPE 1 | ||
6857 | #define DCBX_APP_SF_IEEE_TCP_PORT 2 | ||
6858 | #define DCBX_APP_SF_IEEE_UDP_PORT 3 | ||
6859 | #define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4 | ||
6860 | |||
6853 | #define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000 | 6861 | #define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000 |
6854 | #define DCBX_APP_PROTOCOL_ID_SHIFT 16 | 6862 | #define DCBX_APP_PROTOCOL_ID_SHIFT 16 |
6855 | }; | 6863 | }; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index fd973f4f16c7..49bad00a0f8f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | |||
@@ -37,8 +37,8 @@ | |||
37 | 37 | ||
38 | #define _QLCNIC_LINUX_MAJOR 5 | 38 | #define _QLCNIC_LINUX_MAJOR 5 |
39 | #define _QLCNIC_LINUX_MINOR 3 | 39 | #define _QLCNIC_LINUX_MINOR 3 |
40 | #define _QLCNIC_LINUX_SUBVERSION 64 | 40 | #define _QLCNIC_LINUX_SUBVERSION 65 |
41 | #define QLCNIC_LINUX_VERSIONID "5.3.64" | 41 | #define QLCNIC_LINUX_VERSIONID "5.3.65" |
42 | #define QLCNIC_DRV_IDC_VER 0x01 | 42 | #define QLCNIC_DRV_IDC_VER 0x01 |
43 | #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ | 43 | #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ |
44 | (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) | 44 | (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 87c642d3b075..fedd7366713c 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | |||
@@ -102,7 +102,6 @@ | |||
102 | #define QLCNIC_RESPONSE_DESC 0x05 | 102 | #define QLCNIC_RESPONSE_DESC 0x05 |
103 | #define QLCNIC_LRO_DESC 0x12 | 103 | #define QLCNIC_LRO_DESC 0x12 |
104 | 104 | ||
105 | #define QLCNIC_TX_POLL_BUDGET 128 | ||
106 | #define QLCNIC_TCP_HDR_SIZE 20 | 105 | #define QLCNIC_TCP_HDR_SIZE 20 |
107 | #define QLCNIC_TCP_TS_OPTION_SIZE 12 | 106 | #define QLCNIC_TCP_TS_OPTION_SIZE 12 |
108 | #define QLCNIC_FETCH_RING_ID(handle) ((handle) >> 63) | 107 | #define QLCNIC_FETCH_RING_ID(handle) ((handle) >> 63) |
@@ -2008,7 +2007,6 @@ static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget) | |||
2008 | struct qlcnic_host_tx_ring *tx_ring; | 2007 | struct qlcnic_host_tx_ring *tx_ring; |
2009 | struct qlcnic_adapter *adapter; | 2008 | struct qlcnic_adapter *adapter; |
2010 | 2009 | ||
2011 | budget = QLCNIC_TX_POLL_BUDGET; | ||
2012 | tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi); | 2010 | tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi); |
2013 | adapter = tx_ring->adapter; | 2011 | adapter = tx_ring->adapter; |
2014 | work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget); | 2012 | work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget); |
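
Dropping QLCNIC_TX_POLL_BUDGET matters because a NAPI poll callback must honor the budget it is handed: processing more than the budget, while net_rx_action() accounts only for what it granted, breaks the completion protocol. The contract, reduced to a toy model:

    #include <stdio.h>

    /* A poll routine may consume at most 'budget' units of work and
     * signals completion by returning strictly less than the budget;
     * overriding the budget upward (as the removed define did) makes
     * that signal meaningless. */
    static int pending = 300;

    static int poll(int budget)
    {
            int done = pending < budget ? pending : budget;

            pending -= done;
            return done;
    }

    int main(void)
    {
            int budget = 64, done;

            do {
                    done = poll(budget);
                    printf("polled %d, %d left\n", done, pending);
            } while (done == budget);       /* done < budget: finished */
            return 0;
    }
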
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h index 017d8c2c8285..24061b9b92e8 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h | |||
@@ -156,10 +156,8 @@ struct qlcnic_vf_info { | |||
156 | spinlock_t vlan_list_lock; /* Lock for VLAN list */ | 156 | spinlock_t vlan_list_lock; /* Lock for VLAN list */ |
157 | }; | 157 | }; |
158 | 158 | ||
159 | struct qlcnic_async_work_list { | 159 | struct qlcnic_async_cmd { |
160 | struct list_head list; | 160 | struct list_head list; |
161 | struct work_struct work; | ||
162 | void *ptr; | ||
163 | struct qlcnic_cmd_args *cmd; | 161 | struct qlcnic_cmd_args *cmd; |
164 | }; | 162 | }; |
165 | 163 | ||
@@ -168,7 +166,10 @@ struct qlcnic_back_channel { | |||
168 | struct workqueue_struct *bc_trans_wq; | 166 | struct workqueue_struct *bc_trans_wq; |
169 | struct workqueue_struct *bc_async_wq; | 167 | struct workqueue_struct *bc_async_wq; |
170 | struct workqueue_struct *bc_flr_wq; | 168 | struct workqueue_struct *bc_flr_wq; |
171 | struct list_head async_list; | 169 | struct qlcnic_adapter *adapter; |
170 | struct list_head async_cmd_list; | ||
171 | struct work_struct vf_async_work; | ||
172 | spinlock_t queue_lock; /* async_cmd_list queue lock */ | ||
172 | }; | 173 | }; |
173 | 174 | ||
174 | struct qlcnic_sriov { | 175 | struct qlcnic_sriov { |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 7327b729ba2e..d7107055ec60 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #define QLC_83XX_VF_RESET_FAIL_THRESH 8 | 29 | #define QLC_83XX_VF_RESET_FAIL_THRESH 8 |
30 | #define QLC_BC_CMD_MAX_RETRY_CNT 5 | 30 | #define QLC_BC_CMD_MAX_RETRY_CNT 5 |
31 | 31 | ||
32 | static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work); | ||
32 | static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *); | 33 | static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *); |
33 | static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32); | 34 | static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32); |
34 | static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *); | 35 | static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *); |
@@ -177,7 +178,10 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs) | |||
177 | } | 178 | } |
178 | 179 | ||
179 | bc->bc_async_wq = wq; | 180 | bc->bc_async_wq = wq; |
180 | INIT_LIST_HEAD(&bc->async_list); | 181 | INIT_LIST_HEAD(&bc->async_cmd_list); |
182 | INIT_WORK(&bc->vf_async_work, qlcnic_sriov_handle_async_issue_cmd); | ||
183 | spin_lock_init(&bc->queue_lock); | ||
184 | bc->adapter = adapter; | ||
181 | 185 | ||
182 | for (i = 0; i < num_vfs; i++) { | 186 | for (i = 0; i < num_vfs; i++) { |
183 | vf = &sriov->vf_info[i]; | 187 | vf = &sriov->vf_info[i]; |
@@ -1517,17 +1521,21 @@ static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac, | |||
1517 | 1521 | ||
1518 | void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc) | 1522 | void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc) |
1519 | { | 1523 | { |
1520 | struct list_head *head = &bc->async_list; | 1524 | struct list_head *head = &bc->async_cmd_list; |
1521 | struct qlcnic_async_work_list *entry; | 1525 | struct qlcnic_async_cmd *entry; |
1522 | 1526 | ||
1523 | flush_workqueue(bc->bc_async_wq); | 1527 | flush_workqueue(bc->bc_async_wq); |
1528 | cancel_work_sync(&bc->vf_async_work); | ||
1529 | |||
1530 | spin_lock(&bc->queue_lock); | ||
1524 | while (!list_empty(head)) { | 1531 | while (!list_empty(head)) { |
1525 | entry = list_entry(head->next, struct qlcnic_async_work_list, | 1532 | entry = list_entry(head->next, struct qlcnic_async_cmd, |
1526 | list); | 1533 | list); |
1527 | cancel_work_sync(&entry->work); | ||
1528 | list_del(&entry->list); | 1534 | list_del(&entry->list); |
1535 | kfree(entry->cmd); | ||
1529 | kfree(entry); | 1536 | kfree(entry); |
1530 | } | 1537 | } |
1538 | spin_unlock(&bc->queue_lock); | ||
1531 | } | 1539 | } |
1532 | 1540 | ||
1533 | void qlcnic_sriov_vf_set_multi(struct net_device *netdev) | 1541 | void qlcnic_sriov_vf_set_multi(struct net_device *netdev) |
@@ -1587,57 +1595,64 @@ void qlcnic_sriov_vf_set_multi(struct net_device *netdev) | |||
1587 | 1595 | ||
1588 | static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work) | 1596 | static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work) |
1589 | { | 1597 | { |
1590 | struct qlcnic_async_work_list *entry; | 1598 | struct qlcnic_async_cmd *entry, *tmp; |
1591 | struct qlcnic_adapter *adapter; | 1599 | struct qlcnic_back_channel *bc; |
1592 | struct qlcnic_cmd_args *cmd; | 1600 | struct qlcnic_cmd_args *cmd; |
1601 | struct list_head *head; | ||
1602 | LIST_HEAD(del_list); | ||
1603 | |||
1604 | bc = container_of(work, struct qlcnic_back_channel, vf_async_work); | ||
1605 | head = &bc->async_cmd_list; | ||
1606 | |||
1607 | spin_lock(&bc->queue_lock); | ||
1608 | list_splice_init(head, &del_list); | ||
1609 | spin_unlock(&bc->queue_lock); | ||
1610 | |||
1611 | list_for_each_entry_safe(entry, tmp, &del_list, list) { | ||
1612 | list_del(&entry->list); | ||
1613 | cmd = entry->cmd; | ||
1614 | __qlcnic_sriov_issue_cmd(bc->adapter, cmd); | ||
1615 | kfree(entry); | ||
1616 | } | ||
1617 | |||
1618 | if (!list_empty(head)) | ||
1619 | queue_work(bc->bc_async_wq, &bc->vf_async_work); | ||
1593 | 1620 | ||
1594 | entry = container_of(work, struct qlcnic_async_work_list, work); | ||
1595 | adapter = entry->ptr; | ||
1596 | cmd = entry->cmd; | ||
1597 | __qlcnic_sriov_issue_cmd(adapter, cmd); | ||
1598 | return; | 1621 | return; |
1599 | } | 1622 | } |
1600 | 1623 | ||
1601 | static struct qlcnic_async_work_list * | 1624 | static struct qlcnic_async_cmd * |
1602 | qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc) | 1625 | qlcnic_sriov_alloc_async_cmd(struct qlcnic_back_channel *bc, |
1626 | struct qlcnic_cmd_args *cmd) | ||
1603 | { | 1627 | { |
1604 | struct list_head *node; | 1628 | struct qlcnic_async_cmd *entry = NULL; |
1605 | struct qlcnic_async_work_list *entry = NULL; | ||
1606 | u8 empty = 0; | ||
1607 | 1629 | ||
1608 | list_for_each(node, &bc->async_list) { | 1630 | entry = kzalloc(sizeof(*entry), GFP_ATOMIC); |
1609 | entry = list_entry(node, struct qlcnic_async_work_list, list); | 1631 | if (!entry) |
1610 | if (!work_pending(&entry->work)) { | 1632 | return NULL; |
1611 | empty = 1; | ||
1612 | break; | ||
1613 | } | ||
1614 | } | ||
1615 | 1633 | ||
1616 | if (!empty) { | 1634 | entry->cmd = cmd; |
1617 | entry = kzalloc(sizeof(struct qlcnic_async_work_list), | 1635 | |
1618 | GFP_ATOMIC); | 1636 | spin_lock(&bc->queue_lock); |
1619 | if (entry == NULL) | 1637 | list_add_tail(&entry->list, &bc->async_cmd_list); |
1620 | return NULL; | 1638 | spin_unlock(&bc->queue_lock); |
1621 | list_add_tail(&entry->list, &bc->async_list); | ||
1622 | } | ||
1623 | 1639 | ||
1624 | return entry; | 1640 | return entry; |
1625 | } | 1641 | } |
1626 | 1642 | ||
1627 | static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc, | 1643 | static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc, |
1628 | work_func_t func, void *data, | ||
1629 | struct qlcnic_cmd_args *cmd) | 1644 | struct qlcnic_cmd_args *cmd) |
1630 | { | 1645 | { |
1631 | struct qlcnic_async_work_list *entry = NULL; | 1646 | struct qlcnic_async_cmd *entry = NULL; |
1632 | 1647 | ||
1633 | entry = qlcnic_sriov_get_free_node_async_work(bc); | 1648 | entry = qlcnic_sriov_alloc_async_cmd(bc, cmd); |
1634 | if (!entry) | 1649 | if (!entry) { |
1650 | qlcnic_free_mbx_args(cmd); | ||
1651 | kfree(cmd); | ||
1635 | return; | 1652 | return; |
1653 | } | ||
1636 | 1654 | ||
1637 | entry->ptr = data; | 1655 | queue_work(bc->bc_async_wq, &bc->vf_async_work); |
1638 | entry->cmd = cmd; | ||
1639 | INIT_WORK(&entry->work, func); | ||
1640 | queue_work(bc->bc_async_wq, &entry->work); | ||
1641 | } | 1656 | } |
1642 | 1657 | ||
1643 | static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter, | 1658 | static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter, |
@@ -1649,8 +1664,8 @@ static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter, | |||
1649 | if (adapter->need_fw_reset) | 1664 | if (adapter->need_fw_reset) |
1650 | return -EIO; | 1665 | return -EIO; |
1651 | 1666 | ||
1652 | qlcnic_sriov_schedule_async_cmd(bc, qlcnic_sriov_handle_async_issue_cmd, | 1667 | qlcnic_sriov_schedule_async_cmd(bc, cmd); |
1653 | adapter, cmd); | 1668 | |
1654 | return 0; | 1669 | return 0; |
1655 | } | 1670 | } |
1656 | 1671 | ||
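
The qlcnic SR-IOV rework replaces one work item per asynchronous command, located by scanning the list for an entry whose work was not pending, with a single work item that drains a spinlock-protected command list; that removes the reuse race and lets cleanup simply splice and free whatever is still queued. A userspace model of the splice-and-drain pattern, using a pthread mutex in place of the spinlock (the list is simplified to a singly linked LIFO; the driver adds at the tail):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct cmd {
            int id;
            struct cmd *next;
    };

    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct cmd *head;

    static void queue_cmd(int id)
    {
            struct cmd *c = malloc(sizeof(*c));

            if (!c)
                    return;
            c->id = id;
            pthread_mutex_lock(&queue_lock);
            c->next = head;
            head = c;
            pthread_mutex_unlock(&queue_lock);
    }

    static void *worker(void *arg)
    {
            struct cmd *batch, *c;

            (void)arg;
            pthread_mutex_lock(&queue_lock);
            batch = head;           /* splice the whole list out... */
            head = NULL;
            pthread_mutex_unlock(&queue_lock);

            while ((c = batch)) {   /* ...then issue without the lock held */
                    batch = c->next;
                    printf("issue cmd %d\n", c->id);
                    free(c);
            }
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            for (int i = 0; i < 4; i++)
                    queue_cmd(i);
            pthread_create(&t, NULL, worker, NULL);
            pthread_join(t, NULL);
            return 0;
    }
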
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index c51f34693eae..f85d605e4560 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
@@ -734,6 +734,7 @@ static void cpsw_rx_handler(void *token, int len, int status) | |||
734 | netif_receive_skb(skb); | 734 | netif_receive_skb(skb); |
735 | ndev->stats.rx_bytes += len; | 735 | ndev->stats.rx_bytes += len; |
736 | ndev->stats.rx_packets++; | 736 | ndev->stats.rx_packets++; |
737 | kmemleak_not_leak(new_skb); | ||
737 | } else { | 738 | } else { |
738 | ndev->stats.rx_dropped++; | 739 | ndev->stats.rx_dropped++; |
739 | new_skb = skb; | 740 | new_skb = skb; |
@@ -1325,6 +1326,7 @@ static int cpsw_ndo_open(struct net_device *ndev) | |||
1325 | kfree_skb(skb); | 1326 | kfree_skb(skb); |
1326 | goto err_cleanup; | 1327 | goto err_cleanup; |
1327 | } | 1328 | } |
1329 | kmemleak_not_leak(skb); | ||
1328 | } | 1330 | } |
1329 | /* continue even if we didn't manage to submit all | 1331 | /* continue even if we didn't manage to submit all |
1330 | * receive descs | 1332 | * receive descs |
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index 01a77145a0fa..8fd131207ee1 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c | |||
@@ -166,6 +166,7 @@ static struct platform_driver tsi_eth_driver = { | |||
166 | 166 | ||
167 | static void tsi108_timed_checker(unsigned long dev_ptr); | 167 | static void tsi108_timed_checker(unsigned long dev_ptr); |
168 | 168 | ||
169 | #ifdef DEBUG | ||
169 | static void dump_eth_one(struct net_device *dev) | 170 | static void dump_eth_one(struct net_device *dev) |
170 | { | 171 | { |
171 | struct tsi108_prv_data *data = netdev_priv(dev); | 172 | struct tsi108_prv_data *data = netdev_priv(dev); |
@@ -190,6 +191,7 @@ static void dump_eth_one(struct net_device *dev) | |||
190 | TSI_READ(TSI108_EC_RXESTAT), | 191 | TSI_READ(TSI108_EC_RXESTAT), |
191 | TSI_READ(TSI108_EC_RXERR), data->rxpending); | 192 | TSI_READ(TSI108_EC_RXERR), data->rxpending); |
192 | } | 193 | } |
194 | #endif | ||
193 | 195 | ||
194 | /* Synchronization is needed between the thread and up/down events. | 196 | /* Synchronization is needed between the thread and up/down events. |
195 | * Note that the PHY is accessed through the same registers for both | 197 | * Note that the PHY is accessed through the same registers for both |
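
The tsi108 hunk is a warning fix: dump_eth_one() is referenced only from debug code, so wrapping it in #ifdef DEBUG compiles it out rather than tripping -Wunused-function in normal builds. The same technique in miniature (build with and without -DDEBUG):

    #include <stdio.h>

    #ifdef DEBUG
    /* Helper that only debug builds reference; without the guard a
     * -Wunused-function warning fires in release builds. */
    static void dump_state(int rxpending)
    {
            printf("rxpending: %d\n", rxpending);
    }
    #endif

    int main(void)
    {
    #ifdef DEBUG
            dump_state(0);
    #endif
            return 0;
    }
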
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 467fb8b4d083..591af71eae56 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h | |||
@@ -644,12 +644,6 @@ struct netvsc_reconfig { | |||
644 | u32 event; | 644 | u32 event; |
645 | }; | 645 | }; |
646 | 646 | ||
647 | struct garp_wrk { | ||
648 | struct work_struct dwrk; | ||
649 | struct net_device *netdev; | ||
650 | struct netvsc_device *netvsc_dev; | ||
651 | }; | ||
652 | |||
653 | /* The context of the netvsc device */ | 647 | /* The context of the netvsc device */ |
654 | struct net_device_context { | 648 | struct net_device_context { |
655 | /* point back to our device context */ | 649 | /* point back to our device context */ |
@@ -667,7 +661,6 @@ struct net_device_context { | |||
667 | 661 | ||
668 | struct work_struct work; | 662 | struct work_struct work; |
669 | u32 msg_enable; /* debug level */ | 663 | u32 msg_enable; /* debug level */ |
670 | struct garp_wrk gwrk; | ||
671 | 664 | ||
672 | struct netvsc_stats __percpu *tx_stats; | 665 | struct netvsc_stats __percpu *tx_stats; |
673 | struct netvsc_stats __percpu *rx_stats; | 666 | struct netvsc_stats __percpu *rx_stats; |
@@ -678,6 +671,15 @@ struct net_device_context { | |||
678 | 671 | ||
679 | /* the device is going away */ | 672 | /* the device is going away */ |
680 | bool start_remove; | 673 | bool start_remove; |
674 | |||
675 | /* State to manage the associated VF interface. */ | ||
676 | struct net_device *vf_netdev; | ||
677 | bool vf_inject; | ||
678 | atomic_t vf_use_cnt; | ||
679 | /* 1: allocated, serial number is valid. 0: not allocated */ | ||
680 | u32 vf_alloc; | ||
681 | /* Serial number of the VF to team with */ | ||
682 | u32 vf_serial; | ||
681 | }; | 683 | }; |
682 | 684 | ||
683 | /* Per netvsc device */ | 685 | /* Per netvsc device */ |
@@ -733,15 +735,7 @@ struct netvsc_device { | |||
733 | u32 max_pkt; /* max number of pkt in one send, e.g. 8 */ | 735 | u32 max_pkt; /* max number of pkt in one send, e.g. 8 */ |
734 | u32 pkt_align; /* alignment bytes, e.g. 8 */ | 736 | u32 pkt_align; /* alignment bytes, e.g. 8 */ |
735 | 737 | ||
736 | /* 1: allocated, serial number is valid. 0: not allocated */ | ||
737 | u32 vf_alloc; | ||
738 | /* Serial number of the VF to team with */ | ||
739 | u32 vf_serial; | ||
740 | atomic_t open_cnt; | 738 | atomic_t open_cnt; |
741 | /* State to manage the associated VF interface. */ | ||
742 | bool vf_inject; | ||
743 | struct net_device *vf_netdev; | ||
744 | atomic_t vf_use_cnt; | ||
745 | }; | 739 | }; |
746 | 740 | ||
747 | static inline struct netvsc_device * | 741 | static inline struct netvsc_device * |
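
The hyperv change moves the VF bookkeeping (vf_netdev, vf_inject, vf_alloc, vf_serial, vf_use_cnt) from struct netvsc_device into struct net_device_context: the netvsc device is torn down and re-created on events such as an MTU change, while the per-netdev context persists, so state describing the companion VF must live in the longer-lived object. A minimal model of the lifetime argument (structure names abbreviated, fields illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct netvsc_dev { int dummy; };

    struct ndev_ctx {
            int vf_alloc;           /* survives nvdev re-creation */
            struct netvsc_dev *nvdev;
    };

    static void recreate_nvdev(struct ndev_ctx *ctx)
    {
            free(ctx->nvdev);       /* short-lived device torn down... */
            ctx->nvdev = calloc(1, sizeof(*ctx->nvdev));    /* ...and rebuilt */
    }

    int main(void)
    {
            struct ndev_ctx ctx = { .vf_alloc = 1, .nvdev = NULL };

            recreate_nvdev(&ctx);
            printf("vf_alloc=%d\n", ctx.vf_alloc);  /* still 1 */
            free(ctx.nvdev);
            return 0;
    }
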
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 20e09174ff62..410fb8e81376 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c | |||
@@ -77,13 +77,9 @@ static struct netvsc_device *alloc_net_device(void) | |||
77 | init_waitqueue_head(&net_device->wait_drain); | 77 | init_waitqueue_head(&net_device->wait_drain); |
78 | net_device->destroy = false; | 78 | net_device->destroy = false; |
79 | atomic_set(&net_device->open_cnt, 0); | 79 | atomic_set(&net_device->open_cnt, 0); |
80 | atomic_set(&net_device->vf_use_cnt, 0); | ||
81 | net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; | 80 | net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; |
82 | net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; | 81 | net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; |
83 | 82 | ||
84 | net_device->vf_netdev = NULL; | ||
85 | net_device->vf_inject = false; | ||
86 | |||
87 | return net_device; | 83 | return net_device; |
88 | } | 84 | } |
89 | 85 | ||
@@ -1106,16 +1102,16 @@ static void netvsc_send_table(struct hv_device *hdev, | |||
1106 | nvscdev->send_table[i] = tab[i]; | 1102 | nvscdev->send_table[i] = tab[i]; |
1107 | } | 1103 | } |
1108 | 1104 | ||
1109 | static void netvsc_send_vf(struct netvsc_device *nvdev, | 1105 | static void netvsc_send_vf(struct net_device_context *net_device_ctx, |
1110 | struct nvsp_message *nvmsg) | 1106 | struct nvsp_message *nvmsg) |
1111 | { | 1107 | { |
1112 | nvdev->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated; | 1108 | net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated; |
1113 | nvdev->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial; | 1109 | net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial; |
1114 | } | 1110 | } |
1115 | 1111 | ||
1116 | static inline void netvsc_receive_inband(struct hv_device *hdev, | 1112 | static inline void netvsc_receive_inband(struct hv_device *hdev, |
1117 | struct netvsc_device *nvdev, | 1113 | struct net_device_context *net_device_ctx, |
1118 | struct nvsp_message *nvmsg) | 1114 | struct nvsp_message *nvmsg) |
1119 | { | 1115 | { |
1120 | switch (nvmsg->hdr.msg_type) { | 1116 | switch (nvmsg->hdr.msg_type) { |
1121 | case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE: | 1117 | case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE: |
@@ -1123,7 +1119,7 @@ static inline void netvsc_receive_inband(struct hv_device *hdev, | |||
1123 | break; | 1119 | break; |
1124 | 1120 | ||
1125 | case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION: | 1121 | case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION: |
1126 | netvsc_send_vf(nvdev, nvmsg); | 1122 | netvsc_send_vf(net_device_ctx, nvmsg); |
1127 | break; | 1123 | break; |
1128 | } | 1124 | } |
1129 | } | 1125 | } |
@@ -1136,6 +1132,7 @@ static void netvsc_process_raw_pkt(struct hv_device *device, | |||
1136 | struct vmpacket_descriptor *desc) | 1132 | struct vmpacket_descriptor *desc) |
1137 | { | 1133 | { |
1138 | struct nvsp_message *nvmsg; | 1134 | struct nvsp_message *nvmsg; |
1135 | struct net_device_context *net_device_ctx = netdev_priv(ndev); | ||
1139 | 1136 | ||
1140 | nvmsg = (struct nvsp_message *)((unsigned long) | 1137 | nvmsg = (struct nvsp_message *)((unsigned long) |
1141 | desc + (desc->offset8 << 3)); | 1138 | desc + (desc->offset8 << 3)); |
@@ -1150,7 +1147,7 @@ static void netvsc_process_raw_pkt(struct hv_device *device, | |||
1150 | break; | 1147 | break; |
1151 | 1148 | ||
1152 | case VM_PKT_DATA_INBAND: | 1149 | case VM_PKT_DATA_INBAND: |
1153 | netvsc_receive_inband(device, net_device, nvmsg); | 1150 | netvsc_receive_inband(device, net_device_ctx, nvmsg); |
1154 | break; | 1151 | break; |
1155 | 1152 | ||
1156 | default: | 1153 | default: |
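
With the state relocated, netvsc_send_vf() writes vf_alloc/vf_serial into the context, and netvsc_process_raw_pkt() obtains that context from the net_device with netdev_priv(). A rough userspace analogue of that accessor, simplified in that the real kernel also rounds the offset up to NETDEV_ALIGN:

#include <stdio.h>
#include <stdlib.h>

struct net_device_sketch {
        char name[16];
};

/* private context is co-allocated directly after the public struct */
static void *netdev_priv_sketch(struct net_device_sketch *dev)
{
        return (char *)dev + sizeof(*dev);
}

int main(void)
{
        struct net_device_sketch *dev =
                calloc(1, sizeof(*dev) + sizeof(int));
        int *vf_serial = netdev_priv_sketch(dev);   /* the "context" */

        *vf_serial = 7;
        printf("vf_serial=%d\n", *vf_serial);
        free(dev);
        return 0;
}
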
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 41bd952cc28d..3ba29fc80d05 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
@@ -658,20 +658,19 @@ int netvsc_recv_callback(struct hv_device *device_obj, | |||
658 | struct sk_buff *skb; | 658 | struct sk_buff *skb; |
659 | struct sk_buff *vf_skb; | 659 | struct sk_buff *vf_skb; |
660 | struct netvsc_stats *rx_stats; | 660 | struct netvsc_stats *rx_stats; |
661 | struct netvsc_device *netvsc_dev = net_device_ctx->nvdev; | ||
662 | u32 bytes_recvd = packet->total_data_buflen; | 661 | u32 bytes_recvd = packet->total_data_buflen; |
663 | int ret = 0; | 662 | int ret = 0; |
664 | 663 | ||
665 | if (!net || net->reg_state != NETREG_REGISTERED) | 664 | if (!net || net->reg_state != NETREG_REGISTERED) |
666 | return NVSP_STAT_FAIL; | 665 | return NVSP_STAT_FAIL; |
667 | 666 | ||
668 | if (READ_ONCE(netvsc_dev->vf_inject)) { | 667 | if (READ_ONCE(net_device_ctx->vf_inject)) { |
669 | atomic_inc(&netvsc_dev->vf_use_cnt); | 668 | atomic_inc(&net_device_ctx->vf_use_cnt); |
670 | if (!READ_ONCE(netvsc_dev->vf_inject)) { | 669 | if (!READ_ONCE(net_device_ctx->vf_inject)) { |
671 | /* | 670 | /* |
672 | * We raced; just move on. | 671 | * We raced; just move on. |
673 | */ | 672 | */ |
674 | atomic_dec(&netvsc_dev->vf_use_cnt); | 673 | atomic_dec(&net_device_ctx->vf_use_cnt); |
675 | goto vf_injection_done; | 674 | goto vf_injection_done; |
676 | } | 675 | } |
677 | 676 | ||
@@ -683,17 +682,19 @@ int netvsc_recv_callback(struct hv_device *device_obj, | |||
683 | * the host). Deliver these via the VF interface | 682 | * the host). Deliver these via the VF interface |
684 | * in the guest. | 683 | * in the guest. |
685 | */ | 684 | */ |
686 | vf_skb = netvsc_alloc_recv_skb(netvsc_dev->vf_netdev, packet, | 685 | vf_skb = netvsc_alloc_recv_skb(net_device_ctx->vf_netdev, |
687 | csum_info, *data, vlan_tci); | 686 | packet, csum_info, *data, |
687 | vlan_tci); | ||
688 | if (vf_skb != NULL) { | 688 | if (vf_skb != NULL) { |
689 | ++netvsc_dev->vf_netdev->stats.rx_packets; | 689 | ++net_device_ctx->vf_netdev->stats.rx_packets; |
690 | netvsc_dev->vf_netdev->stats.rx_bytes += bytes_recvd; | 690 | net_device_ctx->vf_netdev->stats.rx_bytes += |
691 | bytes_recvd; | ||
691 | netif_receive_skb(vf_skb); | 692 | netif_receive_skb(vf_skb); |
692 | } else { | 693 | } else { |
693 | ++net->stats.rx_dropped; | 694 | ++net->stats.rx_dropped; |
694 | ret = NVSP_STAT_FAIL; | 695 | ret = NVSP_STAT_FAIL; |
695 | } | 696 | } |
696 | atomic_dec(&netvsc_dev->vf_use_cnt); | 697 | atomic_dec(&net_device_ctx->vf_use_cnt); |
697 | return ret; | 698 | return ret; |
698 | } | 699 | } |
699 | 700 | ||
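
The receive path above is an open-coded reader guard: test the flag, take a use count, then re-test the flag to close the window against a concurrent disable. A compilable reduction of just that protocol, with illustrative names:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool vf_inject;
static atomic_int vf_use_cnt;

/* Returns true when the VF path may be used; the caller must balance
 * it with vf_put(). The second load catches a disable that slipped in
 * between the first check and the counter increment. */
static bool vf_get(void)
{
        if (!atomic_load(&vf_inject))
                return false;
        atomic_fetch_add(&vf_use_cnt, 1);
        if (!atomic_load(&vf_inject)) {
                atomic_fetch_sub(&vf_use_cnt, 1);   /* we raced; back out */
                return false;
        }
        return true;
}

static void vf_put(void)
{
        atomic_fetch_sub(&vf_use_cnt, 1);
}
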
@@ -1150,17 +1151,6 @@ static void netvsc_free_netdev(struct net_device *netdev) | |||
1150 | free_netdev(netdev); | 1151 | free_netdev(netdev); |
1151 | } | 1152 | } |
1152 | 1153 | ||
1153 | static void netvsc_notify_peers(struct work_struct *wrk) | ||
1154 | { | ||
1155 | struct garp_wrk *gwrk; | ||
1156 | |||
1157 | gwrk = container_of(wrk, struct garp_wrk, dwrk); | ||
1158 | |||
1159 | netdev_notify_peers(gwrk->netdev); | ||
1160 | |||
1161 | atomic_dec(&gwrk->netvsc_dev->vf_use_cnt); | ||
1162 | } | ||
1163 | |||
1164 | static struct net_device *get_netvsc_net_device(char *mac) | 1154 | static struct net_device *get_netvsc_net_device(char *mac) |
1165 | { | 1155 | { |
1166 | struct net_device *dev, *found = NULL; | 1156 | struct net_device *dev, *found = NULL; |
@@ -1203,7 +1193,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev) | |||
1203 | 1193 | ||
1204 | net_device_ctx = netdev_priv(ndev); | 1194 | net_device_ctx = netdev_priv(ndev); |
1205 | netvsc_dev = net_device_ctx->nvdev; | 1195 | netvsc_dev = net_device_ctx->nvdev; |
1206 | if (netvsc_dev == NULL) | 1196 | if (!netvsc_dev || net_device_ctx->vf_netdev) |
1207 | return NOTIFY_DONE; | 1197 | return NOTIFY_DONE; |
1208 | 1198 | ||
1209 | netdev_info(ndev, "VF registering: %s\n", vf_netdev->name); | 1199 | netdev_info(ndev, "VF registering: %s\n", vf_netdev->name); |
@@ -1211,10 +1201,23 @@ static int netvsc_register_vf(struct net_device *vf_netdev) | |||
1211 | * Take a reference on the module. | 1201 | * Take a reference on the module. |
1212 | */ | 1202 | */ |
1213 | try_module_get(THIS_MODULE); | 1203 | try_module_get(THIS_MODULE); |
1214 | netvsc_dev->vf_netdev = vf_netdev; | 1204 | net_device_ctx->vf_netdev = vf_netdev; |
1215 | return NOTIFY_OK; | 1205 | return NOTIFY_OK; |
1216 | } | 1206 | } |
1217 | 1207 | ||
1208 | static void netvsc_inject_enable(struct net_device_context *net_device_ctx) | ||
1209 | { | ||
1210 | net_device_ctx->vf_inject = true; | ||
1211 | } | ||
1212 | |||
1213 | static void netvsc_inject_disable(struct net_device_context *net_device_ctx) | ||
1214 | { | ||
1215 | net_device_ctx->vf_inject = false; | ||
1216 | |||
1217 | /* Wait for currently active users to drain out. */ | ||
1218 | while (atomic_read(&net_device_ctx->vf_use_cnt) != 0) | ||
1219 | udelay(50); | ||
1220 | } | ||
1218 | 1221 | ||
1219 | static int netvsc_vf_up(struct net_device *vf_netdev) | 1222 | static int netvsc_vf_up(struct net_device *vf_netdev) |
1220 | { | 1223 | { |
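
netvsc_inject_enable()/netvsc_inject_disable() are the writer half of the same protocol: clear the flag first, then wait until every reader that passed the re-check has dropped its count. Continuing the sketch above (the driver busy-waits with udelay(50); a userspace sketch can yield instead):

#include <sched.h>

/* Once vf_inject is false, no new reader can pass the re-check in
 * vf_get(), so the counter can only fall to zero. */
static void vf_disable(void)
{
        atomic_store(&vf_inject, false);
        while (atomic_load(&vf_use_cnt) != 0)
                sched_yield();
}
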
@@ -1233,11 +1236,11 @@ static int netvsc_vf_up(struct net_device *vf_netdev) | |||
1233 | net_device_ctx = netdev_priv(ndev); | 1236 | net_device_ctx = netdev_priv(ndev); |
1234 | netvsc_dev = net_device_ctx->nvdev; | 1237 | netvsc_dev = net_device_ctx->nvdev; |
1235 | 1238 | ||
1236 | if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL)) | 1239 | if (!netvsc_dev || !net_device_ctx->vf_netdev) |
1237 | return NOTIFY_DONE; | 1240 | return NOTIFY_DONE; |
1238 | 1241 | ||
1239 | netdev_info(ndev, "VF up: %s\n", vf_netdev->name); | 1242 | netdev_info(ndev, "VF up: %s\n", vf_netdev->name); |
1240 | netvsc_dev->vf_inject = true; | 1243 | netvsc_inject_enable(net_device_ctx); |
1241 | 1244 | ||
1242 | /* | 1245 | /* |
1243 | * Open the device before switching data path. | 1246 | * Open the device before switching data path. |
@@ -1252,15 +1255,8 @@ static int netvsc_vf_up(struct net_device *vf_netdev) | |||
1252 | 1255 | ||
1253 | netif_carrier_off(ndev); | 1256 | netif_carrier_off(ndev); |
1254 | 1257 | ||
1255 | /* | 1258 | /* Now notify peers through VF device. */ |
1256 | * Now notify peers. We are scheduling work to | 1259 | call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, vf_netdev); |
1257 | * notify peers; take a reference to prevent | ||
1258 | * the VF interface from vanishing. | ||
1259 | */ | ||
1260 | atomic_inc(&netvsc_dev->vf_use_cnt); | ||
1261 | net_device_ctx->gwrk.netdev = vf_netdev; | ||
1262 | net_device_ctx->gwrk.netvsc_dev = netvsc_dev; | ||
1263 | schedule_work(&net_device_ctx->gwrk.dwrk); | ||
1264 | 1260 | ||
1265 | return NOTIFY_OK; | 1261 | return NOTIFY_OK; |
1266 | } | 1262 | } |
@@ -1283,29 +1279,18 @@ static int netvsc_vf_down(struct net_device *vf_netdev) | |||
1283 | net_device_ctx = netdev_priv(ndev); | 1279 | net_device_ctx = netdev_priv(ndev); |
1284 | netvsc_dev = net_device_ctx->nvdev; | 1280 | netvsc_dev = net_device_ctx->nvdev; |
1285 | 1281 | ||
1286 | if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL)) | 1282 | if (!netvsc_dev || !net_device_ctx->vf_netdev) |
1287 | return NOTIFY_DONE; | 1283 | return NOTIFY_DONE; |
1288 | 1284 | ||
1289 | netdev_info(ndev, "VF down: %s\n", vf_netdev->name); | 1285 | netdev_info(ndev, "VF down: %s\n", vf_netdev->name); |
1290 | netvsc_dev->vf_inject = false; | 1286 | netvsc_inject_disable(net_device_ctx); |
1291 | /* | ||
1292 | * Wait for currently active users to | ||
1293 | * drain out. | ||
1294 | */ | ||
1295 | |||
1296 | while (atomic_read(&netvsc_dev->vf_use_cnt) != 0) | ||
1297 | udelay(50); | ||
1298 | netvsc_switch_datapath(ndev, false); | 1287 | netvsc_switch_datapath(ndev, false); |
1299 | netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name); | 1288 | netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name); |
1300 | rndis_filter_close(netvsc_dev); | 1289 | rndis_filter_close(netvsc_dev); |
1301 | netif_carrier_on(ndev); | 1290 | netif_carrier_on(ndev); |
1302 | /* | 1291 | |
1303 | * Notify peers. | 1292 | /* Now notify peers through netvsc device. */ |
1304 | */ | 1293 | call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ndev); |
1305 | atomic_inc(&netvsc_dev->vf_use_cnt); | ||
1306 | net_device_ctx->gwrk.netdev = ndev; | ||
1307 | net_device_ctx->gwrk.netvsc_dev = netvsc_dev; | ||
1308 | schedule_work(&net_device_ctx->gwrk.dwrk); | ||
1309 | 1294 | ||
1310 | return NOTIFY_OK; | 1295 | return NOTIFY_OK; |
1311 | } | 1296 | } |
@@ -1327,11 +1312,11 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev) | |||
1327 | 1312 | ||
1328 | net_device_ctx = netdev_priv(ndev); | 1313 | net_device_ctx = netdev_priv(ndev); |
1329 | netvsc_dev = net_device_ctx->nvdev; | 1314 | netvsc_dev = net_device_ctx->nvdev; |
1330 | if (netvsc_dev == NULL) | 1315 | if (!netvsc_dev || !net_device_ctx->vf_netdev) |
1331 | return NOTIFY_DONE; | 1316 | return NOTIFY_DONE; |
1332 | netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name); | 1317 | netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name); |
1333 | 1318 | netvsc_inject_disable(net_device_ctx); | |
1334 | netvsc_dev->vf_netdev = NULL; | 1319 | net_device_ctx->vf_netdev = NULL; |
1335 | module_put(THIS_MODULE); | 1320 | module_put(THIS_MODULE); |
1336 | return NOTIFY_OK; | 1321 | return NOTIFY_OK; |
1337 | } | 1322 | } |
@@ -1377,11 +1362,14 @@ static int netvsc_probe(struct hv_device *dev, | |||
1377 | 1362 | ||
1378 | INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); | 1363 | INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); |
1379 | INIT_WORK(&net_device_ctx->work, do_set_multicast); | 1364 | INIT_WORK(&net_device_ctx->work, do_set_multicast); |
1380 | INIT_WORK(&net_device_ctx->gwrk.dwrk, netvsc_notify_peers); | ||
1381 | 1365 | ||
1382 | spin_lock_init(&net_device_ctx->lock); | 1366 | spin_lock_init(&net_device_ctx->lock); |
1383 | INIT_LIST_HEAD(&net_device_ctx->reconfig_events); | 1367 | INIT_LIST_HEAD(&net_device_ctx->reconfig_events); |
1384 | 1368 | ||
1369 | atomic_set(&net_device_ctx->vf_use_cnt, 0); | ||
1370 | net_device_ctx->vf_netdev = NULL; | ||
1371 | net_device_ctx->vf_inject = false; | ||
1372 | |||
1385 | net->netdev_ops = &device_ops; | 1373 | net->netdev_ops = &device_ops; |
1386 | 1374 | ||
1387 | net->hw_features = NETVSC_HW_FEATURES; | 1375 | net->hw_features = NETVSC_HW_FEATURES; |
@@ -1494,8 +1482,13 @@ static int netvsc_netdev_event(struct notifier_block *this, | |||
1494 | { | 1482 | { |
1495 | struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); | 1483 | struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); |
1496 | 1484 | ||
1497 | /* Avoid Vlan, Bonding dev with same MAC registering as VF */ | 1485 | /* Avoid Vlan dev with same MAC registering as VF */ |
1498 | if (event_dev->priv_flags & (IFF_802_1Q_VLAN | IFF_BONDING)) | 1486 | if (event_dev->priv_flags & IFF_802_1Q_VLAN) |
1487 | return NOTIFY_DONE; | ||
1488 | |||
1489 | /* Avoid Bonding master dev with same MAC registering as VF */ | ||
1490 | if (event_dev->priv_flags & IFF_BONDING && | ||
1491 | event_dev->flags & IFF_MASTER) | ||
1499 | return NOTIFY_DONE; | 1492 | return NOTIFY_DONE; |
1500 | 1493 | ||
1501 | switch (event) { | 1494 | switch (event) { |
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index d13e6e15d7b5..351e701eb043 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c | |||
@@ -270,6 +270,7 @@ struct macsec_dev { | |||
270 | struct pcpu_secy_stats __percpu *stats; | 270 | struct pcpu_secy_stats __percpu *stats; |
271 | struct list_head secys; | 271 | struct list_head secys; |
272 | struct gro_cells gro_cells; | 272 | struct gro_cells gro_cells; |
273 | unsigned int nest_level; | ||
273 | }; | 274 | }; |
274 | 275 | ||
275 | /** | 276 | /** |
@@ -2699,6 +2700,8 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, | |||
2699 | 2700 | ||
2700 | #define MACSEC_FEATURES \ | 2701 | #define MACSEC_FEATURES \ |
2701 | (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST) | 2702 | (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST) |
2703 | static struct lock_class_key macsec_netdev_addr_lock_key; | ||
2704 | |||
2702 | static int macsec_dev_init(struct net_device *dev) | 2705 | static int macsec_dev_init(struct net_device *dev) |
2703 | { | 2706 | { |
2704 | struct macsec_dev *macsec = macsec_priv(dev); | 2707 | struct macsec_dev *macsec = macsec_priv(dev); |
@@ -2910,6 +2913,13 @@ static int macsec_get_iflink(const struct net_device *dev) | |||
2910 | return macsec_priv(dev)->real_dev->ifindex; | 2913 | return macsec_priv(dev)->real_dev->ifindex; |
2911 | } | 2914 | } |
2912 | 2915 | ||
2916 | |||
2917 | static int macsec_get_nest_level(struct net_device *dev) | ||
2918 | { | ||
2919 | return macsec_priv(dev)->nest_level; | ||
2920 | } | ||
2921 | |||
2922 | |||
2913 | static const struct net_device_ops macsec_netdev_ops = { | 2923 | static const struct net_device_ops macsec_netdev_ops = { |
2914 | .ndo_init = macsec_dev_init, | 2924 | .ndo_init = macsec_dev_init, |
2915 | .ndo_uninit = macsec_dev_uninit, | 2925 | .ndo_uninit = macsec_dev_uninit, |
@@ -2923,6 +2933,7 @@ static const struct net_device_ops macsec_netdev_ops = { | |||
2923 | .ndo_start_xmit = macsec_start_xmit, | 2933 | .ndo_start_xmit = macsec_start_xmit, |
2924 | .ndo_get_stats64 = macsec_get_stats64, | 2934 | .ndo_get_stats64 = macsec_get_stats64, |
2925 | .ndo_get_iflink = macsec_get_iflink, | 2935 | .ndo_get_iflink = macsec_get_iflink, |
2936 | .ndo_get_lock_subclass = macsec_get_nest_level, | ||
2926 | }; | 2937 | }; |
2927 | 2938 | ||
2928 | static const struct device_type macsec_type = { | 2939 | static const struct device_type macsec_type = { |
@@ -3047,22 +3058,31 @@ static void macsec_del_dev(struct macsec_dev *macsec) | |||
3047 | } | 3058 | } |
3048 | } | 3059 | } |
3049 | 3060 | ||
3061 | static void macsec_common_dellink(struct net_device *dev, struct list_head *head) | ||
3062 | { | ||
3063 | struct macsec_dev *macsec = macsec_priv(dev); | ||
3064 | struct net_device *real_dev = macsec->real_dev; | ||
3065 | |||
3066 | unregister_netdevice_queue(dev, head); | ||
3067 | list_del_rcu(&macsec->secys); | ||
3068 | macsec_del_dev(macsec); | ||
3069 | netdev_upper_dev_unlink(real_dev, dev); | ||
3070 | |||
3071 | macsec_generation++; | ||
3072 | } | ||
3073 | |||
3050 | static void macsec_dellink(struct net_device *dev, struct list_head *head) | 3074 | static void macsec_dellink(struct net_device *dev, struct list_head *head) |
3051 | { | 3075 | { |
3052 | struct macsec_dev *macsec = macsec_priv(dev); | 3076 | struct macsec_dev *macsec = macsec_priv(dev); |
3053 | struct net_device *real_dev = macsec->real_dev; | 3077 | struct net_device *real_dev = macsec->real_dev; |
3054 | struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); | 3078 | struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); |
3055 | 3079 | ||
3056 | macsec_generation++; | 3080 | macsec_common_dellink(dev, head); |
3057 | 3081 | ||
3058 | unregister_netdevice_queue(dev, head); | ||
3059 | list_del_rcu(&macsec->secys); | ||
3060 | if (list_empty(&rxd->secys)) { | 3082 | if (list_empty(&rxd->secys)) { |
3061 | netdev_rx_handler_unregister(real_dev); | 3083 | netdev_rx_handler_unregister(real_dev); |
3062 | kfree(rxd); | 3084 | kfree(rxd); |
3063 | } | 3085 | } |
3064 | |||
3065 | macsec_del_dev(macsec); | ||
3066 | } | 3086 | } |
3067 | 3087 | ||
3068 | static int register_macsec_dev(struct net_device *real_dev, | 3088 | static int register_macsec_dev(struct net_device *real_dev, |
@@ -3181,6 +3201,16 @@ static int macsec_newlink(struct net *net, struct net_device *dev, | |||
3181 | 3201 | ||
3182 | dev_hold(real_dev); | 3202 | dev_hold(real_dev); |
3183 | 3203 | ||
3204 | macsec->nest_level = dev_get_nest_level(real_dev) + 1; | ||
3205 | netdev_lockdep_set_classes(dev); | ||
3206 | lockdep_set_class_and_subclass(&dev->addr_list_lock, | ||
3207 | &macsec_netdev_addr_lock_key, | ||
3208 | macsec_get_nest_level(dev)); | ||
3209 | |||
3210 | err = netdev_upper_dev_link(real_dev, dev); | ||
3211 | if (err < 0) | ||
3212 | goto unregister; | ||
3213 | |||
3184 | /* need to be already registered so that ->init has run and | 3214 | /* need to be already registered so that ->init has run and |
3185 | * the MAC addr is set | 3215 | * the MAC addr is set |
3186 | */ | 3216 | */ |
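
The lockdep additions give each macsec device an addr_list_lock subclass one deeper than the device it sits on, so taking both locks while propagating addresses no longer looks like recursive locking. Stripped of the lockdep plumbing, the depth bookkeeping is just this (illustrative names; the kernel's dev_get_nest_level() walks the whole lower-device graph):

struct stacked_dev_sketch {
        struct stacked_dev_sketch *lower;       /* real_dev */
        int nest_level;
};

static int get_nest_level_sketch(const struct stacked_dev_sketch *dev)
{
        return dev ? dev->nest_level : 0;
}

/* What ndo_get_lock_subclass() reports is one more than the depth of
 * whatever the device is stacked on. */
static void stack_on(struct stacked_dev_sketch *dev,
                     struct stacked_dev_sketch *lower)
{
        dev->lower = lower;
        dev->nest_level = get_nest_level_sketch(lower) + 1;
}

The macvlan hunk further down drops the netif_is_macvlan filter argument from dev_get_nest_level() in the same spirit: any lower device contributes to the depth, whatever its type.
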
@@ -3193,12 +3223,12 @@ static int macsec_newlink(struct net *net, struct net_device *dev, | |||
3193 | 3223 | ||
3194 | if (rx_handler && sci_exists(real_dev, sci)) { | 3224 | if (rx_handler && sci_exists(real_dev, sci)) { |
3195 | err = -EBUSY; | 3225 | err = -EBUSY; |
3196 | goto unregister; | 3226 | goto unlink; |
3197 | } | 3227 | } |
3198 | 3228 | ||
3199 | err = macsec_add_dev(dev, sci, icv_len); | 3229 | err = macsec_add_dev(dev, sci, icv_len); |
3200 | if (err) | 3230 | if (err) |
3201 | goto unregister; | 3231 | goto unlink; |
3202 | 3232 | ||
3203 | if (data) | 3233 | if (data) |
3204 | macsec_changelink_common(dev, data); | 3234 | macsec_changelink_common(dev, data); |
@@ -3213,6 +3243,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev, | |||
3213 | 3243 | ||
3214 | del_dev: | 3244 | del_dev: |
3215 | macsec_del_dev(macsec); | 3245 | macsec_del_dev(macsec); |
3246 | unlink: | ||
3247 | netdev_upper_dev_unlink(real_dev, dev); | ||
3216 | unregister: | 3248 | unregister: |
3217 | unregister_netdevice(dev); | 3249 | unregister_netdevice(dev); |
3218 | return err; | 3250 | return err; |
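
The new unlink label keeps the error unwind a mirror image of the setup order: netdev_upper_dev_link() now happens after registration, so any failure beyond it has to unlink before it unregisters. The shape of that unwind, reduced to compilable stubs (the stub names only gesture at the calls in the hunk):

static int  do_register(void)   { return 0; }   /* register_netdevice()      */
static void do_unregister(void) { }             /* unregister_netdevice()    */
static int  do_link(void)       { return 0; }   /* netdev_upper_dev_link()   */
static void undo_link(void)     { }             /* netdev_upper_dev_unlink() */
static int  do_add(void)        { return 0; }   /* macsec_add_dev()          */

static int newlink_sketch(void)
{
        int err;

        err = do_register();
        if (err)
                return err;
        err = do_link();
        if (err)
                goto unregister;
        err = do_add();
        if (err)
                goto unlink;    /* undo in exact reverse order */
        return 0;

unlink:
        undo_link();
unregister:
        do_unregister();
        return err;
}
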
@@ -3382,8 +3414,12 @@ static int macsec_notify(struct notifier_block *this, unsigned long event, | |||
3382 | 3414 | ||
3383 | rxd = macsec_data_rtnl(real_dev); | 3415 | rxd = macsec_data_rtnl(real_dev); |
3384 | list_for_each_entry_safe(m, n, &rxd->secys, secys) { | 3416 | list_for_each_entry_safe(m, n, &rxd->secys, secys) { |
3385 | macsec_dellink(m->secy.netdev, &head); | 3417 | macsec_common_dellink(m->secy.netdev, &head); |
3386 | } | 3418 | } |
3419 | |||
3420 | netdev_rx_handler_unregister(real_dev); | ||
3421 | kfree(rxd); | ||
3422 | |||
3387 | unregister_netdevice_many(&head); | 3423 | unregister_netdevice_many(&head); |
3388 | break; | 3424 | break; |
3389 | } | 3425 | } |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index cd9b53834bf6..3234fcdea317 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -1315,7 +1315,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, | |||
1315 | vlan->dev = dev; | 1315 | vlan->dev = dev; |
1316 | vlan->port = port; | 1316 | vlan->port = port; |
1317 | vlan->set_features = MACVLAN_FEATURES; | 1317 | vlan->set_features = MACVLAN_FEATURES; |
1318 | vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1; | 1318 | vlan->nest_level = dev_get_nest_level(lowerdev) + 1; |
1319 | 1319 | ||
1320 | vlan->mode = MACVLAN_MODE_VEPA; | 1320 | vlan->mode = MACVLAN_MODE_VEPA; |
1321 | if (data && data[IFLA_MACVLAN_MODE]) | 1321 | if (data && data[IFLA_MACVLAN_MODE]) |
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index a38c0dac514b..070e3290aa6e 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c | |||
@@ -275,7 +275,6 @@ static void macvtap_put_queue(struct macvtap_queue *q) | |||
275 | rtnl_unlock(); | 275 | rtnl_unlock(); |
276 | 276 | ||
277 | synchronize_rcu(); | 277 | synchronize_rcu(); |
278 | skb_array_cleanup(&q->skb_array); | ||
279 | sock_put(&q->sk); | 278 | sock_put(&q->sk); |
280 | } | 279 | } |
281 | 280 | ||
@@ -533,10 +532,8 @@ static void macvtap_sock_write_space(struct sock *sk) | |||
533 | static void macvtap_sock_destruct(struct sock *sk) | 532 | static void macvtap_sock_destruct(struct sock *sk) |
534 | { | 533 | { |
535 | struct macvtap_queue *q = container_of(sk, struct macvtap_queue, sk); | 534 | struct macvtap_queue *q = container_of(sk, struct macvtap_queue, sk); |
536 | struct sk_buff *skb; | ||
537 | 535 | ||
538 | while ((skb = skb_array_consume(&q->skb_array)) != NULL) | 536 | skb_array_cleanup(&q->skb_array); |
539 | kfree_skb(skb); | ||
540 | } | 537 | } |
541 | 538 | ||
542 | static int macvtap_open(struct inode *inode, struct file *file) | 539 | static int macvtap_open(struct inode *inode, struct file *file) |
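
Moving skb_array_cleanup() out of macvtap_put_queue() and into the sock destructor ties the ring's teardown to the last sock reference: it now runs exactly once, when sk_refcnt hits zero, rather than while another path might still hold a reference. The same pattern in miniature, with illustrative names:

#include <stdatomic.h>
#include <stdlib.h>

struct queue_sketch {
        atomic_int refcnt;
        void *ring;             /* stands in for the skb_array */
};

/* Destructor-style teardown: the ring is freed exactly once, by
 * whoever drops the final reference, never by an earlier put. */
static void queue_put(struct queue_sketch *q)
{
        if (atomic_fetch_sub(&q->refcnt, 1) == 1) {
                free(q->ring);
                free(q);
        }
}
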
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 1882d9828c99..053e87905b94 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c | |||
@@ -677,17 +677,28 @@ static void kszphy_get_stats(struct phy_device *phydev, | |||
677 | data[i] = kszphy_get_stat(phydev, i); | 677 | data[i] = kszphy_get_stat(phydev, i); |
678 | } | 678 | } |
679 | 679 | ||
680 | static int kszphy_resume(struct phy_device *phydev) | 680 | static int kszphy_suspend(struct phy_device *phydev) |
681 | { | 681 | { |
682 | int value; | 682 | /* Disable PHY Interrupts */ |
683 | if (phy_interrupt_is_valid(phydev)) { | ||
684 | phydev->interrupts = PHY_INTERRUPT_DISABLED; | ||
685 | if (phydev->drv->config_intr) | ||
686 | phydev->drv->config_intr(phydev); | ||
687 | } | ||
683 | 688 | ||
684 | mutex_lock(&phydev->lock); | 689 | return genphy_suspend(phydev); |
690 | } | ||
685 | 691 | ||
686 | value = phy_read(phydev, MII_BMCR); | 692 | static int kszphy_resume(struct phy_device *phydev) |
687 | phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN); | 693 | { |
694 | genphy_resume(phydev); | ||
688 | 695 | ||
689 | kszphy_config_intr(phydev); | 696 | /* Enable PHY Interrupts */ |
690 | mutex_unlock(&phydev->lock); | 697 | if (phy_interrupt_is_valid(phydev)) { |
698 | phydev->interrupts = PHY_INTERRUPT_ENABLED; | ||
699 | if (phydev->drv->config_intr) | ||
700 | phydev->drv->config_intr(phydev); | ||
701 | } | ||
691 | 702 | ||
692 | return 0; | 703 | return 0; |
693 | } | 704 | } |
@@ -900,7 +911,7 @@ static struct phy_driver ksphy_driver[] = { | |||
900 | .get_sset_count = kszphy_get_sset_count, | 911 | .get_sset_count = kszphy_get_sset_count, |
901 | .get_strings = kszphy_get_strings, | 912 | .get_strings = kszphy_get_strings, |
902 | .get_stats = kszphy_get_stats, | 913 | .get_stats = kszphy_get_stats, |
903 | .suspend = genphy_suspend, | 914 | .suspend = kszphy_suspend, |
904 | .resume = kszphy_resume, | 915 | .resume = kszphy_resume, |
905 | }, { | 916 | }, { |
906 | .phy_id = PHY_ID_KSZ8061, | 917 | .phy_id = PHY_ID_KSZ8061, |
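
Instead of hand-toggling BMCR_PDOWN, the driver now masks the PHY interrupt before genphy_suspend() and re-arms it only after genphy_resume() has powered the PHY back up. A compilable sketch of that bracketing; the struct and helpers are stand-ins, not the phylib API:

#include <stdbool.h>

struct phy_sketch {
        bool irq_valid;
        bool irq_enabled;
        bool powered;
};

static int generic_suspend(struct phy_sketch *p) { p->powered = false; return 0; }
static int generic_resume(struct phy_sketch *p)  { p->powered = true;  return 0; }

static int suspend_sketch(struct phy_sketch *p)
{
        if (p->irq_valid)
                p->irq_enabled = false; /* mask before powering down */
        return generic_suspend(p);
}

static int resume_sketch(struct phy_sketch *p)
{
        int err = generic_resume(p);

        if (err)
                return err;
        if (p->irq_valid)
                p->irq_enabled = true;  /* unmask only once powered up */
        return 0;
}
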
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index da4e3d6632f6..c0dda6fc0921 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -1811,7 +1811,7 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, | |||
1811 | fl4.flowi4_mark = skb->mark; | 1811 | fl4.flowi4_mark = skb->mark; |
1812 | fl4.flowi4_proto = IPPROTO_UDP; | 1812 | fl4.flowi4_proto = IPPROTO_UDP; |
1813 | fl4.daddr = daddr; | 1813 | fl4.daddr = daddr; |
1814 | fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr; | 1814 | fl4.saddr = *saddr; |
1815 | 1815 | ||
1816 | rt = ip_route_output_key(vxlan->net, &fl4); | 1816 | rt = ip_route_output_key(vxlan->net, &fl4); |
1817 | if (!IS_ERR(rt)) { | 1817 | if (!IS_ERR(rt)) { |
@@ -1847,7 +1847,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, | |||
1847 | memset(&fl6, 0, sizeof(fl6)); | 1847 | memset(&fl6, 0, sizeof(fl6)); |
1848 | fl6.flowi6_oif = oif; | 1848 | fl6.flowi6_oif = oif; |
1849 | fl6.daddr = *daddr; | 1849 | fl6.daddr = *daddr; |
1850 | fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr; | 1850 | fl6.saddr = *saddr; |
1851 | fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label); | 1851 | fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label); |
1852 | fl6.flowi6_mark = skb->mark; | 1852 | fl6.flowi6_mark = skb->mark; |
1853 | fl6.flowi6_proto = IPPROTO_UDP; | 1853 | fl6.flowi6_proto = IPPROTO_UDP; |
@@ -1920,7 +1920,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
1920 | struct rtable *rt = NULL; | 1920 | struct rtable *rt = NULL; |
1921 | const struct iphdr *old_iph; | 1921 | const struct iphdr *old_iph; |
1922 | union vxlan_addr *dst; | 1922 | union vxlan_addr *dst; |
1923 | union vxlan_addr remote_ip; | 1923 | union vxlan_addr remote_ip, local_ip; |
1924 | union vxlan_addr *src; | ||
1924 | struct vxlan_metadata _md; | 1925 | struct vxlan_metadata _md; |
1925 | struct vxlan_metadata *md = &_md; | 1926 | struct vxlan_metadata *md = &_md; |
1926 | __be16 src_port = 0, dst_port; | 1927 | __be16 src_port = 0, dst_port; |
@@ -1938,6 +1939,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
1938 | dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port; | 1939 | dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port; |
1939 | vni = rdst->remote_vni; | 1940 | vni = rdst->remote_vni; |
1940 | dst = &rdst->remote_ip; | 1941 | dst = &rdst->remote_ip; |
1942 | src = &vxlan->cfg.saddr; | ||
1941 | dst_cache = &rdst->dst_cache; | 1943 | dst_cache = &rdst->dst_cache; |
1942 | } else { | 1944 | } else { |
1943 | if (!info) { | 1945 | if (!info) { |
@@ -1948,11 +1950,15 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
1948 | dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port; | 1950 | dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port; |
1949 | vni = vxlan_tun_id_to_vni(info->key.tun_id); | 1951 | vni = vxlan_tun_id_to_vni(info->key.tun_id); |
1950 | remote_ip.sa.sa_family = ip_tunnel_info_af(info); | 1952 | remote_ip.sa.sa_family = ip_tunnel_info_af(info); |
1951 | if (remote_ip.sa.sa_family == AF_INET) | 1953 | if (remote_ip.sa.sa_family == AF_INET) { |
1952 | remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst; | 1954 | remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst; |
1953 | else | 1955 | local_ip.sin.sin_addr.s_addr = info->key.u.ipv4.src; |
1956 | } else { | ||
1954 | remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst; | 1957 | remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst; |
1958 | local_ip.sin6.sin6_addr = info->key.u.ipv6.src; | ||
1959 | } | ||
1955 | dst = &remote_ip; | 1960 | dst = &remote_ip; |
1961 | src = &local_ip; | ||
1956 | dst_cache = &info->dst_cache; | 1962 | dst_cache = &info->dst_cache; |
1957 | } | 1963 | } |
1958 | 1964 | ||
@@ -1992,15 +1998,14 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
1992 | } | 1998 | } |
1993 | 1999 | ||
1994 | if (dst->sa.sa_family == AF_INET) { | 2000 | if (dst->sa.sa_family == AF_INET) { |
1995 | __be32 saddr; | ||
1996 | |||
1997 | if (!vxlan->vn4_sock) | 2001 | if (!vxlan->vn4_sock) |
1998 | goto drop; | 2002 | goto drop; |
1999 | sk = vxlan->vn4_sock->sock->sk; | 2003 | sk = vxlan->vn4_sock->sock->sk; |
2000 | 2004 | ||
2001 | rt = vxlan_get_route(vxlan, skb, | 2005 | rt = vxlan_get_route(vxlan, skb, |
2002 | rdst ? rdst->remote_ifindex : 0, tos, | 2006 | rdst ? rdst->remote_ifindex : 0, tos, |
2003 | dst->sin.sin_addr.s_addr, &saddr, | 2007 | dst->sin.sin_addr.s_addr, |
2008 | &src->sin.sin_addr.s_addr, | ||
2004 | dst_cache, info); | 2009 | dst_cache, info); |
2005 | if (IS_ERR(rt)) { | 2010 | if (IS_ERR(rt)) { |
2006 | netdev_dbg(dev, "no route to %pI4\n", | 2011 | netdev_dbg(dev, "no route to %pI4\n", |
@@ -2017,7 +2022,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
2017 | } | 2022 | } |
2018 | 2023 | ||
2019 | /* Bypass encapsulation if the destination is local */ | 2024 | /* Bypass encapsulation if the destination is local */ |
2020 | if (rt->rt_flags & RTCF_LOCAL && | 2025 | if (!info && rt->rt_flags & RTCF_LOCAL && |
2021 | !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { | 2026 | !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { |
2022 | struct vxlan_dev *dst_vxlan; | 2027 | struct vxlan_dev *dst_vxlan; |
2023 | 2028 | ||
@@ -2043,13 +2048,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
2043 | if (err < 0) | 2048 | if (err < 0) |
2044 | goto xmit_tx_error; | 2049 | goto xmit_tx_error; |
2045 | 2050 | ||
2046 | udp_tunnel_xmit_skb(rt, sk, skb, saddr, | 2051 | udp_tunnel_xmit_skb(rt, sk, skb, src->sin.sin_addr.s_addr, |
2047 | dst->sin.sin_addr.s_addr, tos, ttl, df, | 2052 | dst->sin.sin_addr.s_addr, tos, ttl, df, |
2048 | src_port, dst_port, xnet, !udp_sum); | 2053 | src_port, dst_port, xnet, !udp_sum); |
2049 | #if IS_ENABLED(CONFIG_IPV6) | 2054 | #if IS_ENABLED(CONFIG_IPV6) |
2050 | } else { | 2055 | } else { |
2051 | struct dst_entry *ndst; | 2056 | struct dst_entry *ndst; |
2052 | struct in6_addr saddr; | ||
2053 | u32 rt6i_flags; | 2057 | u32 rt6i_flags; |
2054 | 2058 | ||
2055 | if (!vxlan->vn6_sock) | 2059 | if (!vxlan->vn6_sock) |
@@ -2058,7 +2062,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
2058 | 2062 | ||
2059 | ndst = vxlan6_get_route(vxlan, skb, | 2063 | ndst = vxlan6_get_route(vxlan, skb, |
2060 | rdst ? rdst->remote_ifindex : 0, tos, | 2064 | rdst ? rdst->remote_ifindex : 0, tos, |
2061 | label, &dst->sin6.sin6_addr, &saddr, | 2065 | label, &dst->sin6.sin6_addr, |
2066 | &src->sin6.sin6_addr, | ||
2062 | dst_cache, info); | 2067 | dst_cache, info); |
2063 | if (IS_ERR(ndst)) { | 2068 | if (IS_ERR(ndst)) { |
2064 | netdev_dbg(dev, "no route to %pI6\n", | 2069 | netdev_dbg(dev, "no route to %pI6\n", |
@@ -2077,7 +2082,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
2077 | 2082 | ||
2078 | /* Bypass encapsulation if the destination is local */ | 2083 | /* Bypass encapsulation if the destination is local */ |
2079 | rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags; | 2084 | rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags; |
2080 | if (rt6i_flags & RTF_LOCAL && | 2085 | if (!info && rt6i_flags & RTF_LOCAL && |
2081 | !(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { | 2086 | !(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { |
2082 | struct vxlan_dev *dst_vxlan; | 2087 | struct vxlan_dev *dst_vxlan; |
2083 | 2088 | ||
@@ -2104,7 +2109,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
2104 | return; | 2109 | return; |
2105 | } | 2110 | } |
2106 | udp_tunnel6_xmit_skb(ndst, sk, skb, dev, | 2111 | udp_tunnel6_xmit_skb(ndst, sk, skb, dev, |
2107 | &saddr, &dst->sin6.sin6_addr, tos, ttl, | 2112 | &src->sin6.sin6_addr, |
2113 | &dst->sin6.sin6_addr, tos, ttl, | ||
2108 | label, src_port, dst_port, !udp_sum); | 2114 | label, src_port, dst_port, !udp_sum); |
2109 | #endif | 2115 | #endif |
2110 | } | 2116 | } |
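
The vxlan hunks make the source address follow the destination's provenance: fdb-driven transmits keep the device's configured saddr, while collect-metadata transmits (info != NULL) now take both endpoints from the tunnel key rather than pairing a metadata destination with a configured source. The selection logic in isolation, IPv4-only and with illustrative types:

#include <stdint.h>

struct route_key_sketch { uint32_t saddr, daddr; };
struct rdst_sketch      { uint32_t remote_ip; };
struct tun_key_sketch   { uint32_t src, dst; };

/* Both endpoints come from the same place: the fdb entry plus device
 * config, or the per-skb tunnel metadata. Mixing the two was the bug. */
static struct route_key_sketch
pick_endpoints(const struct rdst_sketch *rdst,
               const struct tun_key_sketch *info_key,
               uint32_t cfg_saddr)
{
        struct route_key_sketch k;

        if (rdst) {                     /* ordinary fdb-driven transmit */
                k.daddr = rdst->remote_ip;
                k.saddr = cfg_saddr;
        } else {                        /* collect-metadata transmit */
                k.daddr = info_key->dst;
                k.saddr = info_key->src;
        }
        return k;
}

The added !info conditions on the local-delivery bypass follow the same rule: externally controlled packets are encapsulated even when the route turns out to be local.
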
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 1d689169da76..9e1f2d9c9865 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c | |||
@@ -5700,10 +5700,11 @@ out: | |||
5700 | mutex_unlock(&wl->mutex); | 5700 | mutex_unlock(&wl->mutex); |
5701 | } | 5701 | } |
5702 | 5702 | ||
5703 | static u32 wlcore_op_get_expected_throughput(struct ieee80211_sta *sta) | 5703 | static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw, |
5704 | struct ieee80211_sta *sta) | ||
5704 | { | 5705 | { |
5705 | struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv; | 5706 | struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv; |
5706 | struct wl1271 *wl = wl_sta->wl; | 5707 | struct wl1271 *wl = hw->priv; |
5707 | u8 hlid = wl_sta->hlid; | 5708 | u8 hlid = wl_sta->hlid; |
5708 | 5709 | ||
5709 | /* return in units of Kbps */ | 5710 | /* return in units of Kbps */ |
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 88e91666f145..368795aad5c9 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c | |||
@@ -1269,6 +1269,7 @@ static int btt_blk_init(struct btt *btt) | |||
1269 | } | 1269 | } |
1270 | } | 1270 | } |
1271 | set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9); | 1271 | set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9); |
1272 | btt->nd_btt->size = btt->nlba * (u64)btt->sector_size; | ||
1272 | revalidate_disk(btt->btt_disk); | 1273 | revalidate_disk(btt->btt_disk); |
1273 | 1274 | ||
1274 | return 0; | 1275 | return 0; |
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c index 3fa7919f94a8..97dd2925ed6e 100644 --- a/drivers/nvdimm/btt_devs.c +++ b/drivers/nvdimm/btt_devs.c | |||
@@ -140,10 +140,30 @@ static ssize_t namespace_store(struct device *dev, | |||
140 | } | 140 | } |
141 | static DEVICE_ATTR_RW(namespace); | 141 | static DEVICE_ATTR_RW(namespace); |
142 | 142 | ||
143 | static ssize_t size_show(struct device *dev, | ||
144 | struct device_attribute *attr, char *buf) | ||
145 | { | ||
146 | struct nd_btt *nd_btt = to_nd_btt(dev); | ||
147 | ssize_t rc; | ||
148 | |||
149 | device_lock(dev); | ||
150 | if (dev->driver) | ||
151 | rc = sprintf(buf, "%llu\n", nd_btt->size); | ||
152 | else { | ||
153 | /* no size to convey if the btt instance is disabled */ | ||
154 | rc = -ENXIO; | ||
155 | } | ||
156 | device_unlock(dev); | ||
157 | |||
158 | return rc; | ||
159 | } | ||
160 | static DEVICE_ATTR_RO(size); | ||
161 | |||
143 | static struct attribute *nd_btt_attributes[] = { | 162 | static struct attribute *nd_btt_attributes[] = { |
144 | &dev_attr_sector_size.attr, | 163 | &dev_attr_sector_size.attr, |
145 | &dev_attr_namespace.attr, | 164 | &dev_attr_namespace.attr, |
146 | &dev_attr_uuid.attr, | 165 | &dev_attr_uuid.attr, |
166 | &dev_attr_size.attr, | ||
147 | NULL, | 167 | NULL, |
148 | }; | 168 | }; |
149 | 169 | ||
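
btt_blk_init() records the capacity in nd_btt->size, and the new read-only attribute publishes it, holding device_lock and returning -ENXIO while no driver is bound and the value would be stale. A minimal userspace consumer; the btt0.0 device name is a made-up example, the instance name varies per system:

#include <stdio.h>

int main(void)
{
        unsigned long long bytes;
        FILE *f = fopen("/sys/bus/nd/devices/btt0.0/size", "r");

        if (!f)
                return 1;
        /* a disabled btt instance makes the read fail with ENXIO */
        if (fscanf(f, "%llu", &bytes) != 1) {
                fclose(f);
                return 1;
        }
        fclose(f);
        printf("btt capacity: %llu bytes\n", bytes);
        return 0;
}
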
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index 40476399d227..8024a0ef86d3 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h | |||
@@ -143,6 +143,7 @@ struct nd_btt { | |||
143 | struct nd_namespace_common *ndns; | 143 | struct nd_namespace_common *ndns; |
144 | struct btt *btt; | 144 | struct btt *btt; |
145 | unsigned long lbasize; | 145 | unsigned long lbasize; |
146 | u64 size; | ||
146 | u8 *uuid; | 147 | u8 *uuid; |
147 | int id; | 148 | int id; |
148 | }; | 149 | }; |
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 7ff2e820bbf4..2feacc70bf61 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c | |||
@@ -81,10 +81,12 @@ EXPORT_SYMBOL_GPL(nvme_cancel_request); | |||
81 | bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, | 81 | bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, |
82 | enum nvme_ctrl_state new_state) | 82 | enum nvme_ctrl_state new_state) |
83 | { | 83 | { |
84 | enum nvme_ctrl_state old_state = ctrl->state; | 84 | enum nvme_ctrl_state old_state; |
85 | bool changed = false; | 85 | bool changed = false; |
86 | 86 | ||
87 | spin_lock_irq(&ctrl->lock); | 87 | spin_lock_irq(&ctrl->lock); |
88 | |||
89 | old_state = ctrl->state; | ||
88 | switch (new_state) { | 90 | switch (new_state) { |
89 | case NVME_CTRL_LIVE: | 91 | case NVME_CTRL_LIVE: |
90 | switch (old_state) { | 92 | switch (old_state) { |
@@ -140,11 +142,12 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, | |||
140 | default: | 142 | default: |
141 | break; | 143 | break; |
142 | } | 144 | } |
143 | spin_unlock_irq(&ctrl->lock); | ||
144 | 145 | ||
145 | if (changed) | 146 | if (changed) |
146 | ctrl->state = new_state; | 147 | ctrl->state = new_state; |
147 | 148 | ||
149 | spin_unlock_irq(&ctrl->lock); | ||
150 | |||
148 | return changed; | 151 | return changed; |
149 | } | 152 | } |
150 | EXPORT_SYMBOL_GPL(nvme_change_ctrl_state); | 153 | EXPORT_SYMBOL_GPL(nvme_change_ctrl_state); |
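
The nvme_change_ctrl_state() fix turns read-validate-write into one critical section: old_state is sampled under ctrl->lock and the new state is committed before the lock drops, so two racing callers can no longer both validate against the same stale snapshot. The invariant on its own, as a pthread sketch with a cut-down state table:

#include <pthread.h>
#include <stdbool.h>

enum ctrl_state_sketch { ST_NEW, ST_LIVE, ST_DELETING };

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static enum ctrl_state_sketch cur_state = ST_NEW;

static bool change_state_sketch(enum ctrl_state_sketch new_state)
{
        bool changed = false;
        enum ctrl_state_sketch old_state;

        pthread_mutex_lock(&state_lock);
        old_state = cur_state;                  /* read under the lock */
        switch (new_state) {                    /* validate the transition */
        case ST_LIVE:
                changed = (old_state == ST_NEW);
                break;
        case ST_DELETING:
                changed = (old_state == ST_LIVE);
                break;
        default:
                break;
        }
        if (changed)
                cur_state = new_state;          /* commit before unlocking */
        pthread_mutex_unlock(&state_lock);
        return changed;
}
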
@@ -608,7 +611,7 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid, | |||
608 | 611 | ||
609 | ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0, | 612 | ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0, |
610 | NVME_QID_ANY, 0, 0); | 613 | NVME_QID_ANY, 0, 0); |
611 | if (ret >= 0) | 614 | if (ret >= 0 && result) |
612 | *result = le32_to_cpu(cqe.result); | 615 | *result = le32_to_cpu(cqe.result); |
613 | return ret; | 616 | return ret; |
614 | } | 617 | } |
@@ -628,7 +631,7 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11, | |||
628 | 631 | ||
629 | ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0, | 632 | ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0, |
630 | NVME_QID_ANY, 0, 0); | 633 | NVME_QID_ANY, 0, 0); |
631 | if (ret >= 0) | 634 | if (ret >= 0 && result) |
632 | *result = le32_to_cpu(cqe.result); | 635 | *result = le32_to_cpu(cqe.result); |
633 | return ret; | 636 | return ret; |
634 | } | 637 | } |
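
Both feature helpers now treat the result pointer as optional, so callers interested only in success can pass NULL. The convention is simply:

/* Optional out-parameter: dereference only when the caller supplied
 * one, as nvme_get_features()/nvme_set_features() now do. */
static int get_feature_sketch(unsigned *result)
{
        unsigned value = 0;     /* stand-in for the completion result */

        if (result)
                *result = value;
        return 0;
}
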
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index d7c33f9361aa..8dcf5a960951 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
@@ -1543,15 +1543,10 @@ static void nvme_disable_io_queues(struct nvme_dev *dev) | |||
1543 | reinit_completion(&dev->ioq_wait); | 1543 | reinit_completion(&dev->ioq_wait); |
1544 | retry: | 1544 | retry: |
1545 | timeout = ADMIN_TIMEOUT; | 1545 | timeout = ADMIN_TIMEOUT; |
1546 | for (; i > 0; i--) { | 1546 | for (; i > 0; i--, sent++) |
1547 | struct nvme_queue *nvmeq = dev->queues[i]; | 1547 | if (nvme_delete_queue(dev->queues[i], opcode)) |
1548 | |||
1549 | if (!pass) | ||
1550 | nvme_suspend_queue(nvmeq); | ||
1551 | if (nvme_delete_queue(nvmeq, opcode)) | ||
1552 | break; | 1548 | break; |
1553 | ++sent; | 1549 | |
1554 | } | ||
1555 | while (sent--) { | 1550 | while (sent--) { |
1556 | timeout = wait_for_completion_io_timeout(&dev->ioq_wait, timeout); | 1551 | timeout = wait_for_completion_io_timeout(&dev->ioq_wait, timeout); |
1557 | if (timeout == 0) | 1552 | if (timeout == 0) |
@@ -1693,11 +1688,12 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) | |||
1693 | nvme_stop_queues(&dev->ctrl); | 1688 | nvme_stop_queues(&dev->ctrl); |
1694 | csts = readl(dev->bar + NVME_REG_CSTS); | 1689 | csts = readl(dev->bar + NVME_REG_CSTS); |
1695 | } | 1690 | } |
1691 | |||
1692 | for (i = dev->queue_count - 1; i > 0; i--) | ||
1693 | nvme_suspend_queue(dev->queues[i]); | ||
1694 | |||
1696 | if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) { | 1695 | if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) { |
1697 | for (i = dev->queue_count - 1; i >= 0; i--) { | 1696 | nvme_suspend_queue(dev->queues[0]); |
1698 | struct nvme_queue *nvmeq = dev->queues[i]; | ||
1699 | nvme_suspend_queue(nvmeq); | ||
1700 | } | ||
1701 | } else { | 1697 | } else { |
1702 | nvme_disable_io_queues(dev); | 1698 | nvme_disable_io_queues(dev); |
1703 | nvme_disable_admin_queue(dev, shutdown); | 1699 | nvme_disable_admin_queue(dev, shutdown); |
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 3e3ce2b0424e..8d2875b4c56d 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c | |||
@@ -12,13 +12,11 @@ | |||
12 | * more details. | 12 | * more details. |
13 | */ | 13 | */ |
14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
15 | #include <linux/delay.h> | ||
16 | #include <linux/module.h> | 15 | #include <linux/module.h> |
17 | #include <linux/init.h> | 16 | #include <linux/init.h> |
18 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
19 | #include <linux/err.h> | 18 | #include <linux/err.h> |
20 | #include <linux/string.h> | 19 | #include <linux/string.h> |
21 | #include <linux/jiffies.h> | ||
22 | #include <linux/atomic.h> | 20 | #include <linux/atomic.h> |
23 | #include <linux/blk-mq.h> | 21 | #include <linux/blk-mq.h> |
24 | #include <linux/types.h> | 22 | #include <linux/types.h> |
@@ -26,7 +24,6 @@ | |||
26 | #include <linux/mutex.h> | 24 | #include <linux/mutex.h> |
27 | #include <linux/scatterlist.h> | 25 | #include <linux/scatterlist.h> |
28 | #include <linux/nvme.h> | 26 | #include <linux/nvme.h> |
29 | #include <linux/t10-pi.h> | ||
30 | #include <asm/unaligned.h> | 27 | #include <asm/unaligned.h> |
31 | 28 | ||
32 | #include <rdma/ib_verbs.h> | 29 | #include <rdma/ib_verbs.h> |
@@ -169,7 +166,6 @@ MODULE_PARM_DESC(register_always, | |||
169 | static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, | 166 | static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, |
170 | struct rdma_cm_event *event); | 167 | struct rdma_cm_event *event); |
171 | static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc); | 168 | static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc); |
172 | static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl); | ||
173 | 169 | ||
174 | /* XXX: really should move to a generic header sooner or later.. */ | 170 | /* XXX: really should move to a generic header sooner or later.. */ |
175 | static inline void put_unaligned_le24(u32 val, u8 *p) | 171 | static inline void put_unaligned_le24(u32 val, u8 *p) |
@@ -687,11 +683,6 @@ static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl) | |||
687 | list_del(&ctrl->list); | 683 | list_del(&ctrl->list); |
688 | mutex_unlock(&nvme_rdma_ctrl_mutex); | 684 | mutex_unlock(&nvme_rdma_ctrl_mutex); |
689 | 685 | ||
690 | if (ctrl->ctrl.tagset) { | ||
691 | blk_cleanup_queue(ctrl->ctrl.connect_q); | ||
692 | blk_mq_free_tag_set(&ctrl->tag_set); | ||
693 | nvme_rdma_dev_put(ctrl->device); | ||
694 | } | ||
695 | kfree(ctrl->queues); | 686 | kfree(ctrl->queues); |
696 | nvmf_free_options(nctrl->opts); | 687 | nvmf_free_options(nctrl->opts); |
697 | free_ctrl: | 688 | free_ctrl: |
@@ -748,8 +739,11 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work) | |||
748 | changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); | 739 | changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); |
749 | WARN_ON_ONCE(!changed); | 740 | WARN_ON_ONCE(!changed); |
750 | 741 | ||
751 | if (ctrl->queue_count > 1) | 742 | if (ctrl->queue_count > 1) { |
752 | nvme_start_queues(&ctrl->ctrl); | 743 | nvme_start_queues(&ctrl->ctrl); |
744 | nvme_queue_scan(&ctrl->ctrl); | ||
745 | nvme_queue_async_events(&ctrl->ctrl); | ||
746 | } | ||
753 | 747 | ||
754 | dev_info(ctrl->ctrl.device, "Successfully reconnected\n"); | 748 | dev_info(ctrl->ctrl.device, "Successfully reconnected\n"); |
755 | 749 | ||
@@ -1269,7 +1263,7 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue) | |||
1269 | { | 1263 | { |
1270 | struct nvme_rdma_ctrl *ctrl = queue->ctrl; | 1264 | struct nvme_rdma_ctrl *ctrl = queue->ctrl; |
1271 | struct rdma_conn_param param = { }; | 1265 | struct rdma_conn_param param = { }; |
1272 | struct nvme_rdma_cm_req priv; | 1266 | struct nvme_rdma_cm_req priv = { }; |
1273 | int ret; | 1267 | int ret; |
1274 | 1268 | ||
1275 | param.qp_num = queue->qp->qp_num; | 1269 | param.qp_num = queue->qp->qp_num; |
@@ -1318,37 +1312,39 @@ out_destroy_queue_ib: | |||
1318 | * that caught the event. Since we hold the callout until the controller | 1312 | * that caught the event. Since we hold the callout until the controller |
1319 | * deletion is completed, we'll deadlock if the controller deletion were | 1313 | * deletion is completed, we'll deadlock if the controller deletion were |
1320 | * to call rdma_destroy_id on this queue's cm_id. Thus, we claim ownership | 1314 | * to call rdma_destroy_id on this queue's cm_id. Thus, we claim ownership |
1321 | * of destroying this queue beforehand, destroy the queue resources | 1315 | * of destroying this queue beforehand, destroy the queue resources, |
1322 | * after the controller deletion completed with the exception of destroying | 1316 | * then queue the controller deletion which won't destroy this queue and |
1323 | * the cm_id implicitly by returning a non-zero rc to the callout. | 1317 | * we destroy the cm_id implicitly by returning a non-zero rc to the callout. |
1324 | */ | 1318 | */ |
1325 | static int nvme_rdma_device_unplug(struct nvme_rdma_queue *queue) | 1319 | static int nvme_rdma_device_unplug(struct nvme_rdma_queue *queue) |
1326 | { | 1320 | { |
1327 | struct nvme_rdma_ctrl *ctrl = queue->ctrl; | 1321 | struct nvme_rdma_ctrl *ctrl = queue->ctrl; |
1328 | int ret, ctrl_deleted = 0; | 1322 | int ret; |
1329 | 1323 | ||
1330 | /* First disable the queue so ctrl delete won't free it */ | 1324 | /* Own the controller deletion */ |
1331 | if (!test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags)) | 1325 | if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING)) |
1332 | goto out; | 1326 | return 0; |
1333 | 1327 | ||
1334 | /* delete the controller */ | 1328 | dev_warn(ctrl->ctrl.device, |
1335 | ret = __nvme_rdma_del_ctrl(ctrl); | 1329 | "Got rdma device removal event, deleting ctrl\n"); |
1336 | if (!ret) { | ||
1337 | dev_warn(ctrl->ctrl.device, | ||
1338 | "Got rdma device removal event, deleting ctrl\n"); | ||
1339 | flush_work(&ctrl->delete_work); | ||
1340 | 1330 | ||
1341 | /* Return non-zero so the cm_id will destroy implicitly */ | 1331 | /* Get rid of reconnect work if it's running */ |
1342 | ctrl_deleted = 1; | 1332 | cancel_delayed_work_sync(&ctrl->reconnect_work); |
1343 | 1333 | ||
1334 | /* Disable the queue so ctrl delete won't free it */ | ||
1335 | if (test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags)) { | ||
1344 | /* Free this queue ourselves */ | 1336 | /* Free this queue ourselves */ |
1345 | rdma_disconnect(queue->cm_id); | 1337 | nvme_rdma_stop_queue(queue); |
1346 | ib_drain_qp(queue->qp); | ||
1347 | nvme_rdma_destroy_queue_ib(queue); | 1338 | nvme_rdma_destroy_queue_ib(queue); |
1339 | |||
1340 | /* Return non-zero so the cm_id will destroy implicitly */ | ||
1341 | ret = 1; | ||
1348 | } | 1342 | } |
1349 | 1343 | ||
1350 | out: | 1344 | /* Queue controller deletion */ |
1351 | return ctrl_deleted; | 1345 | queue_work(nvme_rdma_wq, &ctrl->delete_work); |
1346 | flush_work(&ctrl->delete_work); | ||
1347 | return ret; | ||
1352 | } | 1348 | } |
1353 | 1349 | ||
1354 | static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, | 1350 | static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, |
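
Reworked nvme_rdma_device_unplug() elects an owner: whichever caller wins the transition to NVME_CTRL_DELETING performs the teardown, and every loser returns immediately. The election is morally a compare-and-swap on the controller state, as in this sketch:

#include <stdatomic.h>
#include <stdbool.h>

enum { SKETCH_LIVE, SKETCH_DELETING };
static atomic_int ctrl_state_sketch = SKETCH_LIVE;

/* Exactly one caller wins LIVE -> DELETING and therefore owns the
 * controller teardown; every other racer bails out immediately. */
static bool claim_teardown(void)
{
        int expected = SKETCH_LIVE;

        return atomic_compare_exchange_strong(&ctrl_state_sketch,
                                              &expected, SKETCH_DELETING);
}
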
@@ -1648,7 +1644,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl) | |||
1648 | nvme_rdma_free_io_queues(ctrl); | 1644 | nvme_rdma_free_io_queues(ctrl); |
1649 | } | 1645 | } |
1650 | 1646 | ||
1651 | if (ctrl->ctrl.state == NVME_CTRL_LIVE) | 1647 | if (test_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[0].flags)) |
1652 | nvme_shutdown_ctrl(&ctrl->ctrl); | 1648 | nvme_shutdown_ctrl(&ctrl->ctrl); |
1653 | 1649 | ||
1654 | blk_mq_stop_hw_queues(ctrl->ctrl.admin_q); | 1650 | blk_mq_stop_hw_queues(ctrl->ctrl.admin_q); |
@@ -1657,15 +1653,27 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl) | |||
1657 | nvme_rdma_destroy_admin_queue(ctrl); | 1653 | nvme_rdma_destroy_admin_queue(ctrl); |
1658 | } | 1654 | } |
1659 | 1655 | ||
1656 | static void __nvme_rdma_remove_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown) | ||
1657 | { | ||
1658 | nvme_uninit_ctrl(&ctrl->ctrl); | ||
1659 | if (shutdown) | ||
1660 | nvme_rdma_shutdown_ctrl(ctrl); | ||
1661 | |||
1662 | if (ctrl->ctrl.tagset) { | ||
1663 | blk_cleanup_queue(ctrl->ctrl.connect_q); | ||
1664 | blk_mq_free_tag_set(&ctrl->tag_set); | ||
1665 | nvme_rdma_dev_put(ctrl->device); | ||
1666 | } | ||
1667 | |||
1668 | nvme_put_ctrl(&ctrl->ctrl); | ||
1669 | } | ||
1670 | |||
1660 | static void nvme_rdma_del_ctrl_work(struct work_struct *work) | 1671 | static void nvme_rdma_del_ctrl_work(struct work_struct *work) |
1661 | { | 1672 | { |
1662 | struct nvme_rdma_ctrl *ctrl = container_of(work, | 1673 | struct nvme_rdma_ctrl *ctrl = container_of(work, |
1663 | struct nvme_rdma_ctrl, delete_work); | 1674 | struct nvme_rdma_ctrl, delete_work); |
1664 | 1675 | ||
1665 | nvme_remove_namespaces(&ctrl->ctrl); | 1676 | __nvme_rdma_remove_ctrl(ctrl, true); |
1666 | nvme_rdma_shutdown_ctrl(ctrl); | ||
1667 | nvme_uninit_ctrl(&ctrl->ctrl); | ||
1668 | nvme_put_ctrl(&ctrl->ctrl); | ||
1669 | } | 1677 | } |
1670 | 1678 | ||
1671 | static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl) | 1679 | static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl) |
@@ -1698,9 +1706,7 @@ static void nvme_rdma_remove_ctrl_work(struct work_struct *work) | |||
1698 | struct nvme_rdma_ctrl *ctrl = container_of(work, | 1706 | struct nvme_rdma_ctrl *ctrl = container_of(work, |
1699 | struct nvme_rdma_ctrl, delete_work); | 1707 | struct nvme_rdma_ctrl, delete_work); |
1700 | 1708 | ||
1701 | nvme_remove_namespaces(&ctrl->ctrl); | 1709 | __nvme_rdma_remove_ctrl(ctrl, false); |
1702 | nvme_uninit_ctrl(&ctrl->ctrl); | ||
1703 | nvme_put_ctrl(&ctrl->ctrl); | ||
1704 | } | 1710 | } |
1705 | 1711 | ||
1706 | static void nvme_rdma_reset_ctrl_work(struct work_struct *work) | 1712 | static void nvme_rdma_reset_ctrl_work(struct work_struct *work) |
@@ -1739,6 +1745,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work) | |||
1739 | if (ctrl->queue_count > 1) { | 1745 | if (ctrl->queue_count > 1) { |
1740 | nvme_start_queues(&ctrl->ctrl); | 1746 | nvme_start_queues(&ctrl->ctrl); |
1741 | nvme_queue_scan(&ctrl->ctrl); | 1747 | nvme_queue_scan(&ctrl->ctrl); |
1748 | nvme_queue_async_events(&ctrl->ctrl); | ||
1742 | } | 1749 | } |
1743 | 1750 | ||
1744 | return; | 1751 | return; |
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 2fac17a5ad53..47c564b5a289 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c | |||
@@ -13,7 +13,6 @@ | |||
13 | */ | 13 | */ |
14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/random.h> | ||
17 | #include <generated/utsrelease.h> | 16 | #include <generated/utsrelease.h> |
18 | #include "nvmet.h" | 17 | #include "nvmet.h" |
19 | 18 | ||
@@ -83,7 +82,6 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) | |||
83 | { | 82 | { |
84 | struct nvmet_ctrl *ctrl = req->sq->ctrl; | 83 | struct nvmet_ctrl *ctrl = req->sq->ctrl; |
85 | struct nvme_id_ctrl *id; | 84 | struct nvme_id_ctrl *id; |
86 | u64 serial; | ||
87 | u16 status = 0; | 85 | u16 status = 0; |
88 | 86 | ||
89 | id = kzalloc(sizeof(*id), GFP_KERNEL); | 87 | id = kzalloc(sizeof(*id), GFP_KERNEL); |
@@ -96,10 +94,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) | |||
96 | id->vid = 0; | 94 | id->vid = 0; |
97 | id->ssvid = 0; | 95 | id->ssvid = 0; |
98 | 96 | ||
99 | /* generate a random serial number as our controllers are ephemeral: */ | ||
100 | get_random_bytes(&serial, sizeof(serial)); | ||
101 | memset(id->sn, ' ', sizeof(id->sn)); | 97 | memset(id->sn, ' ', sizeof(id->sn)); |
102 | snprintf(id->sn, sizeof(id->sn), "%llx", serial); | 98 | snprintf(id->sn, sizeof(id->sn), "%llx", ctrl->serial); |
103 | 99 | ||
104 | memset(id->mn, ' ', sizeof(id->mn)); | 100 | memset(id->mn, ' ', sizeof(id->mn)); |
105 | strncpy((char *)id->mn, "Linux", sizeof(id->mn)); | 101 | strncpy((char *)id->mn, "Linux", sizeof(id->mn)); |
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 8a891ca53367..6559d5afa7bf 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c | |||
@@ -13,6 +13,7 @@ | |||
13 | */ | 13 | */ |
14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/random.h> | ||
16 | #include "nvmet.h" | 17 | #include "nvmet.h" |
17 | 18 | ||
18 | static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX]; | 19 | static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX]; |
@@ -728,6 +729,9 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, | |||
728 | memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE); | 729 | memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE); |
729 | memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE); | 730 | memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE); |
730 | 731 | ||
732 | /* generate a random serial number as our controllers are ephemeral: */ | ||
733 | get_random_bytes(&ctrl->serial, sizeof(ctrl->serial)); | ||
734 | |||
731 | kref_init(&ctrl->ref); | 735 | kref_init(&ctrl->ref); |
732 | ctrl->subsys = subsys; | 736 | ctrl->subsys = subsys; |
733 | 737 | ||
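
Generating the serial once in nvmet_alloc_ctrl() and keeping it in nvmet_ctrl means repeated Identify commands now report one stable value per controller; the deleted admin-cmd.c code rolled a fresh random serial on every query. In miniature, with illustrative names:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct ctrl_sketch { uint64_t serial; };

static struct ctrl_sketch *alloc_ctrl_sketch(void)
{
        struct ctrl_sketch *c = calloc(1, sizeof(*c));

        if (c) /* stand-in for get_random_bytes(&ctrl->serial, ...) */
                c->serial = ((uint64_t)rand() << 32) | (uint32_t)rand();
        return c;
}

/* Identify can run any number of times and always formats the same
 * serial, matching the admin-cmd.c hunk above. */
static void identify_sketch(const struct ctrl_sketch *c, char *sn, int len)
{
        snprintf(sn, len, "%llx", (unsigned long long)c->serial);
}
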
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 94e782987cc9..7affd40a6b33 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c | |||
@@ -414,9 +414,8 @@ static void nvme_loop_del_ctrl_work(struct work_struct *work) | |||
414 | struct nvme_loop_ctrl *ctrl = container_of(work, | 414 | struct nvme_loop_ctrl *ctrl = container_of(work, |
415 | struct nvme_loop_ctrl, delete_work); | 415 | struct nvme_loop_ctrl, delete_work); |
416 | 416 | ||
417 | nvme_remove_namespaces(&ctrl->ctrl); | ||
418 | nvme_loop_shutdown_ctrl(ctrl); | ||
419 | nvme_uninit_ctrl(&ctrl->ctrl); | 417 | nvme_uninit_ctrl(&ctrl->ctrl); |
418 | nvme_loop_shutdown_ctrl(ctrl); | ||
420 | nvme_put_ctrl(&ctrl->ctrl); | 419 | nvme_put_ctrl(&ctrl->ctrl); |
421 | } | 420 | } |
422 | 421 | ||
@@ -501,7 +500,6 @@ out_free_queues: | |||
501 | nvme_loop_destroy_admin_queue(ctrl); | 500 | nvme_loop_destroy_admin_queue(ctrl); |
502 | out_disable: | 501 | out_disable: |
503 | dev_warn(ctrl->ctrl.device, "Removing after reset failure\n"); | 502 | dev_warn(ctrl->ctrl.device, "Removing after reset failure\n"); |
504 | nvme_remove_namespaces(&ctrl->ctrl); | ||
505 | nvme_uninit_ctrl(&ctrl->ctrl); | 503 | nvme_uninit_ctrl(&ctrl->ctrl); |
506 | nvme_put_ctrl(&ctrl->ctrl); | 504 | nvme_put_ctrl(&ctrl->ctrl); |
507 | } | 505 | } |
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 57dd6d834c28..76b6eedccaf9 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h | |||
@@ -113,6 +113,7 @@ struct nvmet_ctrl { | |||
113 | 113 | ||
114 | struct mutex lock; | 114 | struct mutex lock; |
115 | u64 cap; | 115 | u64 cap; |
116 | u64 serial; | ||
116 | u32 cc; | 117 | u32 cc; |
117 | u32 csts; | 118 | u32 csts; |
118 | 119 | ||
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index e06d504bdf0c..b4d648536c3e 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c | |||
@@ -77,6 +77,7 @@ enum nvmet_rdma_queue_state { | |||
77 | NVMET_RDMA_Q_CONNECTING, | 77 | NVMET_RDMA_Q_CONNECTING, |
78 | NVMET_RDMA_Q_LIVE, | 78 | NVMET_RDMA_Q_LIVE, |
79 | NVMET_RDMA_Q_DISCONNECTING, | 79 | NVMET_RDMA_Q_DISCONNECTING, |
80 | NVMET_RDMA_IN_DEVICE_REMOVAL, | ||
80 | }; | 81 | }; |
81 | 82 | ||
82 | struct nvmet_rdma_queue { | 83 | struct nvmet_rdma_queue { |
@@ -615,15 +616,10 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp, | |||
615 | if (!len) | 616 | if (!len) |
616 | return 0; | 617 | return 0; |
617 | 618 | ||
618 | /* use the already allocated data buffer if possible */ | 619 | status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt, |
619 | if (len <= NVMET_RDMA_INLINE_DATA_SIZE && rsp->queue->host_qid) { | 620 | len); |
620 | nvmet_rdma_use_inline_sg(rsp, len, 0); | 621 | if (status) |
621 | } else { | 622 | return status; |
622 | status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt, | ||
623 | len); | ||
624 | if (status) | ||
625 | return status; | ||
626 | } | ||
627 | 623 | ||
628 | ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num, | 624 | ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num, |
629 | rsp->req.sg, rsp->req.sg_cnt, 0, addr, key, | 625 | rsp->req.sg, rsp->req.sg_cnt, 0, addr, key, |
@@ -984,7 +980,10 @@ static void nvmet_rdma_release_queue_work(struct work_struct *w) | |||
984 | struct nvmet_rdma_device *dev = queue->dev; | 980 | struct nvmet_rdma_device *dev = queue->dev; |
985 | 981 | ||
986 | nvmet_rdma_free_queue(queue); | 982 | nvmet_rdma_free_queue(queue); |
987 | rdma_destroy_id(cm_id); | 983 | |
984 | if (queue->state != NVMET_RDMA_IN_DEVICE_REMOVAL) | ||
985 | rdma_destroy_id(cm_id); | ||
986 | |||
988 | kref_put(&dev->ref, nvmet_rdma_free_dev); | 987 | kref_put(&dev->ref, nvmet_rdma_free_dev); |
989 | } | 988 | } |
990 | 989 | ||
@@ -1233,8 +1232,9 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) | |||
1233 | switch (queue->state) { | 1232 | switch (queue->state) { |
1234 | case NVMET_RDMA_Q_CONNECTING: | 1233 | case NVMET_RDMA_Q_CONNECTING: |
1235 | case NVMET_RDMA_Q_LIVE: | 1234 | case NVMET_RDMA_Q_LIVE: |
1236 | disconnect = true; | ||
1237 | queue->state = NVMET_RDMA_Q_DISCONNECTING; | 1235 | queue->state = NVMET_RDMA_Q_DISCONNECTING; |
1236 | case NVMET_RDMA_IN_DEVICE_REMOVAL: | ||
1237 | disconnect = true; | ||
1238 | break; | 1238 | break; |
1239 | case NVMET_RDMA_Q_DISCONNECTING: | 1239 | case NVMET_RDMA_Q_DISCONNECTING: |
1240 | break; | 1240 | break; |
@@ -1272,6 +1272,62 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id, | |||
1272 | schedule_work(&queue->release_work); | 1272 | schedule_work(&queue->release_work); |
1273 | } | 1273 | } |
1274 | 1274 | ||
1275 | /** | ||
1276 | * nvmet_rdma_device_removal() - Handle RDMA device removal | ||
1277 | * @cm_id: rdma_cm id (its context is the nvmet port) | ||
1278 | * @queue: nvmet rdma queue (the cm_id's qp_context) | ||
1279 | * | ||
1280 | * A DEVICE_REMOVAL event notifies us that the RDMA device is about | ||
1281 | * to unplug, so we should destroy our RDMA resources. | ||
1282 | * This event will be generated for each allocated cm_id. | ||
1283 | * | ||
1284 | * Note that this event can be generated on a normal queue cm_id | ||
1285 | * and/or a device-bound listener cm_id (in which case | ||
1286 | * queue will be NULL). | ||
1287 | * | ||
1288 | * We claim ownership of destroying the cm_id. For queues we move | ||
1289 | * the queue state to NVMET_RDMA_IN_DEVICE_REMOVAL; for a port | ||
1290 | * we nullify the priv to prevent a double cm_id destruction and | ||
1291 | * destroy the cm_id implicitly by returning a non-zero rc to the callout. | ||
1292 | */ | ||
1293 | static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id, | ||
1294 | struct nvmet_rdma_queue *queue) | ||
1295 | { | ||
1296 | unsigned long flags; | ||
1297 | |||
1298 | if (!queue) { | ||
1299 | struct nvmet_port *port = cm_id->context; | ||
1300 | |||
1301 | /* | ||
1302 | * This is a listener cm_id. Make sure that | ||
1303 | * future remove_port won't invoke a double | ||
1304 | * cm_id destroy. Use an atomic xchg to make sure | ||
1305 | * we don't race with remove_port. | ||
1306 | */ | ||
1307 | if (xchg(&port->priv, NULL) != cm_id) | ||
1308 | return 0; | ||
1309 | } else { | ||
1310 | /* | ||
1311 | * This is a queue cm_id. Make sure that | ||
1312 | * the queue release will not destroy the cm_id, | ||
1313 | * and schedule removal of all ctrl queues (only | ||
1314 | * if the queue is not already disconnecting). | ||
1315 | */ | ||
1316 | spin_lock_irqsave(&queue->state_lock, flags); | ||
1317 | if (queue->state != NVMET_RDMA_Q_DISCONNECTING) | ||
1318 | queue->state = NVMET_RDMA_IN_DEVICE_REMOVAL; | ||
1319 | spin_unlock_irqrestore(&queue->state_lock, flags); | ||
1320 | nvmet_rdma_queue_disconnect(queue); | ||
1321 | flush_scheduled_work(); | ||
1322 | } | ||
1323 | |||
1324 | /* | ||
1325 | * We need to return 1 so that the core will destroy | ||
1326 | * its own ID. What a great API design... | ||
1327 | */ | ||
1328 | return 1; | ||
1329 | } | ||
1330 | |||
1275 | static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id, | 1331 | static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id, |
1276 | struct rdma_cm_event *event) | 1332 | struct rdma_cm_event *event) |
1277 | { | 1333 | { |
@@ -1294,20 +1350,11 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id, | |||
1294 | break; | 1350 | break; |
1295 | case RDMA_CM_EVENT_ADDR_CHANGE: | 1351 | case RDMA_CM_EVENT_ADDR_CHANGE: |
1296 | case RDMA_CM_EVENT_DISCONNECTED: | 1352 | case RDMA_CM_EVENT_DISCONNECTED: |
1297 | case RDMA_CM_EVENT_DEVICE_REMOVAL: | ||
1298 | case RDMA_CM_EVENT_TIMEWAIT_EXIT: | 1353 | case RDMA_CM_EVENT_TIMEWAIT_EXIT: |
1299 | /* | 1354 | nvmet_rdma_queue_disconnect(queue); |
1300 | * We can get the device removal callback even for a | 1355 | break; |
1301 | * CM ID that we aren't actually using. In that case | 1356 | case RDMA_CM_EVENT_DEVICE_REMOVAL: |
1302 | * the context pointer is NULL, so we shouldn't try | 1357 | ret = nvmet_rdma_device_removal(cm_id, queue); |
1303 | * to disconnect a non-existing queue. But we also | ||
1304 | * need to return 1 so that the core will destroy | ||
1305 | * it's own ID. What a great API design.. | ||
1306 | */ | ||
1307 | if (queue) | ||
1308 | nvmet_rdma_queue_disconnect(queue); | ||
1309 | else | ||
1310 | ret = 1; | ||
1311 | break; | 1358 | break; |
1312 | case RDMA_CM_EVENT_REJECTED: | 1359 | case RDMA_CM_EVENT_REJECTED: |
1313 | case RDMA_CM_EVENT_UNREACHABLE: | 1360 | case RDMA_CM_EVENT_UNREACHABLE: |
@@ -1396,9 +1443,10 @@ out_destroy_id: | |||
1396 | 1443 | ||
1397 | static void nvmet_rdma_remove_port(struct nvmet_port *port) | 1444 | static void nvmet_rdma_remove_port(struct nvmet_port *port) |
1398 | { | 1445 | { |
1399 | struct rdma_cm_id *cm_id = port->priv; | 1446 | struct rdma_cm_id *cm_id = xchg(&port->priv, NULL); |
1400 | 1447 | ||
1401 | rdma_destroy_id(cm_id); | 1448 | if (cm_id) |
1449 | rdma_destroy_id(cm_id); | ||
1402 | } | 1450 | } |
1403 | 1451 | ||
1404 | static struct nvmet_fabrics_ops nvmet_rdma_ops = { | 1452 | static struct nvmet_fabrics_ops nvmet_rdma_ops = { |
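Both the listener branch in nvmet_rdma_device_removal() and the reworked nvmet_rdma_remove_port() above rely on the same idiom: an atomic xchg() decides which of two racing paths owns the final teardown. A minimal, hedged sketch of the idiom with hypothetical names:

    static void my_remove_port(struct my_port *port)    /* hypothetical */
    {
            /* whichever path xchg()s the pointer out first owns the
             * teardown; the loser reads NULL and must not destroy it */
            struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);

            if (cm_id)
                    rdma_destroy_id(cm_id);
    }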
diff --git a/drivers/of/base.c b/drivers/of/base.c index 7792266db259..3ce69536a7b3 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c | |||
@@ -1631,8 +1631,7 @@ static int __of_parse_phandle_with_args(const struct device_node *np, | |||
1631 | */ | 1631 | */ |
1632 | 1632 | ||
1633 | err: | 1633 | err: |
1634 | if (it.node) | 1634 | of_node_put(it.node); |
1635 | of_node_put(it.node); | ||
1636 | return rc; | 1635 | return rc; |
1637 | } | 1636 | } |
1638 | 1637 | ||
@@ -2343,20 +2342,13 @@ struct device_node *of_graph_get_endpoint_by_regs( | |||
2343 | const struct device_node *parent, int port_reg, int reg) | 2342 | const struct device_node *parent, int port_reg, int reg) |
2344 | { | 2343 | { |
2345 | struct of_endpoint endpoint; | 2344 | struct of_endpoint endpoint; |
2346 | struct device_node *node, *prev_node = NULL; | 2345 | struct device_node *node = NULL; |
2347 | |||
2348 | while (1) { | ||
2349 | node = of_graph_get_next_endpoint(parent, prev_node); | ||
2350 | of_node_put(prev_node); | ||
2351 | if (!node) | ||
2352 | break; | ||
2353 | 2346 | ||
2347 | for_each_endpoint_of_node(parent, node) { | ||
2354 | of_graph_parse_endpoint(node, &endpoint); | 2348 | of_graph_parse_endpoint(node, &endpoint); |
2355 | if (((port_reg == -1) || (endpoint.port == port_reg)) && | 2349 | if (((port_reg == -1) || (endpoint.port == port_reg)) && |
2356 | ((reg == -1) || (endpoint.id == reg))) | 2350 | ((reg == -1) || (endpoint.id == reg))) |
2357 | return node; | 2351 | return node; |
2358 | |||
2359 | prev_node = node; | ||
2360 | } | 2352 | } |
2361 | 2353 | ||
2362 | return NULL; | 2354 | return NULL; |
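The rewrite above replaces the open-coded of_graph_get_next_endpoint() loop with the for_each_endpoint_of_node() helper, which drops the reference on the previous node internally on each step. A hedged usage sketch (the predicate name is hypothetical):

    struct device_node *ep;

    for_each_endpoint_of_node(parent, ep) {
            if (my_endpoint_matches(ep))    /* hypothetical predicate */
                    return ep;              /* the iterator holds a reference;
                                             * the caller must of_node_put() it */
    }
    return NULL;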
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 55f1b8391149..085c6389afd1 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c | |||
@@ -517,7 +517,7 @@ static void *__unflatten_device_tree(const void *blob, | |||
517 | pr_warning("End of tree marker overwritten: %08x\n", | 517 | pr_warning("End of tree marker overwritten: %08x\n", |
518 | be32_to_cpup(mem + size)); | 518 | be32_to_cpup(mem + size)); |
519 | 519 | ||
520 | if (detached) { | 520 | if (detached && mynodes) { |
521 | of_node_set_flag(*mynodes, OF_DETACHED); | 521 | of_node_set_flag(*mynodes, OF_DETACHED); |
522 | pr_debug("unflattened tree is detached\n"); | 522 | pr_debug("unflattened tree is detached\n"); |
523 | } | 523 | } |
diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 89a71c6074fc..a2e68f740eda 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c | |||
@@ -544,12 +544,15 @@ void __init of_irq_init(const struct of_device_id *matches) | |||
544 | 544 | ||
545 | list_del(&desc->list); | 545 | list_del(&desc->list); |
546 | 546 | ||
547 | of_node_set_flag(desc->dev, OF_POPULATED); | ||
548 | |||
547 | pr_debug("of_irq_init: init %s (%p), parent %p\n", | 549 | pr_debug("of_irq_init: init %s (%p), parent %p\n", |
548 | desc->dev->full_name, | 550 | desc->dev->full_name, |
549 | desc->dev, desc->interrupt_parent); | 551 | desc->dev, desc->interrupt_parent); |
550 | ret = desc->irq_init_cb(desc->dev, | 552 | ret = desc->irq_init_cb(desc->dev, |
551 | desc->interrupt_parent); | 553 | desc->interrupt_parent); |
552 | if (ret) { | 554 | if (ret) { |
555 | of_node_clear_flag(desc->dev, OF_POPULATED); | ||
553 | kfree(desc); | 556 | kfree(desc); |
554 | continue; | 557 | continue; |
555 | } | 558 | } |
@@ -559,8 +562,6 @@ void __init of_irq_init(const struct of_device_id *matches) | |||
559 | * its children can get processed in a subsequent pass. | 562 | * its children can get processed in a subsequent pass. |
560 | */ | 563 | */ |
561 | list_add_tail(&desc->list, &intc_parent_list); | 564 | list_add_tail(&desc->list, &intc_parent_list); |
562 | |||
563 | of_node_set_flag(desc->dev, OF_POPULATED); | ||
564 | } | 565 | } |
565 | 566 | ||
566 | /* Get the next pending parent that might have children */ | 567 | /* Get the next pending parent that might have children */ |
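The of_irq_init() change above sets OF_POPULATED before calling the init callback and clears it again if the callback fails, so a node is never left claimed by a controller that never came up. The general claim-then-roll-back shape, sketched with a hypothetical callback:

    of_node_set_flag(np, OF_POPULATED);     /* claim the node up front */

    ret = my_init_cb(np, parent);           /* hypothetical init callback */
    if (ret) {
            /* failure: release the claim so the node can be picked up
             * again later (e.g. as a platform device) */
            of_node_clear_flag(np, OF_POPULATED);
            return ret;
    }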
diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 8aa197691074..f39ccd5aa701 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c | |||
@@ -497,6 +497,7 @@ int of_platform_default_populate(struct device_node *root, | |||
497 | } | 497 | } |
498 | EXPORT_SYMBOL_GPL(of_platform_default_populate); | 498 | EXPORT_SYMBOL_GPL(of_platform_default_populate); |
499 | 499 | ||
500 | #ifndef CONFIG_PPC | ||
500 | static int __init of_platform_default_populate_init(void) | 501 | static int __init of_platform_default_populate_init(void) |
501 | { | 502 | { |
502 | struct device_node *node; | 503 | struct device_node *node; |
@@ -521,6 +522,7 @@ static int __init of_platform_default_populate_init(void) | |||
521 | return 0; | 522 | return 0; |
522 | } | 523 | } |
523 | arch_initcall_sync(of_platform_default_populate_init); | 524 | arch_initcall_sync(of_platform_default_populate_init); |
525 | #endif | ||
524 | 526 | ||
525 | static int of_platform_device_destroy(struct device *dev, void *data) | 527 | static int of_platform_device_destroy(struct device *dev, void *data) |
526 | { | 528 | { |
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index a02981efdad5..98f12223c734 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c | |||
@@ -1069,7 +1069,7 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, | |||
1069 | nvec = maxvec; | 1069 | nvec = maxvec; |
1070 | 1070 | ||
1071 | for (;;) { | 1071 | for (;;) { |
1072 | if (!(flags & PCI_IRQ_NOAFFINITY)) { | 1072 | if (flags & PCI_IRQ_AFFINITY) { |
1073 | dev->irq_affinity = irq_create_affinity_mask(&nvec); | 1073 | dev->irq_affinity = irq_create_affinity_mask(&nvec); |
1074 | if (nvec < minvec) | 1074 | if (nvec < minvec) |
1075 | return -ENOSPC; | 1075 | return -ENOSPC; |
@@ -1105,7 +1105,7 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, | |||
1105 | **/ | 1105 | **/ |
1106 | int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec) | 1106 | int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec) |
1107 | { | 1107 | { |
1108 | return __pci_enable_msi_range(dev, minvec, maxvec, PCI_IRQ_NOAFFINITY); | 1108 | return __pci_enable_msi_range(dev, minvec, maxvec, 0); |
1109 | } | 1109 | } |
1110 | EXPORT_SYMBOL(pci_enable_msi_range); | 1110 | EXPORT_SYMBOL(pci_enable_msi_range); |
1111 | 1111 | ||
@@ -1120,7 +1120,7 @@ static int __pci_enable_msix_range(struct pci_dev *dev, | |||
1120 | return -ERANGE; | 1120 | return -ERANGE; |
1121 | 1121 | ||
1122 | for (;;) { | 1122 | for (;;) { |
1123 | if (!(flags & PCI_IRQ_NOAFFINITY)) { | 1123 | if (flags & PCI_IRQ_AFFINITY) { |
1124 | dev->irq_affinity = irq_create_affinity_mask(&nvec); | 1124 | dev->irq_affinity = irq_create_affinity_mask(&nvec); |
1125 | if (nvec < minvec) | 1125 | if (nvec < minvec) |
1126 | return -ENOSPC; | 1126 | return -ENOSPC; |
@@ -1160,8 +1160,7 @@ static int __pci_enable_msix_range(struct pci_dev *dev, | |||
1160 | int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, | 1160 | int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, |
1161 | int minvec, int maxvec) | 1161 | int minvec, int maxvec) |
1162 | { | 1162 | { |
1163 | return __pci_enable_msix_range(dev, entries, minvec, maxvec, | 1163 | return __pci_enable_msix_range(dev, entries, minvec, maxvec, 0); |
1164 | PCI_IRQ_NOAFFINITY); | ||
1165 | } | 1164 | } |
1166 | EXPORT_SYMBOL(pci_enable_msix_range); | 1165 | EXPORT_SYMBOL(pci_enable_msix_range); |
1167 | 1166 | ||
@@ -1187,22 +1186,25 @@ int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, | |||
1187 | { | 1186 | { |
1188 | int vecs = -ENOSPC; | 1187 | int vecs = -ENOSPC; |
1189 | 1188 | ||
1190 | if (!(flags & PCI_IRQ_NOMSIX)) { | 1189 | if (flags & PCI_IRQ_MSIX) { |
1191 | vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs, | 1190 | vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs, |
1192 | flags); | 1191 | flags); |
1193 | if (vecs > 0) | 1192 | if (vecs > 0) |
1194 | return vecs; | 1193 | return vecs; |
1195 | } | 1194 | } |
1196 | 1195 | ||
1197 | if (!(flags & PCI_IRQ_NOMSI)) { | 1196 | if (flags & PCI_IRQ_MSI) { |
1198 | vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, flags); | 1197 | vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, flags); |
1199 | if (vecs > 0) | 1198 | if (vecs > 0) |
1200 | return vecs; | 1199 | return vecs; |
1201 | } | 1200 | } |
1202 | 1201 | ||
1203 | /* use legacy irq if allowed */ | 1202 | /* use legacy irq if allowed */ |
1204 | if (!(flags & PCI_IRQ_NOLEGACY) && min_vecs == 1) | 1203 | if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1) { |
1204 | pci_intx(dev, 1); | ||
1205 | return 1; | 1205 | return 1; |
1206 | } | ||
1207 | |||
1206 | return vecs; | 1208 | return vecs; |
1207 | } | 1209 | } |
1208 | EXPORT_SYMBOL(pci_alloc_irq_vectors); | 1210 | EXPORT_SYMBOL(pci_alloc_irq_vectors); |
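With the flags inverted from opt-out (PCI_IRQ_NO*) to opt-in, a caller of pci_alloc_irq_vectors() now states which vector types it accepts. A hedged driver-side sketch (the vector counts are arbitrary):

    int nvec;

    /* try MSI-X first, then MSI, then fall back to a single legacy INTx */
    nvec = pci_alloc_irq_vectors(pdev, 1, 8,
                    PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
    if (nvec < 0)
            return nvec;    /* not even one vector could be allocated */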
@@ -1411,6 +1413,8 @@ struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode, | |||
1411 | if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) | 1413 | if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) |
1412 | pci_msi_domain_update_chip_ops(info); | 1414 | pci_msi_domain_update_chip_ops(info); |
1413 | 1415 | ||
1416 | info->flags |= MSI_FLAG_ACTIVATE_EARLY; | ||
1417 | |||
1414 | domain = msi_create_irq_domain(fwnode, info, parent); | 1418 | domain = msi_create_irq_domain(fwnode, info, parent); |
1415 | if (!domain) | 1419 | if (!domain) |
1416 | return NULL; | 1420 | return NULL; |
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 6ccb994bdfcb..c494613c1909 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c | |||
@@ -688,7 +688,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) | |||
688 | return 0; | 688 | return 0; |
689 | } | 689 | } |
690 | 690 | ||
691 | static DEFINE_MUTEX(arm_pmu_mutex); | 691 | static DEFINE_SPINLOCK(arm_pmu_lock); |
692 | static LIST_HEAD(arm_pmu_list); | 692 | static LIST_HEAD(arm_pmu_list); |
693 | 693 | ||
694 | /* | 694 | /* |
@@ -701,7 +701,7 @@ static int arm_perf_starting_cpu(unsigned int cpu) | |||
701 | { | 701 | { |
702 | struct arm_pmu *pmu; | 702 | struct arm_pmu *pmu; |
703 | 703 | ||
704 | mutex_lock(&arm_pmu_mutex); | 704 | spin_lock(&arm_pmu_lock); |
705 | list_for_each_entry(pmu, &arm_pmu_list, entry) { | 705 | list_for_each_entry(pmu, &arm_pmu_list, entry) { |
706 | 706 | ||
707 | if (!cpumask_test_cpu(cpu, &pmu->supported_cpus)) | 707 | if (!cpumask_test_cpu(cpu, &pmu->supported_cpus)) |
@@ -709,7 +709,7 @@ static int arm_perf_starting_cpu(unsigned int cpu) | |||
709 | if (pmu->reset) | 709 | if (pmu->reset) |
710 | pmu->reset(pmu); | 710 | pmu->reset(pmu); |
711 | } | 711 | } |
712 | mutex_unlock(&arm_pmu_mutex); | 712 | spin_unlock(&arm_pmu_lock); |
713 | return 0; | 713 | return 0; |
714 | } | 714 | } |
715 | 715 | ||
@@ -821,9 +821,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu) | |||
821 | if (!cpu_hw_events) | 821 | if (!cpu_hw_events) |
822 | return -ENOMEM; | 822 | return -ENOMEM; |
823 | 823 | ||
824 | mutex_lock(&arm_pmu_mutex); | 824 | spin_lock(&arm_pmu_lock); |
825 | list_add_tail(&cpu_pmu->entry, &arm_pmu_list); | 825 | list_add_tail(&cpu_pmu->entry, &arm_pmu_list); |
826 | mutex_unlock(&arm_pmu_mutex); | 826 | spin_unlock(&arm_pmu_lock); |
827 | 827 | ||
828 | err = cpu_pm_pmu_register(cpu_pmu); | 828 | err = cpu_pm_pmu_register(cpu_pmu); |
829 | if (err) | 829 | if (err) |
@@ -859,9 +859,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu) | |||
859 | return 0; | 859 | return 0; |
860 | 860 | ||
861 | out_unregister: | 861 | out_unregister: |
862 | mutex_lock(&arm_pmu_mutex); | 862 | spin_lock(&arm_pmu_lock); |
863 | list_del(&cpu_pmu->entry); | 863 | list_del(&cpu_pmu->entry); |
864 | mutex_unlock(&arm_pmu_mutex); | 864 | spin_unlock(&arm_pmu_lock); |
865 | free_percpu(cpu_hw_events); | 865 | free_percpu(cpu_hw_events); |
866 | return err; | 866 | return err; |
867 | } | 867 | } |
@@ -869,9 +869,9 @@ out_unregister: | |||
869 | static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu) | 869 | static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu) |
870 | { | 870 | { |
871 | cpu_pm_pmu_unregister(cpu_pmu); | 871 | cpu_pm_pmu_unregister(cpu_pmu); |
872 | mutex_lock(&arm_pmu_mutex); | 872 | spin_lock(&arm_pmu_lock); |
873 | list_del(&cpu_pmu->entry); | 873 | list_del(&cpu_pmu->entry); |
874 | mutex_unlock(&arm_pmu_mutex); | 874 | spin_unlock(&arm_pmu_lock); |
875 | free_percpu(cpu_pmu->hw_events); | 875 | free_percpu(cpu_pmu->hw_events); |
876 | } | 876 | } |
877 | 877 | ||
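The mutex-to-spinlock switch above matters because arm_perf_starting_cpu() runs in the CPU-hotplug starting phase, where the incoming CPU cannot sleep; taking a mutex there could schedule, a spinlock cannot. The resulting shape of such a callback, sketched with hypothetical names:

    static DEFINE_SPINLOCK(my_list_lock);   /* hypothetical */
    static LIST_HEAD(my_pmu_list);

    static int my_starting_cpu(unsigned int cpu)
    {
            struct my_pmu *pmu;

            /* starting callbacks run on the new CPU with IRQs off:
             * no sleeping locks allowed here */
            spin_lock(&my_list_lock);
            list_for_each_entry(pmu, &my_pmu_list, entry)
                    my_reset(pmu);          /* hypothetical per-PMU reset */
            spin_unlock(&my_list_lock);
            return 0;
    }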
@@ -967,11 +967,12 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu) | |||
967 | 967 | ||
968 | /* If we didn't manage to parse anything, try the interrupt affinity */ | 968 | /* If we didn't manage to parse anything, try the interrupt affinity */ |
969 | if (cpumask_weight(&pmu->supported_cpus) == 0) { | 969 | if (cpumask_weight(&pmu->supported_cpus) == 0) { |
970 | if (!using_spi) { | 970 | int irq = platform_get_irq(pdev, 0); |
971 | |||
972 | if (irq_is_percpu(irq)) { | ||
971 | /* If using PPIs, check the affinity of the partition */ | 973 | /* If using PPIs, check the affinity of the partition */ |
972 | int ret, irq; | 974 | int ret; |
973 | 975 | ||
974 | irq = platform_get_irq(pdev, 0); | ||
975 | ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus); | 976 | ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus); |
976 | if (ret) { | 977 | if (ret) { |
977 | kfree(irqs); | 978 | kfree(irqs); |
diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c index eb4990ff26ca..7fb765642ee7 100644 --- a/drivers/pinctrl/intel/pinctrl-merrifield.c +++ b/drivers/pinctrl/intel/pinctrl-merrifield.c | |||
@@ -11,6 +11,7 @@ | |||
11 | 11 | ||
12 | #include <linux/bitops.h> | 12 | #include <linux/bitops.h> |
13 | #include <linux/err.h> | 13 | #include <linux/err.h> |
14 | #include <linux/io.h> | ||
14 | #include <linux/module.h> | 15 | #include <linux/module.h> |
15 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
16 | #include <linux/pinctrl/pinconf.h> | 17 | #include <linux/pinctrl/pinconf.h> |
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c index 11623c6b0cb3..44e69c963f5d 100644 --- a/drivers/pinctrl/meson/pinctrl-meson.c +++ b/drivers/pinctrl/meson/pinctrl-meson.c | |||
@@ -727,13 +727,7 @@ static int meson_pinctrl_probe(struct platform_device *pdev) | |||
727 | return PTR_ERR(pc->pcdev); | 727 | return PTR_ERR(pc->pcdev); |
728 | } | 728 | } |
729 | 729 | ||
730 | ret = meson_gpiolib_register(pc); | 730 | return meson_gpiolib_register(pc); |
731 | if (ret) { | ||
732 | pinctrl_unregister(pc->pcdev); | ||
733 | return ret; | ||
734 | } | ||
735 | |||
736 | return 0; | ||
737 | } | 731 | } |
738 | 732 | ||
739 | static struct platform_driver meson_pinctrl_driver = { | 733 | static struct platform_driver meson_pinctrl_driver = { |
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index 634b4d30eefb..b3e772390ab6 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c | |||
@@ -43,17 +43,6 @@ static int amd_gpio_direction_input(struct gpio_chip *gc, unsigned offset) | |||
43 | 43 | ||
44 | spin_lock_irqsave(&gpio_dev->lock, flags); | 44 | spin_lock_irqsave(&gpio_dev->lock, flags); |
45 | pin_reg = readl(gpio_dev->base + offset * 4); | 45 | pin_reg = readl(gpio_dev->base + offset * 4); |
46 | /* | ||
47 | * Suppose BIOS or Bootloader sets specific debounce for the | ||
48 | * GPIO. if not, set debounce to be 2.75ms and remove glitch. | ||
49 | */ | ||
50 | if ((pin_reg & DB_TMR_OUT_MASK) == 0) { | ||
51 | pin_reg |= 0xf; | ||
52 | pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF); | ||
53 | pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF; | ||
54 | pin_reg &= ~BIT(DB_TMR_LARGE_OFF); | ||
55 | } | ||
56 | |||
57 | pin_reg &= ~BIT(OUTPUT_ENABLE_OFF); | 46 | pin_reg &= ~BIT(OUTPUT_ENABLE_OFF); |
58 | writel(pin_reg, gpio_dev->base + offset * 4); | 47 | writel(pin_reg, gpio_dev->base + offset * 4); |
59 | spin_unlock_irqrestore(&gpio_dev->lock, flags); | 48 | spin_unlock_irqrestore(&gpio_dev->lock, flags); |
@@ -326,15 +315,6 @@ static void amd_gpio_irq_enable(struct irq_data *d) | |||
326 | 315 | ||
327 | spin_lock_irqsave(&gpio_dev->lock, flags); | 316 | spin_lock_irqsave(&gpio_dev->lock, flags); |
328 | pin_reg = readl(gpio_dev->base + (d->hwirq)*4); | 317 | pin_reg = readl(gpio_dev->base + (d->hwirq)*4); |
329 | /* | ||
330 | Suppose BIOS or Bootloader sets specific debounce for the | ||
331 | GPIO. if not, set debounce to be 2.75ms. | ||
332 | */ | ||
333 | if ((pin_reg & DB_TMR_OUT_MASK) == 0) { | ||
334 | pin_reg |= 0xf; | ||
335 | pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF); | ||
336 | pin_reg &= ~BIT(DB_TMR_LARGE_OFF); | ||
337 | } | ||
338 | pin_reg |= BIT(INTERRUPT_ENABLE_OFF); | 318 | pin_reg |= BIT(INTERRUPT_ENABLE_OFF); |
339 | pin_reg |= BIT(INTERRUPT_MASK_OFF); | 319 | pin_reg |= BIT(INTERRUPT_MASK_OFF); |
340 | writel(pin_reg, gpio_dev->base + (d->hwirq)*4); | 320 | writel(pin_reg, gpio_dev->base + (d->hwirq)*4); |
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c index c6d410ef8de0..7bad200bd67c 100644 --- a/drivers/pinctrl/pinctrl-pistachio.c +++ b/drivers/pinctrl/pinctrl-pistachio.c | |||
@@ -1432,7 +1432,6 @@ static int pistachio_pinctrl_probe(struct platform_device *pdev) | |||
1432 | { | 1432 | { |
1433 | struct pistachio_pinctrl *pctl; | 1433 | struct pistachio_pinctrl *pctl; |
1434 | struct resource *res; | 1434 | struct resource *res; |
1435 | int ret; | ||
1436 | 1435 | ||
1437 | pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL); | 1436 | pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL); |
1438 | if (!pctl) | 1437 | if (!pctl) |
@@ -1464,13 +1463,7 @@ static int pistachio_pinctrl_probe(struct platform_device *pdev) | |||
1464 | return PTR_ERR(pctl->pctldev); | 1463 | return PTR_ERR(pctl->pctldev); |
1465 | } | 1464 | } |
1466 | 1465 | ||
1467 | ret = pistachio_gpio_register(pctl); | 1466 | return pistachio_gpio_register(pctl); |
1468 | if (ret < 0) { | ||
1469 | pinctrl_unregister(pctl->pctldev); | ||
1470 | return ret; | ||
1471 | } | ||
1472 | |||
1473 | return 0; | ||
1474 | } | 1467 | } |
1475 | 1468 | ||
1476 | static struct platform_driver pistachio_pinctrl_driver = { | 1469 | static struct platform_driver pistachio_pinctrl_driver = { |
diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c index 9c65f134d447..da7a75f82489 100644 --- a/drivers/power/max17042_battery.c +++ b/drivers/power/max17042_battery.c | |||
@@ -457,13 +457,16 @@ static inline void max17042_write_model_data(struct max17042_chip *chip, | |||
457 | } | 457 | } |
458 | 458 | ||
459 | static inline void max17042_read_model_data(struct max17042_chip *chip, | 459 | static inline void max17042_read_model_data(struct max17042_chip *chip, |
460 | u8 addr, u32 *data, int size) | 460 | u8 addr, u16 *data, int size) |
461 | { | 461 | { |
462 | struct regmap *map = chip->regmap; | 462 | struct regmap *map = chip->regmap; |
463 | int i; | 463 | int i; |
464 | u32 tmp; | ||
464 | 465 | ||
465 | for (i = 0; i < size; i++) | 466 | for (i = 0; i < size; i++) { |
466 | regmap_read(map, addr + i, &data[i]); | 467 | regmap_read(map, addr + i, &tmp); |
468 | data[i] = (u16)tmp; | ||
469 | } | ||
467 | } | 470 | } |
468 | 471 | ||
469 | static inline int max17042_model_data_compare(struct max17042_chip *chip, | 472 | static inline int max17042_model_data_compare(struct max17042_chip *chip, |
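regmap_read() always writes through a full unsigned int pointer, so the model table is now read into a u32 temporary and narrowed to the u16 the hardware actually holds, instead of keeping a double-width array and casting it for the compare. Sketched:

    u32 tmp;
    u16 val;
    int ret;

    ret = regmap_read(map, reg, &tmp);      /* fills a full unsigned int */
    if (ret)
            return ret;
    val = (u16)tmp;                         /* narrow to the 16-bit register */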
@@ -486,7 +489,7 @@ static int max17042_init_model(struct max17042_chip *chip) | |||
486 | { | 489 | { |
487 | int ret; | 490 | int ret; |
488 | int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl); | 491 | int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl); |
489 | u32 *temp_data; | 492 | u16 *temp_data; |
490 | 493 | ||
491 | temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL); | 494 | temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL); |
492 | if (!temp_data) | 495 | if (!temp_data) |
@@ -501,7 +504,7 @@ static int max17042_init_model(struct max17042_chip *chip) | |||
501 | ret = max17042_model_data_compare( | 504 | ret = max17042_model_data_compare( |
502 | chip, | 505 | chip, |
503 | chip->pdata->config_data->cell_char_tbl, | 506 | chip->pdata->config_data->cell_char_tbl, |
504 | (u16 *)temp_data, | 507 | temp_data, |
505 | table_size); | 508 | table_size); |
506 | 509 | ||
507 | max10742_lock_model(chip); | 510 | max10742_lock_model(chip); |
@@ -514,7 +517,7 @@ static int max17042_verify_model_lock(struct max17042_chip *chip) | |||
514 | { | 517 | { |
515 | int i; | 518 | int i; |
516 | int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl); | 519 | int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl); |
517 | u32 *temp_data; | 520 | u16 *temp_data; |
518 | int ret = 0; | 521 | int ret = 0; |
519 | 522 | ||
520 | temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL); | 523 | temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL); |
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig index 3bfac539334b..c74c3f67b8da 100644 --- a/drivers/power/reset/Kconfig +++ b/drivers/power/reset/Kconfig | |||
@@ -200,8 +200,8 @@ config REBOOT_MODE | |||
200 | config SYSCON_REBOOT_MODE | 200 | config SYSCON_REBOOT_MODE |
201 | tristate "Generic SYSCON regmap reboot mode driver" | 201 | tristate "Generic SYSCON regmap reboot mode driver" |
202 | depends on OF | 202 | depends on OF |
203 | depends on MFD_SYSCON | ||
203 | select REBOOT_MODE | 204 | select REBOOT_MODE |
204 | select MFD_SYSCON | ||
205 | help | 205 | help |
206 | Say y here to enable the reboot mode driver. This will | 206 | Say y here to enable the reboot mode driver. This will |
207 | get the reboot mode arguments and store them in the SYSCON mapped | 207 | get the reboot mode arguments and store them in the SYSCON mapped |
diff --git a/drivers/power/reset/hisi-reboot.c b/drivers/power/reset/hisi-reboot.c index 9ab7f562a83b..f69387e12c1e 100644 --- a/drivers/power/reset/hisi-reboot.c +++ b/drivers/power/reset/hisi-reboot.c | |||
@@ -53,13 +53,16 @@ static int hisi_reboot_probe(struct platform_device *pdev) | |||
53 | 53 | ||
54 | if (of_property_read_u32(np, "reboot-offset", &reboot_offset) < 0) { | 54 | if (of_property_read_u32(np, "reboot-offset", &reboot_offset) < 0) { |
55 | pr_err("failed to find reboot-offset property\n"); | 55 | pr_err("failed to find reboot-offset property\n"); |
56 | iounmap(base); | ||
56 | return -EINVAL; | 57 | return -EINVAL; |
57 | } | 58 | } |
58 | 59 | ||
59 | err = register_restart_handler(&hisi_restart_nb); | 60 | err = register_restart_handler(&hisi_restart_nb); |
60 | if (err) | 61 | if (err) { |
61 | dev_err(&pdev->dev, "cannot register restart handler (err=%d)\n", | 62 | dev_err(&pdev->dev, "cannot register restart handler (err=%d)\n", |
62 | err); | 63 | err); |
64 | iounmap(base); | ||
65 | } | ||
63 | 66 | ||
64 | return err; | 67 | return err; |
65 | } | 68 | } |
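Both error paths above previously returned without undoing the earlier ioremap(), leaking the mapping. The usual unwind shape, sketched with a hypothetical registration step:

    void __iomem *base;
    int err;

    base = of_iomap(np, 0);
    if (!base)
            return -ENOMEM;

    err = my_register_handler();            /* hypothetical step */
    if (err)
            iounmap(base);                  /* undo the mapping on failure */
    return err;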
diff --git a/drivers/power/tps65217_charger.c b/drivers/power/tps65217_charger.c index 73dfae41def8..4c56e54af6ac 100644 --- a/drivers/power/tps65217_charger.c +++ b/drivers/power/tps65217_charger.c | |||
@@ -206,6 +206,7 @@ static int tps65217_charger_probe(struct platform_device *pdev) | |||
206 | if (!charger) | 206 | if (!charger) |
207 | return -ENOMEM; | 207 | return -ENOMEM; |
208 | 208 | ||
209 | platform_set_drvdata(pdev, charger); | ||
209 | charger->tps = tps; | 210 | charger->tps = tps; |
210 | charger->dev = &pdev->dev; | 211 | charger->dev = &pdev->dev; |
211 | 212 | ||
diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c index cecc15a880de..3fa17ac8df54 100644 --- a/drivers/rapidio/rio_cm.c +++ b/drivers/rapidio/rio_cm.c | |||
@@ -1080,8 +1080,8 @@ static int riocm_send_ack(struct rio_channel *ch) | |||
1080 | static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id, | 1080 | static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id, |
1081 | long timeout) | 1081 | long timeout) |
1082 | { | 1082 | { |
1083 | struct rio_channel *ch = NULL; | 1083 | struct rio_channel *ch; |
1084 | struct rio_channel *new_ch = NULL; | 1084 | struct rio_channel *new_ch; |
1085 | struct conn_req *req; | 1085 | struct conn_req *req; |
1086 | struct cm_peer *peer; | 1086 | struct cm_peer *peer; |
1087 | int found = 0; | 1087 | int found = 0; |
@@ -1155,6 +1155,7 @@ static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id, | |||
1155 | 1155 | ||
1156 | spin_unlock_bh(&ch->lock); | 1156 | spin_unlock_bh(&ch->lock); |
1157 | riocm_put_channel(ch); | 1157 | riocm_put_channel(ch); |
1158 | ch = NULL; | ||
1158 | kfree(req); | 1159 | kfree(req); |
1159 | 1160 | ||
1160 | down_read(&rdev_sem); | 1161 | down_read(&rdev_sem); |
@@ -1172,7 +1173,7 @@ static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id, | |||
1172 | if (!found) { | 1173 | if (!found) { |
1173 | /* If peer device object not found, simply ignore the request */ | 1174 | /* If peer device object not found, simply ignore the request */ |
1174 | err = -ENODEV; | 1175 | err = -ENODEV; |
1175 | goto err_nodev; | 1176 | goto err_put_new_ch; |
1176 | } | 1177 | } |
1177 | 1178 | ||
1178 | new_ch->rdev = peer->rdev; | 1179 | new_ch->rdev = peer->rdev; |
@@ -1184,15 +1185,16 @@ static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id, | |||
1184 | 1185 | ||
1185 | *new_ch_id = new_ch->id; | 1186 | *new_ch_id = new_ch->id; |
1186 | return new_ch; | 1187 | return new_ch; |
1188 | |||
1189 | err_put_new_ch: | ||
1190 | spin_lock_bh(&idr_lock); | ||
1191 | idr_remove(&ch_idr, new_ch->id); | ||
1192 | spin_unlock_bh(&idr_lock); | ||
1193 | riocm_put_channel(new_ch); | ||
1194 | |||
1187 | err_put: | 1195 | err_put: |
1188 | riocm_put_channel(ch); | 1196 | if (ch) |
1189 | err_nodev: | 1197 | riocm_put_channel(ch); |
1190 | if (new_ch) { | ||
1191 | spin_lock_bh(&idr_lock); | ||
1192 | idr_remove(&ch_idr, new_ch->id); | ||
1193 | spin_unlock_bh(&idr_lock); | ||
1194 | riocm_put_channel(new_ch); | ||
1195 | } | ||
1196 | *new_ch_id = 0; | 1198 | *new_ch_id = 0; |
1197 | return ERR_PTR(err); | 1199 | return ERR_PTR(err); |
1198 | } | 1200 | } |
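The accept path above is restructured so each failure jumps to a label that unwinds exactly what has been set up so far, in reverse order; setting ch to NULL after riocm_put_channel() keeps the shared err_put label safe to reach from both directions. The general shape, hedged, with hypothetical resources:

    struct a_res *a;                        /* hypothetical resources */
    struct b_res *b;
    int err;

    a = acquire_a();
    if (!a)
            return ERR_PTR(-ENOMEM);

    b = acquire_b();
    if (!b) {
            err = -ENOMEM;
            goto err_release_a;             /* unwind in reverse order */
    }
    return b;

    err_release_a:
            release_a(a);
            return ERR_PTR(err);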
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 8973d34ce5ba..fb1b56a71475 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -1643,9 +1643,18 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
1643 | u8 *sense = NULL; | 1643 | u8 *sense = NULL; |
1644 | int expires; | 1644 | int expires; |
1645 | 1645 | ||
1646 | cqr = (struct dasd_ccw_req *) intparm; | ||
1646 | if (IS_ERR(irb)) { | 1647 | if (IS_ERR(irb)) { |
1647 | switch (PTR_ERR(irb)) { | 1648 | switch (PTR_ERR(irb)) { |
1648 | case -EIO: | 1649 | case -EIO: |
1650 | if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) { | ||
1651 | device = (struct dasd_device *) cqr->startdev; | ||
1652 | cqr->status = DASD_CQR_CLEARED; | ||
1653 | dasd_device_clear_timer(device); | ||
1654 | wake_up(&dasd_flush_wq); | ||
1655 | dasd_schedule_device_bh(device); | ||
1656 | return; | ||
1657 | } | ||
1649 | break; | 1658 | break; |
1650 | case -ETIMEDOUT: | 1659 | case -ETIMEDOUT: |
1651 | DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " | 1660 | DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " |
@@ -1661,7 +1670,6 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
1661 | } | 1670 | } |
1662 | 1671 | ||
1663 | now = get_tod_clock(); | 1672 | now = get_tod_clock(); |
1664 | cqr = (struct dasd_ccw_req *) intparm; | ||
1665 | /* check for conditions that should be handled immediately */ | 1673 | /* check for conditions that should be handled immediately */ |
1666 | if (!cqr || | 1674 | if (!cqr || |
1667 | !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && | 1675 | !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && |
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index fd2eff440098..98bbec44bcd0 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c | |||
@@ -5078,6 +5078,8 @@ static int dasd_eckd_read_message_buffer(struct dasd_device *device, | |||
5078 | return PTR_ERR(cqr); | 5078 | return PTR_ERR(cqr); |
5079 | } | 5079 | } |
5080 | 5080 | ||
5081 | cqr->lpm = lpum; | ||
5082 | retry: | ||
5081 | cqr->startdev = device; | 5083 | cqr->startdev = device; |
5082 | cqr->memdev = device; | 5084 | cqr->memdev = device; |
5083 | cqr->block = NULL; | 5085 | cqr->block = NULL; |
@@ -5122,6 +5124,14 @@ static int dasd_eckd_read_message_buffer(struct dasd_device *device, | |||
5122 | (prssdp + 1); | 5124 | (prssdp + 1); |
5123 | memcpy(messages, message_buf, | 5125 | memcpy(messages, message_buf, |
5124 | sizeof(struct dasd_rssd_messages)); | 5126 | sizeof(struct dasd_rssd_messages)); |
5127 | } else if (cqr->lpm) { | ||
5128 | /* | ||
5129 | * On z/VM we might not be able to do I/O on the requested path, | ||
5130 | * but we can get the required information on any path, | ||
5131 | * so retry with an open path mask. | ||
5132 | */ | ||
5133 | cqr->lpm = 0; | ||
5134 | goto retry; | ||
5125 | } else | 5135 | } else |
5126 | DBF_EVENT_DEVID(DBF_WARNING, device->cdev, | 5136 | DBF_EVENT_DEVID(DBF_WARNING, device->cdev, |
5127 | "Reading messages failed with rc=%d\n" | 5137 | "Reading messages failed with rc=%d\n" |
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 7ada078ffdd0..6a58bc8f46e2 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c | |||
@@ -762,7 +762,6 @@ static int io_subchannel_initialize_dev(struct subchannel *sch, | |||
762 | priv->state = DEV_STATE_NOT_OPER; | 762 | priv->state = DEV_STATE_NOT_OPER; |
763 | priv->dev_id.devno = sch->schib.pmcw.dev; | 763 | priv->dev_id.devno = sch->schib.pmcw.dev; |
764 | priv->dev_id.ssid = sch->schid.ssid; | 764 | priv->dev_id.ssid = sch->schid.ssid; |
765 | priv->schid = sch->schid; | ||
766 | 765 | ||
767 | INIT_WORK(&priv->todo_work, ccw_device_todo); | 766 | INIT_WORK(&priv->todo_work, ccw_device_todo); |
768 | INIT_LIST_HEAD(&priv->cmb_list); | 767 | INIT_LIST_HEAD(&priv->cmb_list); |
@@ -1000,7 +999,6 @@ static int ccw_device_move_to_sch(struct ccw_device *cdev, | |||
1000 | put_device(&old_sch->dev); | 999 | put_device(&old_sch->dev); |
1001 | /* Initialize new subchannel. */ | 1000 | /* Initialize new subchannel. */ |
1002 | spin_lock_irq(sch->lock); | 1001 | spin_lock_irq(sch->lock); |
1003 | cdev->private->schid = sch->schid; | ||
1004 | cdev->ccwlock = sch->lock; | 1002 | cdev->ccwlock = sch->lock; |
1005 | if (!sch_is_pseudo_sch(sch)) | 1003 | if (!sch_is_pseudo_sch(sch)) |
1006 | sch_set_cdev(sch, cdev); | 1004 | sch_set_cdev(sch, cdev); |
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c index 15b56a15db15..9bc3512374c9 100644 --- a/drivers/s390/cio/device_status.c +++ b/drivers/s390/cio/device_status.c | |||
@@ -26,6 +26,7 @@ | |||
26 | static void | 26 | static void |
27 | ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) | 27 | ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) |
28 | { | 28 | { |
29 | struct subchannel *sch = to_subchannel(cdev->dev.parent); | ||
29 | char dbf_text[15]; | 30 | char dbf_text[15]; |
30 | 31 | ||
31 | if (!scsw_is_valid_cstat(&irb->scsw) || | 32 | if (!scsw_is_valid_cstat(&irb->scsw) || |
@@ -36,10 +37,10 @@ ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) | |||
36 | "received" | 37 | "received" |
37 | " ... device %04x on subchannel 0.%x.%04x, dev_stat " | 38 | " ... device %04x on subchannel 0.%x.%04x, dev_stat " |
38 | ": %02X sch_stat : %02X\n", | 39 | ": %02X sch_stat : %02X\n", |
39 | cdev->private->dev_id.devno, cdev->private->schid.ssid, | 40 | cdev->private->dev_id.devno, sch->schid.ssid, |
40 | cdev->private->schid.sch_no, | 41 | sch->schid.sch_no, |
41 | scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw)); | 42 | scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw)); |
42 | sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no); | 43 | sprintf(dbf_text, "chk%x", sch->schid.sch_no); |
43 | CIO_TRACE_EVENT(0, dbf_text); | 44 | CIO_TRACE_EVENT(0, dbf_text); |
44 | CIO_HEX_EVENT(0, irb, sizeof(struct irb)); | 45 | CIO_HEX_EVENT(0, irb, sizeof(struct irb)); |
45 | } | 46 | } |
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h index 8975060af96c..220f49145b2f 100644 --- a/drivers/s390/cio/io_sch.h +++ b/drivers/s390/cio/io_sch.h | |||
@@ -120,7 +120,6 @@ struct ccw_device_private { | |||
120 | int state; /* device state */ | 120 | int state; /* device state */ |
121 | atomic_t onoff; | 121 | atomic_t onoff; |
122 | struct ccw_dev_id dev_id; /* device id */ | 122 | struct ccw_dev_id dev_id; /* device id */ |
123 | struct subchannel_id schid; /* subchannel number */ | ||
124 | struct ccw_request req; /* internal I/O request */ | 123 | struct ccw_request req; /* internal I/O request */ |
125 | int iretry; | 124 | int iretry; |
126 | u8 pgid_valid_mask; /* mask of valid PGIDs */ | 125 | u8 pgid_valid_mask; /* mask of valid PGIDs */ |
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 4bb5262f7aee..71bf9bded485 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c | |||
@@ -686,6 +686,15 @@ static void qdio_kick_handler(struct qdio_q *q) | |||
686 | q->qdio_error = 0; | 686 | q->qdio_error = 0; |
687 | } | 687 | } |
688 | 688 | ||
689 | static inline int qdio_tasklet_schedule(struct qdio_q *q) | ||
690 | { | ||
691 | if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) { | ||
692 | tasklet_schedule(&q->tasklet); | ||
693 | return 0; | ||
694 | } | ||
695 | return -EPERM; | ||
696 | } | ||
697 | |||
689 | static void __qdio_inbound_processing(struct qdio_q *q) | 698 | static void __qdio_inbound_processing(struct qdio_q *q) |
690 | { | 699 | { |
691 | qperf_inc(q, tasklet_inbound); | 700 | qperf_inc(q, tasklet_inbound); |
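The new qdio_tasklet_schedule() helper above folds the "only while the IRQ is ACTIVE" test into one place, and its return value tells callers whether the tasklet was actually scheduled. The reworked call sites below then read like this sketch:

    if (!qdio_inbound_q_done(q)) {
            /* poll period not over yet: reschedule, unless the device
             * is already past ACTIVE and the helper refuses */
            if (!qdio_tasklet_schedule(q))
                    return;
    }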
@@ -698,10 +707,8 @@ static void __qdio_inbound_processing(struct qdio_q *q) | |||
698 | if (!qdio_inbound_q_done(q)) { | 707 | if (!qdio_inbound_q_done(q)) { |
699 | /* means poll time is not yet over */ | 708 | /* means poll time is not yet over */ |
700 | qperf_inc(q, tasklet_inbound_resched); | 709 | qperf_inc(q, tasklet_inbound_resched); |
701 | if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) { | 710 | if (!qdio_tasklet_schedule(q)) |
702 | tasklet_schedule(&q->tasklet); | ||
703 | return; | 711 | return; |
704 | } | ||
705 | } | 712 | } |
706 | 713 | ||
707 | qdio_stop_polling(q); | 714 | qdio_stop_polling(q); |
@@ -711,8 +718,7 @@ static void __qdio_inbound_processing(struct qdio_q *q) | |||
711 | */ | 718 | */ |
712 | if (!qdio_inbound_q_done(q)) { | 719 | if (!qdio_inbound_q_done(q)) { |
713 | qperf_inc(q, tasklet_inbound_resched2); | 720 | qperf_inc(q, tasklet_inbound_resched2); |
714 | if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) | 721 | qdio_tasklet_schedule(q); |
715 | tasklet_schedule(&q->tasklet); | ||
716 | } | 722 | } |
717 | } | 723 | } |
718 | 724 | ||
@@ -869,16 +875,15 @@ static void __qdio_outbound_processing(struct qdio_q *q) | |||
869 | * is noticed and outbound_handler is called after some time. | 875 | * is noticed and outbound_handler is called after some time. |
870 | */ | 876 | */ |
871 | if (qdio_outbound_q_done(q)) | 877 | if (qdio_outbound_q_done(q)) |
872 | del_timer(&q->u.out.timer); | 878 | del_timer_sync(&q->u.out.timer); |
873 | else | 879 | else |
874 | if (!timer_pending(&q->u.out.timer)) | 880 | if (!timer_pending(&q->u.out.timer) && |
881 | likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) | ||
875 | mod_timer(&q->u.out.timer, jiffies + 10 * HZ); | 882 | mod_timer(&q->u.out.timer, jiffies + 10 * HZ); |
876 | return; | 883 | return; |
877 | 884 | ||
878 | sched: | 885 | sched: |
879 | if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) | 886 | qdio_tasklet_schedule(q); |
880 | return; | ||
881 | tasklet_schedule(&q->tasklet); | ||
882 | } | 887 | } |
883 | 888 | ||
884 | /* outbound tasklet */ | 889 | /* outbound tasklet */ |
@@ -892,9 +897,7 @@ void qdio_outbound_timer(unsigned long data) | |||
892 | { | 897 | { |
893 | struct qdio_q *q = (struct qdio_q *)data; | 898 | struct qdio_q *q = (struct qdio_q *)data; |
894 | 899 | ||
895 | if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) | 900 | qdio_tasklet_schedule(q); |
896 | return; | ||
897 | tasklet_schedule(&q->tasklet); | ||
898 | } | 901 | } |
899 | 902 | ||
900 | static inline void qdio_check_outbound_after_thinint(struct qdio_q *q) | 903 | static inline void qdio_check_outbound_after_thinint(struct qdio_q *q) |
@@ -907,7 +910,7 @@ static inline void qdio_check_outbound_after_thinint(struct qdio_q *q) | |||
907 | 910 | ||
908 | for_each_output_queue(q->irq_ptr, out, i) | 911 | for_each_output_queue(q->irq_ptr, out, i) |
909 | if (!qdio_outbound_q_done(out)) | 912 | if (!qdio_outbound_q_done(out)) |
910 | tasklet_schedule(&out->tasklet); | 913 | qdio_tasklet_schedule(out); |
911 | } | 914 | } |
912 | 915 | ||
913 | static void __tiqdio_inbound_processing(struct qdio_q *q) | 916 | static void __tiqdio_inbound_processing(struct qdio_q *q) |
@@ -929,10 +932,8 @@ static void __tiqdio_inbound_processing(struct qdio_q *q) | |||
929 | 932 | ||
930 | if (!qdio_inbound_q_done(q)) { | 933 | if (!qdio_inbound_q_done(q)) { |
931 | qperf_inc(q, tasklet_inbound_resched); | 934 | qperf_inc(q, tasklet_inbound_resched); |
932 | if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) { | 935 | if (!qdio_tasklet_schedule(q)) |
933 | tasklet_schedule(&q->tasklet); | ||
934 | return; | 936 | return; |
935 | } | ||
936 | } | 937 | } |
937 | 938 | ||
938 | qdio_stop_polling(q); | 939 | qdio_stop_polling(q); |
@@ -942,8 +943,7 @@ static void __tiqdio_inbound_processing(struct qdio_q *q) | |||
942 | */ | 943 | */ |
943 | if (!qdio_inbound_q_done(q)) { | 944 | if (!qdio_inbound_q_done(q)) { |
944 | qperf_inc(q, tasklet_inbound_resched2); | 945 | qperf_inc(q, tasklet_inbound_resched2); |
945 | if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) | 946 | qdio_tasklet_schedule(q); |
946 | tasklet_schedule(&q->tasklet); | ||
947 | } | 947 | } |
948 | } | 948 | } |
949 | 949 | ||
@@ -977,7 +977,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) | |||
977 | int i; | 977 | int i; |
978 | struct qdio_q *q; | 978 | struct qdio_q *q; |
979 | 979 | ||
980 | if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) | 980 | if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) |
981 | return; | 981 | return; |
982 | 982 | ||
983 | for_each_input_queue(irq_ptr, q, i) { | 983 | for_each_input_queue(irq_ptr, q, i) { |
@@ -1003,7 +1003,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) | |||
1003 | continue; | 1003 | continue; |
1004 | if (need_siga_sync(q) && need_siga_sync_out_after_pci(q)) | 1004 | if (need_siga_sync(q) && need_siga_sync_out_after_pci(q)) |
1005 | qdio_siga_sync_q(q); | 1005 | qdio_siga_sync_q(q); |
1006 | tasklet_schedule(&q->tasklet); | 1006 | qdio_tasklet_schedule(q); |
1007 | } | 1007 | } |
1008 | } | 1008 | } |
1009 | 1009 | ||
@@ -1066,10 +1066,12 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
1066 | struct irb *irb) | 1066 | struct irb *irb) |
1067 | { | 1067 | { |
1068 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; | 1068 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; |
1069 | struct subchannel_id schid; | ||
1069 | int cstat, dstat; | 1070 | int cstat, dstat; |
1070 | 1071 | ||
1071 | if (!intparm || !irq_ptr) { | 1072 | if (!intparm || !irq_ptr) { |
1072 | DBF_ERROR("qint:%4x", cdev->private->schid.sch_no); | 1073 | ccw_device_get_schid(cdev, &schid); |
1074 | DBF_ERROR("qint:%4x", schid.sch_no); | ||
1073 | return; | 1075 | return; |
1074 | } | 1076 | } |
1075 | 1077 | ||
@@ -1122,12 +1124,14 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
1122 | int qdio_get_ssqd_desc(struct ccw_device *cdev, | 1124 | int qdio_get_ssqd_desc(struct ccw_device *cdev, |
1123 | struct qdio_ssqd_desc *data) | 1125 | struct qdio_ssqd_desc *data) |
1124 | { | 1126 | { |
1127 | struct subchannel_id schid; | ||
1125 | 1128 | ||
1126 | if (!cdev || !cdev->private) | 1129 | if (!cdev || !cdev->private) |
1127 | return -EINVAL; | 1130 | return -EINVAL; |
1128 | 1131 | ||
1129 | DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no); | 1132 | ccw_device_get_schid(cdev, &schid); |
1130 | return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data); | 1133 | DBF_EVENT("get ssqd:%4x", schid.sch_no); |
1134 | return qdio_setup_get_ssqd(NULL, &schid, data); | ||
1131 | } | 1135 | } |
1132 | EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); | 1136 | EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); |
1133 | 1137 | ||
@@ -1141,7 +1145,7 @@ static void qdio_shutdown_queues(struct ccw_device *cdev) | |||
1141 | tasklet_kill(&q->tasklet); | 1145 | tasklet_kill(&q->tasklet); |
1142 | 1146 | ||
1143 | for_each_output_queue(irq_ptr, q, i) { | 1147 | for_each_output_queue(irq_ptr, q, i) { |
1144 | del_timer(&q->u.out.timer); | 1148 | del_timer_sync(&q->u.out.timer); |
1145 | tasklet_kill(&q->tasklet); | 1149 | tasklet_kill(&q->tasklet); |
1146 | } | 1150 | } |
1147 | } | 1151 | } |
@@ -1154,14 +1158,15 @@ static void qdio_shutdown_queues(struct ccw_device *cdev) | |||
1154 | int qdio_shutdown(struct ccw_device *cdev, int how) | 1158 | int qdio_shutdown(struct ccw_device *cdev, int how) |
1155 | { | 1159 | { |
1156 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; | 1160 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; |
1161 | struct subchannel_id schid; | ||
1157 | int rc; | 1162 | int rc; |
1158 | unsigned long flags; | ||
1159 | 1163 | ||
1160 | if (!irq_ptr) | 1164 | if (!irq_ptr) |
1161 | return -ENODEV; | 1165 | return -ENODEV; |
1162 | 1166 | ||
1163 | WARN_ON_ONCE(irqs_disabled()); | 1167 | WARN_ON_ONCE(irqs_disabled()); |
1164 | DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no); | 1168 | ccw_device_get_schid(cdev, &schid); |
1169 | DBF_EVENT("qshutdown:%4x", schid.sch_no); | ||
1165 | 1170 | ||
1166 | mutex_lock(&irq_ptr->setup_mutex); | 1171 | mutex_lock(&irq_ptr->setup_mutex); |
1167 | /* | 1172 | /* |
@@ -1184,7 +1189,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how) | |||
1184 | qdio_shutdown_debug_entries(irq_ptr); | 1189 | qdio_shutdown_debug_entries(irq_ptr); |
1185 | 1190 | ||
1186 | /* cleanup subchannel */ | 1191 | /* cleanup subchannel */ |
1187 | spin_lock_irqsave(get_ccwdev_lock(cdev), flags); | 1192 | spin_lock_irq(get_ccwdev_lock(cdev)); |
1188 | 1193 | ||
1189 | if (how & QDIO_FLAG_CLEANUP_USING_CLEAR) | 1194 | if (how & QDIO_FLAG_CLEANUP_USING_CLEAR) |
1190 | rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP); | 1195 | rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP); |
@@ -1198,12 +1203,12 @@ int qdio_shutdown(struct ccw_device *cdev, int how) | |||
1198 | } | 1203 | } |
1199 | 1204 | ||
1200 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); | 1205 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); |
1201 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); | 1206 | spin_unlock_irq(get_ccwdev_lock(cdev)); |
1202 | wait_event_interruptible_timeout(cdev->private->wait_q, | 1207 | wait_event_interruptible_timeout(cdev->private->wait_q, |
1203 | irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || | 1208 | irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || |
1204 | irq_ptr->state == QDIO_IRQ_STATE_ERR, | 1209 | irq_ptr->state == QDIO_IRQ_STATE_ERR, |
1205 | 10 * HZ); | 1210 | 10 * HZ); |
1206 | spin_lock_irqsave(get_ccwdev_lock(cdev), flags); | 1211 | spin_lock_irq(get_ccwdev_lock(cdev)); |
1207 | 1212 | ||
1208 | no_cleanup: | 1213 | no_cleanup: |
1209 | qdio_shutdown_thinint(irq_ptr); | 1214 | qdio_shutdown_thinint(irq_ptr); |
@@ -1211,7 +1216,7 @@ no_cleanup: | |||
1211 | /* restore interrupt handler */ | 1216 | /* restore interrupt handler */ |
1212 | if ((void *)cdev->handler == (void *)qdio_int_handler) | 1217 | if ((void *)cdev->handler == (void *)qdio_int_handler) |
1213 | cdev->handler = irq_ptr->orig_handler; | 1218 | cdev->handler = irq_ptr->orig_handler; |
1214 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); | 1219 | spin_unlock_irq(get_ccwdev_lock(cdev)); |
1215 | 1220 | ||
1216 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); | 1221 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); |
1217 | mutex_unlock(&irq_ptr->setup_mutex); | 1222 | mutex_unlock(&irq_ptr->setup_mutex); |
@@ -1228,11 +1233,13 @@ EXPORT_SYMBOL_GPL(qdio_shutdown); | |||
1228 | int qdio_free(struct ccw_device *cdev) | 1233 | int qdio_free(struct ccw_device *cdev) |
1229 | { | 1234 | { |
1230 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; | 1235 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; |
1236 | struct subchannel_id schid; | ||
1231 | 1237 | ||
1232 | if (!irq_ptr) | 1238 | if (!irq_ptr) |
1233 | return -ENODEV; | 1239 | return -ENODEV; |
1234 | 1240 | ||
1235 | DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no); | 1241 | ccw_device_get_schid(cdev, &schid); |
1242 | DBF_EVENT("qfree:%4x", schid.sch_no); | ||
1236 | DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned"); | 1243 | DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned"); |
1237 | mutex_lock(&irq_ptr->setup_mutex); | 1244 | mutex_lock(&irq_ptr->setup_mutex); |
1238 | 1245 | ||
@@ -1251,9 +1258,11 @@ EXPORT_SYMBOL_GPL(qdio_free); | |||
1251 | */ | 1258 | */ |
1252 | int qdio_allocate(struct qdio_initialize *init_data) | 1259 | int qdio_allocate(struct qdio_initialize *init_data) |
1253 | { | 1260 | { |
1261 | struct subchannel_id schid; | ||
1254 | struct qdio_irq *irq_ptr; | 1262 | struct qdio_irq *irq_ptr; |
1255 | 1263 | ||
1256 | DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no); | 1264 | ccw_device_get_schid(init_data->cdev, &schid); |
1265 | DBF_EVENT("qallocate:%4x", schid.sch_no); | ||
1257 | 1266 | ||
1258 | if ((init_data->no_input_qs && !init_data->input_handler) || | 1267 | if ((init_data->no_input_qs && !init_data->input_handler) || |
1259 | (init_data->no_output_qs && !init_data->output_handler)) | 1268 | (init_data->no_output_qs && !init_data->output_handler)) |
@@ -1331,20 +1340,18 @@ static void qdio_detect_hsicq(struct qdio_irq *irq_ptr) | |||
1331 | */ | 1340 | */ |
1332 | int qdio_establish(struct qdio_initialize *init_data) | 1341 | int qdio_establish(struct qdio_initialize *init_data) |
1333 | { | 1342 | { |
1334 | struct qdio_irq *irq_ptr; | ||
1335 | struct ccw_device *cdev = init_data->cdev; | 1343 | struct ccw_device *cdev = init_data->cdev; |
1336 | unsigned long saveflags; | 1344 | struct subchannel_id schid; |
1345 | struct qdio_irq *irq_ptr; | ||
1337 | int rc; | 1346 | int rc; |
1338 | 1347 | ||
1339 | DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no); | 1348 | ccw_device_get_schid(cdev, &schid); |
1349 | DBF_EVENT("qestablish:%4x", schid.sch_no); | ||
1340 | 1350 | ||
1341 | irq_ptr = cdev->private->qdio_data; | 1351 | irq_ptr = cdev->private->qdio_data; |
1342 | if (!irq_ptr) | 1352 | if (!irq_ptr) |
1343 | return -ENODEV; | 1353 | return -ENODEV; |
1344 | 1354 | ||
1345 | if (cdev->private->state != DEV_STATE_ONLINE) | ||
1346 | return -EINVAL; | ||
1347 | |||
1348 | mutex_lock(&irq_ptr->setup_mutex); | 1355 | mutex_lock(&irq_ptr->setup_mutex); |
1349 | qdio_setup_irq(init_data); | 1356 | qdio_setup_irq(init_data); |
1350 | 1357 | ||
@@ -1361,17 +1368,14 @@ int qdio_establish(struct qdio_initialize *init_data) | |||
1361 | irq_ptr->ccw.count = irq_ptr->equeue.count; | 1368 | irq_ptr->ccw.count = irq_ptr->equeue.count; |
1362 | irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr); | 1369 | irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr); |
1363 | 1370 | ||
1364 | spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags); | 1371 | spin_lock_irq(get_ccwdev_lock(cdev)); |
1365 | ccw_device_set_options_mask(cdev, 0); | 1372 | ccw_device_set_options_mask(cdev, 0); |
1366 | 1373 | ||
1367 | rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0); | 1374 | rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0); |
1375 | spin_unlock_irq(get_ccwdev_lock(cdev)); | ||
1368 | if (rc) { | 1376 | if (rc) { |
1369 | DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no); | 1377 | DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no); |
1370 | DBF_ERROR("rc:%4x", rc); | 1378 | DBF_ERROR("rc:%4x", rc); |
1371 | } | ||
1372 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); | ||
1373 | |||
1374 | if (rc) { | ||
1375 | mutex_unlock(&irq_ptr->setup_mutex); | 1379 | mutex_unlock(&irq_ptr->setup_mutex); |
1376 | qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); | 1380 | qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); |
1377 | return rc; | 1381 | return rc; |
@@ -1407,19 +1411,17 @@ EXPORT_SYMBOL_GPL(qdio_establish); | |||
1407 | */ | 1411 | */ |
1408 | int qdio_activate(struct ccw_device *cdev) | 1412 | int qdio_activate(struct ccw_device *cdev) |
1409 | { | 1413 | { |
1414 | struct subchannel_id schid; | ||
1410 | struct qdio_irq *irq_ptr; | 1415 | struct qdio_irq *irq_ptr; |
1411 | int rc; | 1416 | int rc; |
1412 | unsigned long saveflags; | ||
1413 | 1417 | ||
1414 | DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no); | 1418 | ccw_device_get_schid(cdev, &schid); |
1419 | DBF_EVENT("qactivate:%4x", schid.sch_no); | ||
1415 | 1420 | ||
1416 | irq_ptr = cdev->private->qdio_data; | 1421 | irq_ptr = cdev->private->qdio_data; |
1417 | if (!irq_ptr) | 1422 | if (!irq_ptr) |
1418 | return -ENODEV; | 1423 | return -ENODEV; |
1419 | 1424 | ||
1420 | if (cdev->private->state != DEV_STATE_ONLINE) | ||
1421 | return -EINVAL; | ||
1422 | |||
1423 | mutex_lock(&irq_ptr->setup_mutex); | 1425 | mutex_lock(&irq_ptr->setup_mutex); |
1424 | if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) { | 1426 | if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) { |
1425 | rc = -EBUSY; | 1427 | rc = -EBUSY; |
@@ -1431,19 +1433,17 @@ int qdio_activate(struct ccw_device *cdev) | |||
1431 | irq_ptr->ccw.count = irq_ptr->aqueue.count; | 1433 | irq_ptr->ccw.count = irq_ptr->aqueue.count; |
1432 | irq_ptr->ccw.cda = 0; | 1434 | irq_ptr->ccw.cda = 0; |
1433 | 1435 | ||
1434 | spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags); | 1436 | spin_lock_irq(get_ccwdev_lock(cdev)); |
1435 | ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); | 1437 | ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); |
1436 | 1438 | ||
1437 | rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE, | 1439 | rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE, |
1438 | 0, DOIO_DENY_PREFETCH); | 1440 | 0, DOIO_DENY_PREFETCH); |
1441 | spin_unlock_irq(get_ccwdev_lock(cdev)); | ||
1439 | if (rc) { | 1442 | if (rc) { |
1440 | DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no); | 1443 | DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no); |
1441 | DBF_ERROR("rc:%4x", rc); | 1444 | DBF_ERROR("rc:%4x", rc); |
1442 | } | ||
1443 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); | ||
1444 | |||
1445 | if (rc) | ||
1446 | goto out; | 1445 | goto out; |
1446 | } | ||
1447 | 1447 | ||
1448 | if (is_thinint_irq(irq_ptr)) | 1448 | if (is_thinint_irq(irq_ptr)) |
1449 | tiqdio_add_input_queues(irq_ptr); | 1449 | tiqdio_add_input_queues(irq_ptr); |
@@ -1585,10 +1585,11 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags, | |||
1585 | 1585 | ||
1586 | /* in case of SIGA errors we must process the error immediately */ | 1586 | /* in case of SIGA errors we must process the error immediately */ |
1587 | if (used >= q->u.out.scan_threshold || rc) | 1587 | if (used >= q->u.out.scan_threshold || rc) |
1588 | tasklet_schedule(&q->tasklet); | 1588 | qdio_tasklet_schedule(q); |
1589 | else | 1589 | else |
1590 | /* free the SBALs in case of no further traffic */ | 1590 | /* free the SBALs in case of no further traffic */ |
1591 | if (!timer_pending(&q->u.out.timer)) | 1591 | if (!timer_pending(&q->u.out.timer) && |
1592 | likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) | ||
1592 | mod_timer(&q->u.out.timer, jiffies + HZ); | 1593 | mod_timer(&q->u.out.timer, jiffies + HZ); |
1593 | return rc; | 1594 | return rc; |
1594 | } | 1595 | } |
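A note on the locking change in qdio_establish() and qdio_activate(): both run in process context with interrupts enabled (they sleep on setup_mutex a few lines earlier), so the plain spin_lock_irq()/spin_unlock_irq() pair is correct and the flags-saving variant only obscured that; moving the unlock above the error handling also lets the two rc checks collapse into one. A minimal sketch of the pattern with the driver details stripped:

	/* Sketch: process context, IRQs known enabled -> spin_lock_irq().
	 * spin_lock_irqsave() is for callers that may already run with
	 * IRQs disabled.
	 */
	static int start_ccw(struct ccw_device *cdev, struct ccw1 *ccw,
			     unsigned long intparm)
	{
		int rc;

		spin_lock_irq(get_ccwdev_lock(cdev));
		rc = ccw_device_start(cdev, ccw, intparm, 0, 0);
		spin_unlock_irq(get_ccwdev_lock(cdev));
		return rc;	/* single error check in the caller */
	}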
diff --git a/drivers/s390/virtio/Makefile b/drivers/s390/virtio/Makefile index 241891a57caf..df40692a9011 100644 --- a/drivers/s390/virtio/Makefile +++ b/drivers/s390/virtio/Makefile | |||
@@ -6,4 +6,8 @@ | |||
6 | # it under the terms of the GNU General Public License (version 2 only) | 6 | # it under the terms of the GNU General Public License (version 2 only) |
7 | # as published by the Free Software Foundation. | 7 | # as published by the Free Software Foundation. |
8 | 8 | ||
9 | obj-$(CONFIG_S390_GUEST) += kvm_virtio.o virtio_ccw.o | 9 | s390-virtio-objs := virtio_ccw.o |
10 | ifdef CONFIG_S390_GUEST_OLD_TRANSPORT | ||
11 | s390-virtio-objs += kvm_virtio.o | ||
12 | endif | ||
13 | obj-$(CONFIG_S390_GUEST) += $(s390-virtio-objs) | ||
diff --git a/drivers/s390/virtio/kvm_virtio.c b/drivers/s390/virtio/kvm_virtio.c index 1d060fd293a3..5e5c11f37b24 100644 --- a/drivers/s390/virtio/kvm_virtio.c +++ b/drivers/s390/virtio/kvm_virtio.c | |||
@@ -458,6 +458,8 @@ static int __init kvm_devices_init(void) | |||
458 | if (test_devices_support(total_memory_size) < 0) | 458 | if (test_devices_support(total_memory_size) < 0) |
459 | return -ENODEV; | 459 | return -ENODEV; |
460 | 460 | ||
461 | pr_warn("The s390-virtio transport is deprecated. Please switch to a modern host providing virtio-ccw.\n"); | ||
462 | |||
461 | rc = vmem_add_mapping(total_memory_size, PAGE_SIZE); | 463 | rc = vmem_add_mapping(total_memory_size, PAGE_SIZE); |
462 | if (rc) | 464 | if (rc) |
463 | return rc; | 465 | return rc; |
@@ -482,7 +484,7 @@ static int __init kvm_devices_init(void) | |||
482 | } | 484 | } |
483 | 485 | ||
484 | /* code for early console output with virtio_console */ | 486 | /* code for early console output with virtio_console */ |
485 | static __init int early_put_chars(u32 vtermno, const char *buf, int count) | 487 | static int early_put_chars(u32 vtermno, const char *buf, int count) |
486 | { | 488 | { |
487 | char scratch[17]; | 489 | char scratch[17]; |
488 | unsigned int len = count; | 490 | unsigned int len = count; |
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index b381b3718a98..5648b715fed9 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c | |||
@@ -63,7 +63,7 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg) | |||
63 | struct fib *fibptr; | 63 | struct fib *fibptr; |
64 | struct hw_fib * hw_fib = (struct hw_fib *)0; | 64 | struct hw_fib * hw_fib = (struct hw_fib *)0; |
65 | dma_addr_t hw_fib_pa = (dma_addr_t)0LL; | 65 | dma_addr_t hw_fib_pa = (dma_addr_t)0LL; |
66 | unsigned size; | 66 | unsigned int size, osize; |
67 | int retval; | 67 | int retval; |
68 | 68 | ||
69 | if (dev->in_reset) { | 69 | if (dev->in_reset) { |
@@ -87,7 +87,8 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg) | |||
87 | * will not overrun the buffer when we copy the memory. Return | 87 | * will not overrun the buffer when we copy the memory. Return |
88 | * an error if we would. | 88 | * an error if we would. |
89 | */ | 89 | */ |
90 | size = le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr); | 90 | osize = size = le16_to_cpu(kfib->header.Size) + |
91 | sizeof(struct aac_fibhdr); | ||
91 | if (size < le16_to_cpu(kfib->header.SenderSize)) | 92 | if (size < le16_to_cpu(kfib->header.SenderSize)) |
92 | size = le16_to_cpu(kfib->header.SenderSize); | 93 | size = le16_to_cpu(kfib->header.SenderSize); |
93 | if (size > dev->max_fib_size) { | 94 | if (size > dev->max_fib_size) { |
@@ -118,6 +119,14 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg) | |||
118 | goto cleanup; | 119 | goto cleanup; |
119 | } | 120 | } |
120 | 121 | ||
122 | /* Sanity check the second copy */ | ||
123 | if ((osize != le16_to_cpu(kfib->header.Size) + | ||
124 | sizeof(struct aac_fibhdr)) | ||
125 | || (size < le16_to_cpu(kfib->header.SenderSize))) { | ||
126 | retval = -EINVAL; | ||
127 | goto cleanup; | ||
128 | } | ||
129 | |||
121 | if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) { | 130 | if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) { |
122 | aac_adapter_interrupt(dev); | 131 | aac_adapter_interrupt(dev); |
123 | /* | 132 | /* |
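The added hunk closes a double-fetch (time-of-check vs. time-of-use) hole: the FIB header is copied from user space, its size fields are validated, and then the full FIB is copied again, so a racing user thread could grow the size fields between the two fetches. Re-checking the second copy against the sizes validated on the first pass (osize) rejects that race. A sketch of the pattern; the struct and names are hypothetical:

	/* Sketch: validate the first fetch, re-validate after the second. */
	struct demo_hdr {
		__le16 size;
		/* payload follows */
	};

	static int demo_fetch(void __user *arg, struct demo_hdr *k,
			      unsigned int max)	/* k: buffer of >= max bytes */
	{
		unsigned int osize;

		if (copy_from_user(k, arg, sizeof(*k)))		/* first fetch */
			return -EFAULT;
		osize = le16_to_cpu(k->size);
		if (osize < sizeof(*k) || osize > max)
			return -EINVAL;

		if (copy_from_user(k, arg, osize))		/* second fetch */
			return -EFAULT;
		if (le16_to_cpu(k->size) != osize)	/* changed under us */
			return -EINVAL;
		return 0;
	}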
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index a569c65f22b1..dcf36537a767 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c | |||
@@ -2923,7 +2923,7 @@ static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
2923 | mutex_unlock(&fip->ctlr_mutex); | 2923 | mutex_unlock(&fip->ctlr_mutex); |
2924 | 2924 | ||
2925 | drop: | 2925 | drop: |
2926 | kfree(skb); | 2926 | kfree_skb(skb); |
2927 | return rc; | 2927 | return rc; |
2928 | } | 2928 | } |
2929 | 2929 | ||
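A one-word fix with a real consequence: socket buffers come from the skb allocator and carry their own data buffer, destructor callbacks, and memory accounting, so they must be released with kfree_skb(); handing an skb to plain kfree() frees only the struct sk_buff itself and leaks the rest. The pairing in a kernel-context sketch:

	#include <linux/skbuff.h>

	/* Sketch: skb allocation and release must use the skb layer's API. */
	static int demo_drop(unsigned int len)
	{
		struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

		if (!skb)
			return -ENOMEM;
		/* inspect, decide to drop */
		kfree_skb(skb);		/* never plain kfree() on an skb */
		return 0;
	}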
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 2dab3dc2aa69..c1ed25adb17e 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c | |||
@@ -5037,7 +5037,7 @@ static int megasas_init_fw(struct megasas_instance *instance) | |||
5037 | /* Find first memory bar */ | 5037 | /* Find first memory bar */ |
5038 | bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM); | 5038 | bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM); |
5039 | instance->bar = find_first_bit(&bar_list, sizeof(unsigned long)); | 5039 | instance->bar = find_first_bit(&bar_list, sizeof(unsigned long)); |
5040 | if (pci_request_selected_regions(instance->pdev, instance->bar, | 5040 | if (pci_request_selected_regions(instance->pdev, 1<<instance->bar, |
5041 | "megasas: LSI")) { | 5041 | "megasas: LSI")) { |
5042 | dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n"); | 5042 | dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n"); |
5043 | return -EBUSY; | 5043 | return -EBUSY; |
@@ -5339,7 +5339,7 @@ fail_ready_state: | |||
5339 | iounmap(instance->reg_set); | 5339 | iounmap(instance->reg_set); |
5340 | 5340 | ||
5341 | fail_ioremap: | 5341 | fail_ioremap: |
5342 | pci_release_selected_regions(instance->pdev, instance->bar); | 5342 | pci_release_selected_regions(instance->pdev, 1<<instance->bar); |
5343 | 5343 | ||
5344 | return -EINVAL; | 5344 | return -EINVAL; |
5345 | } | 5345 | } |
@@ -5360,7 +5360,7 @@ static void megasas_release_mfi(struct megasas_instance *instance) | |||
5360 | 5360 | ||
5361 | iounmap(instance->reg_set); | 5361 | iounmap(instance->reg_set); |
5362 | 5362 | ||
5363 | pci_release_selected_regions(instance->pdev, instance->bar); | 5363 | pci_release_selected_regions(instance->pdev, 1<<instance->bar); |
5364 | } | 5364 | } |
5365 | 5365 | ||
5366 | /** | 5366 | /** |
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index ec837544f784..52d8bbf7feb5 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c | |||
@@ -2603,7 +2603,7 @@ megasas_release_fusion(struct megasas_instance *instance) | |||
2603 | 2603 | ||
2604 | iounmap(instance->reg_set); | 2604 | iounmap(instance->reg_set); |
2605 | 2605 | ||
2606 | pci_release_selected_regions(instance->pdev, instance->bar); | 2606 | pci_release_selected_regions(instance->pdev, 1<<instance->bar); |
2607 | } | 2607 | } |
2608 | 2608 | ||
2609 | /** | 2609 | /** |
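All four megaraid hunks fix the same type confusion: pci_request_selected_regions() and pci_release_selected_regions() take a bitmask of BARs, while instance->bar (the result of find_first_bit()) is a BAR index, so the index has to be turned back into a mask with 1 << bar. The two representations in a sketched probe fragment:

	#include <linux/pci.h>

	/* Sketch: BAR index vs. BAR bitmask (hypothetical driver body). */
	static int demo_claim_first_mem_bar(struct pci_dev *pdev)
	{
		unsigned long mask = pci_select_bars(pdev, IORESOURCE_MEM); /* bitmask */
		int bar = find_first_bit(&mask, BITS_PER_LONG);		    /* index */

		/* the request/release API wants the mask form again */
		return pci_request_selected_regions(pdev, 1 << bar, "demo");
	}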
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 751f13edece0..750f82c339d4 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c | |||
@@ -2188,6 +2188,17 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) | |||
2188 | } else | 2188 | } else |
2189 | ioc->msix96_vector = 0; | 2189 | ioc->msix96_vector = 0; |
2190 | 2190 | ||
2191 | if (ioc->is_warpdrive) { | ||
2192 | ioc->reply_post_host_index[0] = (resource_size_t __iomem *) | ||
2193 | &ioc->chip->ReplyPostHostIndex; | ||
2194 | |||
2195 | for (i = 1; i < ioc->cpu_msix_table_sz; i++) | ||
2196 | ioc->reply_post_host_index[i] = | ||
2197 | (resource_size_t __iomem *) | ||
2198 | ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1) | ||
2199 | * 4))); | ||
2200 | } | ||
2201 | |||
2191 | list_for_each_entry(reply_q, &ioc->reply_queue_list, list) | 2202 | list_for_each_entry(reply_q, &ioc->reply_queue_list, list) |
2192 | pr_info(MPT3SAS_FMT "%s: IRQ %d\n", | 2203 | pr_info(MPT3SAS_FMT "%s: IRQ %d\n", |
2193 | reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" : | 2204 | reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" : |
@@ -5280,17 +5291,6 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) | |||
5280 | if (r) | 5291 | if (r) |
5281 | goto out_free_resources; | 5292 | goto out_free_resources; |
5282 | 5293 | ||
5283 | if (ioc->is_warpdrive) { | ||
5284 | ioc->reply_post_host_index[0] = (resource_size_t __iomem *) | ||
5285 | &ioc->chip->ReplyPostHostIndex; | ||
5286 | |||
5287 | for (i = 1; i < ioc->cpu_msix_table_sz; i++) | ||
5288 | ioc->reply_post_host_index[i] = | ||
5289 | (resource_size_t __iomem *) | ||
5290 | ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1) | ||
5291 | * 4))); | ||
5292 | } | ||
5293 | |||
5294 | pci_set_drvdata(ioc->pdev, ioc->shost); | 5294 | pci_set_drvdata(ioc->pdev, ioc->shost); |
5295 | r = _base_get_ioc_facts(ioc, CAN_SLEEP); | 5295 | r = _base_get_ioc_facts(ioc, CAN_SLEEP); |
5296 | if (r) | 5296 | if (r) |
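The WarpDrive block moves from one-time attach into mpt3sas_base_map_resources(), presumably because reply_post_host_index[] is derived from the ioremap()ed chip registers: map_resources also runs on reset and resume paths, and pointers computed once at attach go stale after a remap. A sketch of the general rule, names hypothetical:

	/* Sketch: pointers derived from an ioremap()ed base belong in the
	 * mapping routine, so every re-map refreshes them as well.
	 */
	struct demo_regs {
		void __iomem *base;
		void __iomem *doorbell;		/* derived from base */
	};

	static int demo_map(struct demo_regs *r, phys_addr_t phys, size_t len)
	{
		r->base = ioremap(phys, len);
		if (!r->base)
			return -ENOMEM;
		r->doorbell = r->base + 0x20;	/* recomputed on every mapping */
		return 0;
	}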
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 53ef1cb6418e..0e8601aa877a 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c | |||
@@ -778,6 +778,8 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev) | |||
778 | if (!edev) | 778 | if (!edev) |
779 | return; | 779 | return; |
780 | 780 | ||
781 | enclosure_unregister(edev); | ||
782 | |||
781 | ses_dev = edev->scratch; | 783 | ses_dev = edev->scratch; |
782 | edev->scratch = NULL; | 784 | edev->scratch = NULL; |
783 | 785 | ||
@@ -789,7 +791,6 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev) | |||
789 | kfree(edev->component[0].scratch); | 791 | kfree(edev->component[0].scratch); |
790 | 792 | ||
791 | put_device(&edev->edev); | 793 | put_device(&edev->edev); |
792 | enclosure_unregister(edev); | ||
793 | } | 794 | } |
794 | 795 | ||
795 | static void ses_intf_remove(struct device *cdev, | 796 | static void ses_intf_remove(struct device *cdev, |
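The reorder in ses_intf_remove_enclosure() follows the usual teardown rule: unpublish first, free second. enclosure_unregister() tears down the sysfs interface through which callbacks can still reach the per-component scratch data, so it must run before that data is freed; the old order left a use-after-free window between the kfree() and the unregister. The shape, with hypothetical object names:

	/* Sketch: stop external access before freeing what it touches. */
	static void demo_remove(struct demo_obj *o)
	{
		demo_unregister(o);	/* 1: no more sysfs/callback entry points */
		kfree(o->scratch);	/* 2: now safe to free backing data */
		o->scratch = NULL;
		demo_put(o);		/* 3: drop our reference last */
	}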
diff --git a/drivers/thermal/clock_cooling.c b/drivers/thermal/clock_cooling.c index 1b4ff0f4c716..ed5dd0e88657 100644 --- a/drivers/thermal/clock_cooling.c +++ b/drivers/thermal/clock_cooling.c | |||
@@ -426,6 +426,7 @@ clock_cooling_register(struct device *dev, const char *clock_name) | |||
426 | if (!ccdev) | 426 | if (!ccdev) |
427 | return ERR_PTR(-ENOMEM); | 427 | return ERR_PTR(-ENOMEM); |
428 | 428 | ||
429 | mutex_init(&ccdev->lock); | ||
429 | ccdev->dev = dev; | 430 | ccdev->dev = dev; |
430 | ccdev->clk = devm_clk_get(dev, clock_name); | 431 | ccdev->clk = devm_clk_get(dev, clock_name); |
431 | if (IS_ERR(ccdev->clk)) | 432 | if (IS_ERR(ccdev->clk)) |
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index 3788ed74c9ab..a32b41783b77 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c | |||
@@ -740,12 +740,22 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev, | |||
740 | } | 740 | } |
741 | 741 | ||
742 | /* Bind cpufreq callbacks to thermal cooling device ops */ | 742 | /* Bind cpufreq callbacks to thermal cooling device ops */ |
743 | |||
743 | static struct thermal_cooling_device_ops cpufreq_cooling_ops = { | 744 | static struct thermal_cooling_device_ops cpufreq_cooling_ops = { |
744 | .get_max_state = cpufreq_get_max_state, | 745 | .get_max_state = cpufreq_get_max_state, |
745 | .get_cur_state = cpufreq_get_cur_state, | 746 | .get_cur_state = cpufreq_get_cur_state, |
746 | .set_cur_state = cpufreq_set_cur_state, | 747 | .set_cur_state = cpufreq_set_cur_state, |
747 | }; | 748 | }; |
748 | 749 | ||
750 | static struct thermal_cooling_device_ops cpufreq_power_cooling_ops = { | ||
751 | .get_max_state = cpufreq_get_max_state, | ||
752 | .get_cur_state = cpufreq_get_cur_state, | ||
753 | .set_cur_state = cpufreq_set_cur_state, | ||
754 | .get_requested_power = cpufreq_get_requested_power, | ||
755 | .state2power = cpufreq_state2power, | ||
756 | .power2state = cpufreq_power2state, | ||
757 | }; | ||
758 | |||
749 | /* Notifier for cpufreq policy change */ | 759 | /* Notifier for cpufreq policy change */ |
750 | static struct notifier_block thermal_cpufreq_notifier_block = { | 760 | static struct notifier_block thermal_cpufreq_notifier_block = { |
751 | .notifier_call = cpufreq_thermal_notifier, | 761 | .notifier_call = cpufreq_thermal_notifier, |
@@ -795,6 +805,7 @@ __cpufreq_cooling_register(struct device_node *np, | |||
795 | struct cpumask temp_mask; | 805 | struct cpumask temp_mask; |
796 | unsigned int freq, i, num_cpus; | 806 | unsigned int freq, i, num_cpus; |
797 | int ret; | 807 | int ret; |
808 | struct thermal_cooling_device_ops *cooling_ops; | ||
798 | 809 | ||
799 | cpumask_and(&temp_mask, clip_cpus, cpu_online_mask); | 810 | cpumask_and(&temp_mask, clip_cpus, cpu_online_mask); |
800 | policy = cpufreq_cpu_get(cpumask_first(&temp_mask)); | 811 | policy = cpufreq_cpu_get(cpumask_first(&temp_mask)); |
@@ -850,10 +861,6 @@ __cpufreq_cooling_register(struct device_node *np, | |||
850 | cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus); | 861 | cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus); |
851 | 862 | ||
852 | if (capacitance) { | 863 | if (capacitance) { |
853 | cpufreq_cooling_ops.get_requested_power = | ||
854 | cpufreq_get_requested_power; | ||
855 | cpufreq_cooling_ops.state2power = cpufreq_state2power; | ||
856 | cpufreq_cooling_ops.power2state = cpufreq_power2state; | ||
857 | cpufreq_dev->plat_get_static_power = plat_static_func; | 864 | cpufreq_dev->plat_get_static_power = plat_static_func; |
858 | 865 | ||
859 | ret = build_dyn_power_table(cpufreq_dev, capacitance); | 866 | ret = build_dyn_power_table(cpufreq_dev, capacitance); |
@@ -861,6 +868,10 @@ __cpufreq_cooling_register(struct device_node *np, | |||
861 | cool_dev = ERR_PTR(ret); | 868 | cool_dev = ERR_PTR(ret); |
862 | goto free_table; | 869 | goto free_table; |
863 | } | 870 | } |
871 | |||
872 | cooling_ops = &cpufreq_power_cooling_ops; | ||
873 | } else { | ||
874 | cooling_ops = &cpufreq_cooling_ops; | ||
864 | } | 875 | } |
865 | 876 | ||
866 | ret = get_idr(&cpufreq_idr, &cpufreq_dev->id); | 877 | ret = get_idr(&cpufreq_idr, &cpufreq_dev->id); |
@@ -885,7 +896,7 @@ __cpufreq_cooling_register(struct device_node *np, | |||
885 | cpufreq_dev->id); | 896 | cpufreq_dev->id); |
886 | 897 | ||
887 | cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev, | 898 | cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev, |
888 | &cpufreq_cooling_ops); | 899 | cooling_ops); |
889 | if (IS_ERR(cool_dev)) | 900 | if (IS_ERR(cool_dev)) |
890 | goto remove_idr; | 901 | goto remove_idr; |
891 | 902 | ||
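The cpu_cooling change removes a mutate-shared-state pattern: previously every power-aware registration patched the single static cpufreq_cooling_ops in place, which races with concurrent registrations and, once patched, leaks the power callbacks into later registrations that passed no capacitance. Keeping two complete static ops tables and selecting one by pointer leaves the shared data untouched at runtime:

	/* Sketch: choose an immutable table per registration. */
	struct thermal_cooling_device_ops *ops =
		capacitance ? &cpufreq_power_cooling_ops : &cpufreq_cooling_ops;

	cool_dev = thermal_of_cooling_device_register(np, dev_name,
						      cpufreq_dev, ops);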
diff --git a/drivers/thermal/fair_share.c b/drivers/thermal/fair_share.c index 34fe36504a55..68bd1b569118 100644 --- a/drivers/thermal/fair_share.c +++ b/drivers/thermal/fair_share.c | |||
@@ -116,7 +116,9 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip) | |||
116 | instance->target = get_target_state(tz, cdev, percentage, | 116 | instance->target = get_target_state(tz, cdev, percentage, |
117 | cur_trip_level); | 117 | cur_trip_level); |
118 | 118 | ||
119 | mutex_lock(&instance->cdev->lock); | ||
119 | instance->cdev->updated = false; | 120 | instance->cdev->updated = false; |
121 | mutex_unlock(&instance->cdev->lock); | ||
120 | thermal_cdev_update(cdev); | 122 | thermal_cdev_update(cdev); |
121 | } | 123 | } |
122 | return 0; | 124 | return 0; |
diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c index fc52016d4e85..bb118a152cbb 100644 --- a/drivers/thermal/gov_bang_bang.c +++ b/drivers/thermal/gov_bang_bang.c | |||
@@ -71,7 +71,9 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip) | |||
71 | dev_dbg(&instance->cdev->device, "target=%d\n", | 71 | dev_dbg(&instance->cdev->device, "target=%d\n", |
72 | (int)instance->target); | 72 | (int)instance->target); |
73 | 73 | ||
74 | mutex_lock(&instance->cdev->lock); | ||
74 | instance->cdev->updated = false; /* cdev needs update */ | 75 | instance->cdev->updated = false; /* cdev needs update */ |
76 | mutex_unlock(&instance->cdev->lock); | ||
75 | } | 77 | } |
76 | 78 | ||
77 | mutex_unlock(&tz->lock); | 79 | mutex_unlock(&tz->lock); |
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c index c5547bd711db..e473548b5d28 100644 --- a/drivers/thermal/imx_thermal.c +++ b/drivers/thermal/imx_thermal.c | |||
@@ -471,8 +471,6 @@ MODULE_DEVICE_TABLE(of, of_imx_thermal_match); | |||
471 | 471 | ||
472 | static int imx_thermal_probe(struct platform_device *pdev) | 472 | static int imx_thermal_probe(struct platform_device *pdev) |
473 | { | 473 | { |
474 | const struct of_device_id *of_id = | ||
475 | of_match_device(of_imx_thermal_match, &pdev->dev); | ||
476 | struct imx_thermal_data *data; | 474 | struct imx_thermal_data *data; |
477 | struct regmap *map; | 475 | struct regmap *map; |
478 | int measure_freq; | 476 | int measure_freq; |
@@ -490,7 +488,7 @@ static int imx_thermal_probe(struct platform_device *pdev) | |||
490 | } | 488 | } |
491 | data->tempmon = map; | 489 | data->tempmon = map; |
492 | 490 | ||
493 | data->socdata = of_id->data; | 491 | data->socdata = of_device_get_match_data(&pdev->dev); |
494 | 492 | ||
495 | /* make sure the IRQ flag is clear before enabling irq on i.MX6SX */ | 493 | /* make sure the IRQ flag is clear before enabling irq on i.MX6SX */ |
496 | if (data->socdata->version == TEMPMON_IMX6SX) { | 494 | if (data->socdata->version == TEMPMON_IMX6SX) { |
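The imx hunk swaps the open-coded of_match_device() lookup plus ->data dereference for of_device_get_match_data(), which does both in one call and returns NULL when there is no match data. A sketch with a hypothetical guard for drivers where the data may legitimately be absent:

	/* Sketch: one-call fetch of the matched of_device_id's data. */
	data->socdata = of_device_get_match_data(&pdev->dev);
	if (!data->socdata)
		return -ENODEV;	/* hypothetical: only if absence is an error */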
diff --git a/drivers/thermal/int340x_thermal/int3406_thermal.c b/drivers/thermal/int340x_thermal/int3406_thermal.c index a578cd257db4..1891f34ab7fc 100644 --- a/drivers/thermal/int340x_thermal/int3406_thermal.c +++ b/drivers/thermal/int340x_thermal/int3406_thermal.c | |||
@@ -225,7 +225,6 @@ static struct platform_driver int3406_thermal_driver = { | |||
225 | .remove = int3406_thermal_remove, | 225 | .remove = int3406_thermal_remove, |
226 | .driver = { | 226 | .driver = { |
227 | .name = "int3406 thermal", | 227 | .name = "int3406 thermal", |
228 | .owner = THIS_MODULE, | ||
229 | .acpi_match_table = int3406_thermal_match, | 228 | .acpi_match_table = int3406_thermal_match, |
230 | }, | 229 | }, |
231 | }; | 230 | }; |
diff --git a/drivers/thermal/intel_pch_thermal.c b/drivers/thermal/intel_pch_thermal.c index 6a6ec1c95a7a..9b4815e81b0d 100644 --- a/drivers/thermal/intel_pch_thermal.c +++ b/drivers/thermal/intel_pch_thermal.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/init.h> | 21 | #include <linux/init.h> |
22 | #include <linux/pci.h> | 22 | #include <linux/pci.h> |
23 | #include <linux/thermal.h> | 23 | #include <linux/thermal.h> |
24 | #include <linux/pm.h> | ||
24 | 25 | ||
25 | /* Intel PCH thermal Device IDs */ | 26 | /* Intel PCH thermal Device IDs */ |
26 | #define PCH_THERMAL_DID_WPT 0x9CA4 /* Wildcat Point */ | 27 | #define PCH_THERMAL_DID_WPT 0x9CA4 /* Wildcat Point */ |
@@ -65,6 +66,7 @@ struct pch_thermal_device { | |||
65 | unsigned long crt_temp; | 66 | unsigned long crt_temp; |
66 | int hot_trip_id; | 67 | int hot_trip_id; |
67 | unsigned long hot_temp; | 68 | unsigned long hot_temp; |
69 | bool bios_enabled; | ||
68 | }; | 70 | }; |
69 | 71 | ||
70 | static int pch_wpt_init(struct pch_thermal_device *ptd, int *nr_trips) | 72 | static int pch_wpt_init(struct pch_thermal_device *ptd, int *nr_trips) |
@@ -75,8 +77,10 @@ static int pch_wpt_init(struct pch_thermal_device *ptd, int *nr_trips) | |||
75 | *nr_trips = 0; | 77 | *nr_trips = 0; |
76 | 78 | ||
77 | /* Check if BIOS has already enabled thermal sensor */ | 79 | /* Check if BIOS has already enabled thermal sensor */ |
78 | if (WPT_TSS_TSDSS & readb(ptd->hw_base + WPT_TSS)) | 80 | if (WPT_TSS_TSDSS & readb(ptd->hw_base + WPT_TSS)) { |
81 | ptd->bios_enabled = true; | ||
79 | goto read_trips; | 82 | goto read_trips; |
83 | } | ||
80 | 84 | ||
81 | tsel = readb(ptd->hw_base + WPT_TSEL); | 85 | tsel = readb(ptd->hw_base + WPT_TSEL); |
82 | /* | 86 | /* |
@@ -130,9 +134,39 @@ static int pch_wpt_get_temp(struct pch_thermal_device *ptd, int *temp) | |||
130 | return 0; | 134 | return 0; |
131 | } | 135 | } |
132 | 136 | ||
137 | static int pch_wpt_suspend(struct pch_thermal_device *ptd) | ||
138 | { | ||
139 | u8 tsel; | ||
140 | |||
141 | if (ptd->bios_enabled) | ||
142 | return 0; | ||
143 | |||
144 | tsel = readb(ptd->hw_base + WPT_TSEL); | ||
145 | |||
146 | writeb(tsel & 0xFE, ptd->hw_base + WPT_TSEL); | ||
147 | |||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | static int pch_wpt_resume(struct pch_thermal_device *ptd) | ||
152 | { | ||
153 | u8 tsel; | ||
154 | |||
155 | if (ptd->bios_enabled) | ||
156 | return 0; | ||
157 | |||
158 | tsel = readb(ptd->hw_base + WPT_TSEL); | ||
159 | |||
160 | writeb(tsel | WPT_TSEL_ETS, ptd->hw_base + WPT_TSEL); | ||
161 | |||
162 | return 0; | ||
163 | } | ||
164 | |||
133 | struct pch_dev_ops { | 165 | struct pch_dev_ops { |
134 | int (*hw_init)(struct pch_thermal_device *ptd, int *nr_trips); | 166 | int (*hw_init)(struct pch_thermal_device *ptd, int *nr_trips); |
135 | int (*get_temp)(struct pch_thermal_device *ptd, int *temp); | 167 | int (*get_temp)(struct pch_thermal_device *ptd, int *temp); |
168 | int (*suspend)(struct pch_thermal_device *ptd); | ||
169 | int (*resume)(struct pch_thermal_device *ptd); | ||
136 | }; | 170 | }; |
137 | 171 | ||
138 | 172 | ||
@@ -140,6 +174,8 @@ struct pch_dev_ops { | |||
140 | static const struct pch_dev_ops pch_dev_ops_wpt = { | 174 | static const struct pch_dev_ops pch_dev_ops_wpt = { |
141 | .hw_init = pch_wpt_init, | 175 | .hw_init = pch_wpt_init, |
142 | .get_temp = pch_wpt_get_temp, | 176 | .get_temp = pch_wpt_get_temp, |
177 | .suspend = pch_wpt_suspend, | ||
178 | .resume = pch_wpt_resume, | ||
143 | }; | 179 | }; |
144 | 180 | ||
145 | static int pch_thermal_get_temp(struct thermal_zone_device *tzd, int *temp) | 181 | static int pch_thermal_get_temp(struct thermal_zone_device *tzd, int *temp) |
@@ -269,6 +305,22 @@ static void intel_pch_thermal_remove(struct pci_dev *pdev) | |||
269 | pci_disable_device(pdev); | 305 | pci_disable_device(pdev); |
270 | } | 306 | } |
271 | 307 | ||
308 | static int intel_pch_thermal_suspend(struct device *device) | ||
309 | { | ||
310 | struct pci_dev *pdev = to_pci_dev(device); | ||
311 | struct pch_thermal_device *ptd = pci_get_drvdata(pdev); | ||
312 | |||
313 | return ptd->ops->suspend(ptd); | ||
314 | } | ||
315 | |||
316 | static int intel_pch_thermal_resume(struct device *device) | ||
317 | { | ||
318 | struct pci_dev *pdev = to_pci_dev(device); | ||
319 | struct pch_thermal_device *ptd = pci_get_drvdata(pdev); | ||
320 | |||
321 | return ptd->ops->resume(ptd); | ||
322 | } | ||
323 | |||
272 | static struct pci_device_id intel_pch_thermal_id[] = { | 324 | static struct pci_device_id intel_pch_thermal_id[] = { |
273 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_WPT) }, | 325 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_WPT) }, |
274 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL) }, | 326 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL) }, |
@@ -276,11 +328,17 @@ static struct pci_device_id intel_pch_thermal_id[] = { | |||
276 | }; | 328 | }; |
277 | MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id); | 329 | MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id); |
278 | 330 | ||
331 | static const struct dev_pm_ops intel_pch_pm_ops = { | ||
332 | .suspend = intel_pch_thermal_suspend, | ||
333 | .resume = intel_pch_thermal_resume, | ||
334 | }; | ||
335 | |||
279 | static struct pci_driver intel_pch_thermal_driver = { | 336 | static struct pci_driver intel_pch_thermal_driver = { |
280 | .name = "intel_pch_thermal", | 337 | .name = "intel_pch_thermal", |
281 | .id_table = intel_pch_thermal_id, | 338 | .id_table = intel_pch_thermal_id, |
282 | .probe = intel_pch_thermal_probe, | 339 | .probe = intel_pch_thermal_probe, |
283 | .remove = intel_pch_thermal_remove, | 340 | .remove = intel_pch_thermal_remove, |
341 | .driver.pm = &intel_pch_pm_ops, | ||
284 | }; | 342 | }; |
285 | 343 | ||
286 | module_pci_driver(intel_pch_thermal_driver); | 344 | module_pci_driver(intel_pch_thermal_driver); |
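The suspend/resume pair respects sensor ownership: if the BIOS enabled the thermal sensor (bios_enabled), the driver leaves it alone across suspend; otherwise it clears the enable bit in TSEL on the way down and sets WPT_TSEL_ETS again on resume, restoring exactly the state it created. The callbacks hang off the PCI driver through driver.pm; a minimal sketch of that wiring with hypothetical callbacks:

	/* Sketch: system-sleep callbacks on a PCI driver. */
	static int demo_suspend(struct device *dev) { return 0; }
	static int demo_resume(struct device *dev)  { return 0; }

	static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

	static struct pci_driver demo_driver = {
		.name		= "demo",
		.driver.pm	= &demo_pm_ops,
	};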
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c index 015ce2eb6eb7..0e4dc0afcfd2 100644 --- a/drivers/thermal/intel_powerclamp.c +++ b/drivers/thermal/intel_powerclamp.c | |||
@@ -388,7 +388,7 @@ static int clamp_thread(void *arg) | |||
388 | int sleeptime; | 388 | int sleeptime; |
389 | unsigned long target_jiffies; | 389 | unsigned long target_jiffies; |
390 | unsigned int guard; | 390 | unsigned int guard; |
391 | unsigned int compensation = 0; | 391 | unsigned int compensated_ratio; |
392 | int interval; /* jiffies to sleep for each attempt */ | 392 | int interval; /* jiffies to sleep for each attempt */ |
393 | unsigned int duration_jiffies = msecs_to_jiffies(duration); | 393 | unsigned int duration_jiffies = msecs_to_jiffies(duration); |
394 | unsigned int window_size_now; | 394 | unsigned int window_size_now; |
@@ -409,8 +409,11 @@ static int clamp_thread(void *arg) | |||
409 | * c-states, thus we need to compensate the injected idle ratio | 409 | * c-states, thus we need to compensate the injected idle ratio |
410 | * to achieve the actual target reported by the HW. | 410 | * to achieve the actual target reported by the HW. |
411 | */ | 411 | */ |
412 | compensation = get_compensation(target_ratio); | 412 | compensated_ratio = target_ratio + |
413 | interval = duration_jiffies*100/(target_ratio+compensation); | 413 | get_compensation(target_ratio); |
414 | if (compensated_ratio <= 0) | ||
415 | compensated_ratio = 1; | ||
416 | interval = duration_jiffies * 100 / compensated_ratio; | ||
414 | 417 | ||
415 | /* align idle time */ | 418 | /* align idle time */ |
416 | target_jiffies = roundup(jiffies, interval); | 419 | target_jiffies = roundup(jiffies, interval); |
@@ -647,8 +650,8 @@ static int powerclamp_set_cur_state(struct thermal_cooling_device *cdev, | |||
647 | goto exit_set; | 650 | goto exit_set; |
648 | } else if (set_target_ratio > 0 && new_target_ratio == 0) { | 651 | } else if (set_target_ratio > 0 && new_target_ratio == 0) { |
649 | pr_info("Stop forced idle injection\n"); | 652 | pr_info("Stop forced idle injection\n"); |
650 | set_target_ratio = 0; | ||
651 | end_power_clamp(); | 653 | end_power_clamp(); |
654 | set_target_ratio = 0; | ||
652 | } else /* adjust currently running */ { | 655 | } else /* adjust currently running */ { |
653 | set_target_ratio = new_target_ratio; | 656 | set_target_ratio = new_target_ratio; |
654 | /* make new set_target_ratio visible to other cpus */ | 657 | /* make new set_target_ratio visible to other cpus */ |
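Two things happen in the powerclamp hunks: the interval arithmetic now divides by a named compensated_ratio that is clamped to at least 1 before use (get_compensation() can pull the sum toward zero, and a zero divisor would crash the clamp thread), and end_power_clamp() now runs before set_target_ratio is zeroed, presumably so the worker threads being torn down never compute with a zero ratio. Worked numbers for the first point:

	/* Sketch: a 6-jiffy duration at a 48% compensated idle ratio. */
	unsigned int duration_jiffies = 6;
	unsigned int compensated_ratio = 48;	/* target_ratio + compensation */

	if (!compensated_ratio)			/* same clamp as the patch */
		compensated_ratio = 1;
	int interval = duration_jiffies * 100 / compensated_ratio; /* 600/48 = 12 */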
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c index 2f1a863a8e15..b4d3116cfdaf 100644 --- a/drivers/thermal/power_allocator.c +++ b/drivers/thermal/power_allocator.c | |||
@@ -529,7 +529,9 @@ static void allow_maximum_power(struct thermal_zone_device *tz) | |||
529 | continue; | 529 | continue; |
530 | 530 | ||
531 | instance->target = 0; | 531 | instance->target = 0; |
532 | mutex_lock(&instance->cdev->lock); | ||
532 | instance->cdev->updated = false; | 533 | instance->cdev->updated = false; |
534 | mutex_unlock(&instance->cdev->lock); | ||
533 | thermal_cdev_update(instance->cdev); | 535 | thermal_cdev_update(instance->cdev); |
534 | } | 536 | } |
535 | } | 537 | } |
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c index ea9366ad3e6b..bcef2e7c4ec9 100644 --- a/drivers/thermal/step_wise.c +++ b/drivers/thermal/step_wise.c | |||
@@ -175,7 +175,9 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip) | |||
175 | update_passive_instance(tz, trip_type, -1); | 175 | update_passive_instance(tz, trip_type, -1); |
176 | 176 | ||
177 | instance->initialized = true; | 177 | instance->initialized = true; |
178 | mutex_lock(&instance->cdev->lock); | ||
178 | instance->cdev->updated = false; /* cdev needs update */ | 179 | instance->cdev->updated = false; /* cdev needs update */ |
180 | mutex_unlock(&instance->cdev->lock); | ||
179 | } | 181 | } |
180 | 182 | ||
181 | mutex_unlock(&tz->lock); | 183 | mutex_unlock(&tz->lock); |
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 5133cd1e10b7..e2fc6161dded 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c | |||
@@ -1093,7 +1093,9 @@ int power_actor_set_power(struct thermal_cooling_device *cdev, | |||
1093 | return ret; | 1093 | return ret; |
1094 | 1094 | ||
1095 | instance->target = state; | 1095 | instance->target = state; |
1096 | mutex_lock(&cdev->lock); | ||
1096 | cdev->updated = false; | 1097 | cdev->updated = false; |
1098 | mutex_unlock(&cdev->lock); | ||
1097 | thermal_cdev_update(cdev); | 1099 | thermal_cdev_update(cdev); |
1098 | 1100 | ||
1099 | return 0; | 1101 | return 0; |
@@ -1623,11 +1625,13 @@ void thermal_cdev_update(struct thermal_cooling_device *cdev) | |||
1623 | struct thermal_instance *instance; | 1625 | struct thermal_instance *instance; |
1624 | unsigned long target = 0; | 1626 | unsigned long target = 0; |
1625 | 1627 | ||
1628 | mutex_lock(&cdev->lock); | ||
1626 | /* cooling device is updated */ | 1629 | /* cooling device is updated */ |
1627 | if (cdev->updated) | 1630 | if (cdev->updated) { |
1631 | mutex_unlock(&cdev->lock); | ||
1628 | return; | 1632 | return; |
1633 | } | ||
1629 | 1634 | ||
1630 | mutex_lock(&cdev->lock); | ||
1631 | /* Make sure cdev enters the deepest cooling state */ | 1635 | /* Make sure cdev enters the deepest cooling state */ |
1632 | list_for_each_entry(instance, &cdev->thermal_instances, cdev_node) { | 1636 | list_for_each_entry(instance, &cdev->thermal_instances, cdev_node) { |
1633 | dev_dbg(&cdev->device, "zone%d->target=%lu\n", | 1637 | dev_dbg(&cdev->device, "zone%d->target=%lu\n", |
@@ -1637,9 +1641,9 @@ void thermal_cdev_update(struct thermal_cooling_device *cdev) | |||
1637 | if (instance->target > target) | 1641 | if (instance->target > target) |
1638 | target = instance->target; | 1642 | target = instance->target; |
1639 | } | 1643 | } |
1640 | mutex_unlock(&cdev->lock); | ||
1641 | cdev->ops->set_cur_state(cdev, target); | 1644 | cdev->ops->set_cur_state(cdev, target); |
1642 | cdev->updated = true; | 1645 | cdev->updated = true; |
1646 | mutex_unlock(&cdev->lock); | ||
1643 | trace_cdev_update(cdev, target); | 1647 | trace_cdev_update(cdev, target); |
1644 | dev_dbg(&cdev->device, "set to state %lu\n", target); | 1648 | dev_dbg(&cdev->device, "set to state %lu\n", target); |
1645 | } | 1649 | } |
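The fair_share, bang_bang, power_allocator, step_wise and power_actor_set_power() hunks all wrap the cdev->updated = false store in cdev->lock, and thermal_cdev_update() above now takes that lock before testing the flag and holds it across set_cur_state(). Together they turn the mark-dirty/apply-update pair into one check-then-act critical section; without it, an update racing a governor's flag clear could read updated == true and return without applying the new target (the mutex_init() added to clock_cooling makes that driver's lock usable for the same protocol). The protocol in miniature, names hypothetical:

	/* Sketch: producer marks dirty, consumer tests and applies, one lock. */
	static void demo_mark_dirty(struct demo_cdev *c)
	{
		mutex_lock(&c->lock);
		c->updated = false;
		mutex_unlock(&c->lock);
	}

	static void demo_apply(struct demo_cdev *c)
	{
		mutex_lock(&c->lock);
		if (c->updated) {			/* nothing pending */
			mutex_unlock(&c->lock);
			return;
		}
		demo_set_state(c, demo_pick_target(c));	/* applied under the lock */
		c->updated = true;
		mutex_unlock(&c->lock);
	}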
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c index 06fd2ed9ef9d..c41c7742903a 100644 --- a/drivers/thermal/thermal_hwmon.c +++ b/drivers/thermal/thermal_hwmon.c | |||
@@ -232,6 +232,7 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) | |||
232 | 232 | ||
233 | return result; | 233 | return result; |
234 | } | 234 | } |
235 | EXPORT_SYMBOL_GPL(thermal_add_hwmon_sysfs); | ||
235 | 236 | ||
236 | void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz) | 237 | void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz) |
237 | { | 238 | { |
@@ -270,3 +271,4 @@ void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz) | |||
270 | hwmon_device_unregister(hwmon->device); | 271 | hwmon_device_unregister(hwmon->device); |
271 | kfree(hwmon); | 272 | kfree(hwmon); |
272 | } | 273 | } |
274 | EXPORT_SYMBOL_GPL(thermal_remove_hwmon_sysfs); | ||
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 71912301ef7f..0f3f62e81e5b 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
@@ -1354,7 +1354,6 @@ made_compressed_probe: | |||
1354 | spin_lock_init(&acm->write_lock); | 1354 | spin_lock_init(&acm->write_lock); |
1355 | spin_lock_init(&acm->read_lock); | 1355 | spin_lock_init(&acm->read_lock); |
1356 | mutex_init(&acm->mutex); | 1356 | mutex_init(&acm->mutex); |
1357 | acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress); | ||
1358 | acm->is_int_ep = usb_endpoint_xfer_int(epread); | 1357 | acm->is_int_ep = usb_endpoint_xfer_int(epread); |
1359 | if (acm->is_int_ep) | 1358 | if (acm->is_int_ep) |
1360 | acm->bInterval = epread->bInterval; | 1359 | acm->bInterval = epread->bInterval; |
@@ -1394,14 +1393,14 @@ made_compressed_probe: | |||
1394 | urb->transfer_dma = rb->dma; | 1393 | urb->transfer_dma = rb->dma; |
1395 | if (acm->is_int_ep) { | 1394 | if (acm->is_int_ep) { |
1396 | usb_fill_int_urb(urb, acm->dev, | 1395 | usb_fill_int_urb(urb, acm->dev, |
1397 | acm->rx_endpoint, | 1396 | usb_rcvintpipe(usb_dev, epread->bEndpointAddress), |
1398 | rb->base, | 1397 | rb->base, |
1399 | acm->readsize, | 1398 | acm->readsize, |
1400 | acm_read_bulk_callback, rb, | 1399 | acm_read_bulk_callback, rb, |
1401 | acm->bInterval); | 1400 | acm->bInterval); |
1402 | } else { | 1401 | } else { |
1403 | usb_fill_bulk_urb(urb, acm->dev, | 1402 | usb_fill_bulk_urb(urb, acm->dev, |
1404 | acm->rx_endpoint, | 1403 | usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress), |
1405 | rb->base, | 1404 | rb->base, |
1406 | acm->readsize, | 1405 | acm->readsize, |
1407 | acm_read_bulk_callback, rb); | 1406 | acm_read_bulk_callback, rb); |
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h index 05ce308d5d2a..1f1eabfd8462 100644 --- a/drivers/usb/class/cdc-acm.h +++ b/drivers/usb/class/cdc-acm.h | |||
@@ -96,7 +96,6 @@ struct acm { | |||
96 | struct acm_rb read_buffers[ACM_NR]; | 96 | struct acm_rb read_buffers[ACM_NR]; |
97 | struct acm_wb *putbuffer; /* for acm_tty_put_char() */ | 97 | struct acm_wb *putbuffer; /* for acm_tty_put_char() */ |
98 | int rx_buflimit; | 98 | int rx_buflimit; |
99 | int rx_endpoint; | ||
100 | spinlock_t read_lock; | 99 | spinlock_t read_lock; |
101 | int write_used; /* number of non-empty write buffers */ | 100 | int write_used; /* number of non-empty write buffers */ |
102 | int transmitting; | 101 | int transmitting; |
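The cdc-acm fix matters because a USB pipe value encodes the endpoint type as well as its address and direction: the old code cached one pipe built with usb_rcvbulkpipe() and reused it even when the read endpoint was an interrupt endpoint, handing usb_fill_int_urb() a bulk-typed pipe. Building the pipe at fill time with the matching helper removes both the mismatch and the cached field. The selection in isolation:

	/* Sketch: pick the pipe constructor that matches the endpoint type. */
	unsigned int pipe = usb_endpoint_xfer_int(epread) ?
		usb_rcvintpipe(usb_dev, epread->bEndpointAddress) :
		usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress);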
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 31ccdccd7a04..051163189810 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c | |||
@@ -171,6 +171,31 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, | |||
171 | ep, buffer, size); | 171 | ep, buffer, size); |
172 | } | 172 | } |
173 | 173 | ||
174 | static const unsigned short low_speed_maxpacket_maxes[4] = { | ||
175 | [USB_ENDPOINT_XFER_CONTROL] = 8, | ||
176 | [USB_ENDPOINT_XFER_ISOC] = 0, | ||
177 | [USB_ENDPOINT_XFER_BULK] = 0, | ||
178 | [USB_ENDPOINT_XFER_INT] = 8, | ||
179 | }; | ||
180 | static const unsigned short full_speed_maxpacket_maxes[4] = { | ||
181 | [USB_ENDPOINT_XFER_CONTROL] = 64, | ||
182 | [USB_ENDPOINT_XFER_ISOC] = 1023, | ||
183 | [USB_ENDPOINT_XFER_BULK] = 64, | ||
184 | [USB_ENDPOINT_XFER_INT] = 64, | ||
185 | }; | ||
186 | static const unsigned short high_speed_maxpacket_maxes[4] = { | ||
187 | [USB_ENDPOINT_XFER_CONTROL] = 64, | ||
188 | [USB_ENDPOINT_XFER_ISOC] = 1024, | ||
189 | [USB_ENDPOINT_XFER_BULK] = 512, | ||
190 | [USB_ENDPOINT_XFER_INT] = 1023, | ||
191 | }; | ||
192 | static const unsigned short super_speed_maxpacket_maxes[4] = { | ||
193 | [USB_ENDPOINT_XFER_CONTROL] = 512, | ||
194 | [USB_ENDPOINT_XFER_ISOC] = 1024, | ||
195 | [USB_ENDPOINT_XFER_BULK] = 1024, | ||
196 | [USB_ENDPOINT_XFER_INT] = 1024, | ||
197 | }; | ||
198 | |||
174 | static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, | 199 | static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, |
175 | int asnum, struct usb_host_interface *ifp, int num_ep, | 200 | int asnum, struct usb_host_interface *ifp, int num_ep, |
176 | unsigned char *buffer, int size) | 201 | unsigned char *buffer, int size) |
@@ -179,6 +204,8 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, | |||
179 | struct usb_endpoint_descriptor *d; | 204 | struct usb_endpoint_descriptor *d; |
180 | struct usb_host_endpoint *endpoint; | 205 | struct usb_host_endpoint *endpoint; |
181 | int n, i, j, retval; | 206 | int n, i, j, retval; |
207 | unsigned int maxp; | ||
208 | const unsigned short *maxpacket_maxes; | ||
182 | 209 | ||
183 | d = (struct usb_endpoint_descriptor *) buffer; | 210 | d = (struct usb_endpoint_descriptor *) buffer; |
184 | buffer += d->bLength; | 211 | buffer += d->bLength; |
@@ -286,6 +313,42 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, | |||
286 | endpoint->desc.wMaxPacketSize = cpu_to_le16(8); | 313 | endpoint->desc.wMaxPacketSize = cpu_to_le16(8); |
287 | } | 314 | } |
288 | 315 | ||
316 | /* Validate the wMaxPacketSize field */ | ||
317 | maxp = usb_endpoint_maxp(&endpoint->desc); | ||
318 | |||
319 | /* Find the highest legal maxpacket size for this endpoint */ | ||
320 | i = 0; /* additional transactions per microframe */ | ||
321 | switch (to_usb_device(ddev)->speed) { | ||
322 | case USB_SPEED_LOW: | ||
323 | maxpacket_maxes = low_speed_maxpacket_maxes; | ||
324 | break; | ||
325 | case USB_SPEED_FULL: | ||
326 | maxpacket_maxes = full_speed_maxpacket_maxes; | ||
327 | break; | ||
328 | case USB_SPEED_HIGH: | ||
329 | /* Bits 12..11 are allowed only for HS periodic endpoints */ | ||
330 | if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) { | ||
331 | i = maxp & (BIT(12) | BIT(11)); | ||
332 | maxp &= ~i; | ||
333 | } | ||
334 | /* fallthrough */ | ||
335 | default: | ||
336 | maxpacket_maxes = high_speed_maxpacket_maxes; | ||
337 | break; | ||
338 | case USB_SPEED_SUPER: | ||
339 | case USB_SPEED_SUPER_PLUS: | ||
340 | maxpacket_maxes = super_speed_maxpacket_maxes; | ||
341 | break; | ||
342 | } | ||
343 | j = maxpacket_maxes[usb_endpoint_type(&endpoint->desc)]; | ||
344 | |||
345 | if (maxp > j) { | ||
346 | dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid maxpacket %d, setting to %d\n", | ||
347 | cfgno, inum, asnum, d->bEndpointAddress, maxp, j); | ||
348 | maxp = j; | ||
349 | endpoint->desc.wMaxPacketSize = cpu_to_le16(i | maxp); | ||
350 | } | ||
351 | |||
289 | /* | 352 | /* |
290 | * Some buggy high speed devices have bulk endpoints using | 353 | * Some buggy high speed devices have bulk endpoints using |
291 | * maxpacket sizes other than 512. High speed HCDs may not | 354 | * maxpacket sizes other than 512. High speed HCDs may not |
@@ -293,9 +356,6 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, | |||
293 | */ | 356 | */ |
294 | if (to_usb_device(ddev)->speed == USB_SPEED_HIGH | 357 | if (to_usb_device(ddev)->speed == USB_SPEED_HIGH |
295 | && usb_endpoint_xfer_bulk(d)) { | 358 | && usb_endpoint_xfer_bulk(d)) { |
296 | unsigned maxp; | ||
297 | |||
298 | maxp = usb_endpoint_maxp(&endpoint->desc) & 0x07ff; | ||
299 | if (maxp != 512) | 359 | if (maxp != 512) |
300 | dev_warn(ddev, "config %d interface %d altsetting %d " | 360 | dev_warn(ddev, "config %d interface %d altsetting %d " |
301 | "bulk endpoint 0x%X has invalid maxpacket %d\n", | 361 | "bulk endpoint 0x%X has invalid maxpacket %d\n", |
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index e9f5043a2167..e6a6d67c8705 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c | |||
@@ -241,7 +241,8 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma) | |||
241 | goto error_decrease_mem; | 241 | goto error_decrease_mem; |
242 | } | 242 | } |
243 | 243 | ||
244 | mem = usb_alloc_coherent(ps->dev, size, GFP_USER, &dma_handle); | 244 | mem = usb_alloc_coherent(ps->dev, size, GFP_USER | __GFP_NOWARN, |
245 | &dma_handle); | ||
245 | if (!mem) { | 246 | if (!mem) { |
246 | ret = -ENOMEM; | 247 | ret = -ENOMEM; |
247 | goto error_free_usbm; | 248 | goto error_free_usbm; |
@@ -2582,7 +2583,9 @@ static unsigned int usbdev_poll(struct file *file, | |||
2582 | if (file->f_mode & FMODE_WRITE && !list_empty(&ps->async_completed)) | 2583 | if (file->f_mode & FMODE_WRITE && !list_empty(&ps->async_completed)) |
2583 | mask |= POLLOUT | POLLWRNORM; | 2584 | mask |= POLLOUT | POLLWRNORM; |
2584 | if (!connected(ps)) | 2585 | if (!connected(ps)) |
2585 | mask |= POLLERR | POLLHUP; | 2586 | mask |= POLLHUP; |
2587 | if (list_empty(&ps->list)) | ||
2588 | mask |= POLLERR; | ||
2586 | return mask; | 2589 | return mask; |
2587 | } | 2590 | } |
2588 | 2591 | ||
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index bee13517676f..1d5fc32d06d0 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
@@ -1052,14 +1052,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) | |||
1052 | 1052 | ||
1053 | /* Continue a partial initialization */ | 1053 | /* Continue a partial initialization */ |
1054 | if (type == HUB_INIT2 || type == HUB_INIT3) { | 1054 | if (type == HUB_INIT2 || type == HUB_INIT3) { |
1055 | device_lock(hub->intfdev); | 1055 | device_lock(&hdev->dev); |
1056 | 1056 | ||
1057 | /* Was the hub disconnected while we were waiting? */ | 1057 | /* Was the hub disconnected while we were waiting? */ |
1058 | if (hub->disconnected) { | 1058 | if (hub->disconnected) |
1059 | device_unlock(hub->intfdev); | 1059 | goto disconnected; |
1060 | kref_put(&hub->kref, hub_release); | ||
1061 | return; | ||
1062 | } | ||
1063 | if (type == HUB_INIT2) | 1060 | if (type == HUB_INIT2) |
1064 | goto init2; | 1061 | goto init2; |
1065 | goto init3; | 1062 | goto init3; |
@@ -1262,7 +1259,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) | |||
1262 | queue_delayed_work(system_power_efficient_wq, | 1259 | queue_delayed_work(system_power_efficient_wq, |
1263 | &hub->init_work, | 1260 | &hub->init_work, |
1264 | msecs_to_jiffies(delay)); | 1261 | msecs_to_jiffies(delay)); |
1265 | device_unlock(hub->intfdev); | 1262 | device_unlock(&hdev->dev); |
1266 | return; /* Continues at init3: below */ | 1263 | return; /* Continues at init3: below */ |
1267 | } else { | 1264 | } else { |
1268 | msleep(delay); | 1265 | msleep(delay); |
@@ -1281,12 +1278,12 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) | |||
1281 | /* Scan all ports that need attention */ | 1278 | /* Scan all ports that need attention */ |
1282 | kick_hub_wq(hub); | 1279 | kick_hub_wq(hub); |
1283 | 1280 | ||
1284 | /* Allow autosuspend if it was suppressed */ | 1281 | if (type == HUB_INIT2 || type == HUB_INIT3) { |
1285 | if (type <= HUB_INIT3) | 1282 | /* Allow autosuspend if it was suppressed */ |
1283 | disconnected: | ||
1286 | usb_autopm_put_interface_async(to_usb_interface(hub->intfdev)); | 1284 | usb_autopm_put_interface_async(to_usb_interface(hub->intfdev)); |
1287 | 1285 | device_unlock(&hdev->dev); | |
1288 | if (type == HUB_INIT2 || type == HUB_INIT3) | 1286 | } |
1289 | device_unlock(hub->intfdev); | ||
1290 | 1287 | ||
1291 | kref_put(&hub->kref, hub_release); | 1288 | kref_put(&hub->kref, hub_release); |
1292 | } | 1289 | } |
@@ -1315,8 +1312,6 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type) | |||
1315 | struct usb_device *hdev = hub->hdev; | 1312 | struct usb_device *hdev = hub->hdev; |
1316 | int i; | 1313 | int i; |
1317 | 1314 | ||
1318 | cancel_delayed_work_sync(&hub->init_work); | ||
1319 | |||
1320 | /* hub_wq and related activity won't re-trigger */ | 1315 | /* hub_wq and related activity won't re-trigger */ |
1321 | hub->quiescing = 1; | 1316 | hub->quiescing = 1; |
1322 | 1317 | ||
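The hub_activate() changes move the init-vs-disconnect serialization from the interface lock to the usb_device lock and funnel both early exits through one disconnected: label, so the autosuspend put and the unlock happen exactly once on every path. Dropping cancel_delayed_work_sync() from hub_quiesce() fits the same scheme, presumably because the delayed work now takes the very lock its canceller would be holding; instead the work checks hub->disconnected under the lock and retires itself. A sketch of that shape, names hypothetical:

	/* Sketch: let the work item retire itself rather than cancelling it
	 * synchronously under the same lock (which would deadlock).
	 */
	static void demo_init_work(struct work_struct *w)
	{
		struct demo_hub *hub =
			container_of(w, struct demo_hub, init_work.work);

		device_lock(&hub->hdev->dev);
		if (hub->disconnected)	/* set by the remover under this lock */
			goto out;
		/* continue initialization */
	out:
		device_unlock(&hub->hdev->dev);
		demo_put(hub);		/* drop the ref taken when scheduling */
	}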
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c index 974335377d9f..e56d59b19a0e 100644 --- a/drivers/usb/dwc3/dwc3-of-simple.c +++ b/drivers/usb/dwc3/dwc3-of-simple.c | |||
@@ -61,6 +61,7 @@ static int dwc3_of_simple_probe(struct platform_device *pdev) | |||
61 | if (!simple->clks) | 61 | if (!simple->clks) |
62 | return -ENOMEM; | 62 | return -ENOMEM; |
63 | 63 | ||
64 | platform_set_drvdata(pdev, simple); | ||
64 | simple->dev = dev; | 65 | simple->dev = dev; |
65 | 66 | ||
66 | for (i = 0; i < simple->num_clocks; i++) { | 67 | for (i = 0; i < simple->num_clocks; i++) { |
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 45f5a232d9fb..2eb84d6c24a6 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #define PCI_DEVICE_ID_INTEL_BXT 0x0aaa | 37 | #define PCI_DEVICE_ID_INTEL_BXT 0x0aaa |
38 | #define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa | 38 | #define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa |
39 | #define PCI_DEVICE_ID_INTEL_APL 0x5aaa | 39 | #define PCI_DEVICE_ID_INTEL_APL 0x5aaa |
40 | #define PCI_DEVICE_ID_INTEL_KBP 0xa2b0 | ||
40 | 41 | ||
41 | static const struct acpi_gpio_params reset_gpios = { 0, 0, false }; | 42 | static const struct acpi_gpio_params reset_gpios = { 0, 0, false }; |
42 | static const struct acpi_gpio_params cs_gpios = { 1, 0, false }; | 43 | static const struct acpi_gpio_params cs_gpios = { 1, 0, false }; |
@@ -227,6 +228,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = { | |||
227 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), }, | 228 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), }, |
228 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT_M), }, | 229 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT_M), }, |
229 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), }, | 230 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), }, |
231 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), }, | ||
230 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, | 232 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, |
231 | { } /* Terminating Entry */ | 233 | { } /* Terminating Entry */ |
232 | }; | 234 | }; |
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 8f8c2157910e..1f5597ef945d 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c | |||
@@ -829,7 +829,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, | |||
829 | if (!req->request.no_interrupt && !chain) | 829 | if (!req->request.no_interrupt && !chain) |
830 | trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI; | 830 | trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI; |
831 | 831 | ||
832 | if (last) | 832 | if (last && !usb_endpoint_xfer_isoc(dep->endpoint.desc)) |
833 | trb->ctrl |= DWC3_TRB_CTRL_LST; | 833 | trb->ctrl |= DWC3_TRB_CTRL_LST; |
834 | 834 | ||
835 | if (chain) | 835 | if (chain) |
@@ -1955,7 +1955,8 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc) | |||
1955 | 1955 | ||
1956 | static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, | 1956 | static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, |
1957 | struct dwc3_request *req, struct dwc3_trb *trb, | 1957 | struct dwc3_request *req, struct dwc3_trb *trb, |
1958 | const struct dwc3_event_depevt *event, int status) | 1958 | const struct dwc3_event_depevt *event, int status, |
1959 | int chain) | ||
1959 | { | 1960 | { |
1960 | unsigned int count; | 1961 | unsigned int count; |
1961 | unsigned int s_pkt = 0; | 1962 | unsigned int s_pkt = 0; |
@@ -1964,17 +1965,22 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, | |||
1964 | dep->queued_requests--; | 1965 | dep->queued_requests--; |
1965 | trace_dwc3_complete_trb(dep, trb); | 1966 | trace_dwc3_complete_trb(dep, trb); |
1966 | 1967 | ||
1968 | /* | ||
1969 | * If we're in the middle of a series of chained TRBs and we | ||
1970 | * receive a short transfer along the way, DWC3 will skip | ||
1971 | * through all TRBs including the last TRB in the chain (the one | ||
1972 | * where the CHN bit is zero). DWC3 will also avoid clearing the | ||
1973 | * HWO bit, so SW has to do it manually. | ||
1974 | * | ||
1975 | * We're going to do that here to avoid problems of HW trying | ||
1976 | * to use bogus TRBs for transfers. | ||
1977 | */ | ||
1978 | if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO)) | ||
1979 | trb->ctrl &= ~DWC3_TRB_CTRL_HWO; | ||
1980 | |||
1967 | if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) | 1981 | if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) |
1968 | /* | 1982 | return 1; |
1969 | * We continue despite the error. There is not much we | 1983 | |
1970 | * can do. If we don't clean it up we loop forever. If | ||
1971 | * we skip the TRB then it gets overwritten after a | ||
1972 | * while since we use them in a ring buffer. A BUG() | ||
1973 | * would help. Lets hope that if this occurs, someone | ||
1974 | * fixes the root cause instead of looking away :) | ||
1975 | */ | ||
1976 | dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n", | ||
1977 | dep->name, trb); | ||
1978 | count = trb->size & DWC3_TRB_SIZE_MASK; | 1984 | count = trb->size & DWC3_TRB_SIZE_MASK; |
1979 | 1985 | ||
1980 | if (dep->direction) { | 1986 | if (dep->direction) { |
@@ -2013,15 +2019,7 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, | |||
2013 | s_pkt = 1; | 2019 | s_pkt = 1; |
2014 | } | 2020 | } |
2015 | 2021 | ||
2016 | /* | 2022 | if (s_pkt && !chain) |
2017 | * We assume here we will always receive the entire data block | ||
2018 | * which we should receive. Meaning, if we program RX to | ||
2019 | * receive 4K but we receive only 2K, we assume that's all we | ||
2020 | * should receive and we simply bounce the request back to the | ||
2021 | * gadget driver for further processing. | ||
2022 | */ | ||
2023 | req->request.actual += req->request.length - count; | ||
2024 | if (s_pkt) | ||
2025 | return 1; | 2023 | return 1; |
2026 | if ((event->status & DEPEVT_STATUS_LST) && | 2024 | if ((event->status & DEPEVT_STATUS_LST) && |
2027 | (trb->ctrl & (DWC3_TRB_CTRL_LST | | 2025 | (trb->ctrl & (DWC3_TRB_CTRL_LST | |
@@ -2040,13 +2038,17 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, | |||
2040 | struct dwc3_trb *trb; | 2038 | struct dwc3_trb *trb; |
2041 | unsigned int slot; | 2039 | unsigned int slot; |
2042 | unsigned int i; | 2040 | unsigned int i; |
2041 | int count = 0; | ||
2043 | int ret; | 2042 | int ret; |
2044 | 2043 | ||
2045 | do { | 2044 | do { |
2045 | int chain; | ||
2046 | |||
2046 | req = next_request(&dep->started_list); | 2047 | req = next_request(&dep->started_list); |
2047 | if (WARN_ON_ONCE(!req)) | 2048 | if (WARN_ON_ONCE(!req)) |
2048 | return 1; | 2049 | return 1; |
2049 | 2050 | ||
2051 | chain = req->request.num_mapped_sgs > 0; | ||
2050 | i = 0; | 2052 | i = 0; |
2051 | do { | 2053 | do { |
2052 | slot = req->first_trb_index + i; | 2054 | slot = req->first_trb_index + i; |
@@ -2054,13 +2056,22 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, | |||
2054 | slot++; | 2056 | slot++; |
2055 | slot %= DWC3_TRB_NUM; | 2057 | slot %= DWC3_TRB_NUM; |
2056 | trb = &dep->trb_pool[slot]; | 2058 | trb = &dep->trb_pool[slot]; |
2059 | count += trb->size & DWC3_TRB_SIZE_MASK; | ||
2057 | 2060 | ||
2058 | ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, | 2061 | ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, |
2059 | event, status); | 2062 | event, status, chain); |
2060 | if (ret) | 2063 | if (ret) |
2061 | break; | 2064 | break; |
2062 | } while (++i < req->request.num_mapped_sgs); | 2065 | } while (++i < req->request.num_mapped_sgs); |
2063 | 2066 | ||
2067 | /* | ||
2068 | * We assume here we will always receive the entire data block | ||
2069 | * which we should receive. Meaning, if we program RX to | ||
2070 | * receive 4K but we receive only 2K, we assume that's all we | ||
2071 | * should receive and we simply bounce the request back to the | ||
2072 | * gadget driver for further processing. | ||
2073 | */ | ||
2074 | req->request.actual += req->request.length - count; | ||
2064 | dwc3_gadget_giveback(dep, req, status); | 2075 | dwc3_gadget_giveback(dep, req, status); |
2065 | 2076 | ||
2066 | if (ret) | 2077 | if (ret) |
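The gadget rework makes completion handling scatter-gather aware in two steps: while walking a request's TRBs it clears any HWO bit the controller left set on skipped chain members (see the comment above), and it defers the request.actual accounting until the whole chain has been walked, summing each TRB's residue into count and charging the request once. In miniature, names hypothetical:

	/* Sketch: per-request accounting across a chain of TRBs. */
	unsigned int count = 0, i;

	for (i = 0; i < req->num_trbs; i++) {
		struct demo_trb *trb = &pool[(req->first_index + i) % POOL_SIZE];

		if (chain && (trb->ctrl & HWO))
			trb->ctrl &= ~HWO;	/* HW skipped it; reclaim */
		count += trb->size & SIZE_MASK;	/* bytes left untransferred */
	}
	req->actual += req->length - count;	/* charged once per request */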
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index eb648485a58c..5ebe6af7976e 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c | |||
@@ -1913,6 +1913,8 @@ unknown: | |||
1913 | break; | 1913 | break; |
1914 | 1914 | ||
1915 | case USB_RECIP_ENDPOINT: | 1915 | case USB_RECIP_ENDPOINT: |
1916 | if (!cdev->config) | ||
1917 | break; | ||
1916 | endp = ((w_index & 0x80) >> 3) | (w_index & 0x0f); | 1918 | endp = ((w_index & 0x80) >> 3) | (w_index & 0x0f); |
1917 | list_for_each_entry(f, &cdev->config->functions, list) { | 1919 | list_for_each_entry(f, &cdev->config->functions, list) { |
1918 | if (test_bit(endp, f->endpoints)) | 1920 | if (test_bit(endp, f->endpoints)) |
@@ -2124,14 +2126,14 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev, | |||
2124 | 2126 | ||
2125 | cdev->os_desc_req = usb_ep_alloc_request(ep0, GFP_KERNEL); | 2127 | cdev->os_desc_req = usb_ep_alloc_request(ep0, GFP_KERNEL); |
2126 | if (!cdev->os_desc_req) { | 2128 | if (!cdev->os_desc_req) { |
2127 | ret = PTR_ERR(cdev->os_desc_req); | 2129 | ret = -ENOMEM; |
2128 | goto end; | 2130 | goto end; |
2129 | } | 2131 | } |
2130 | 2132 | ||
2131 | /* OS feature descriptor length <= 4kB */ | 2133 | /* OS feature descriptor length <= 4kB */ |
2132 | cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL); | 2134 | cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL); |
2133 | if (!cdev->os_desc_req->buf) { | 2135 | if (!cdev->os_desc_req->buf) { |
2134 | ret = PTR_ERR(cdev->os_desc_req->buf); | 2136 | ret = -ENOMEM; |
2135 | kfree(cdev->os_desc_req); | 2137 | kfree(cdev->os_desc_req); |
2136 | goto end; | 2138 | goto end; |
2137 | } | 2139 | } |
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 70cf3477f951..f9237fe2be05 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c | |||
@@ -1490,7 +1490,9 @@ void unregister_gadget_item(struct config_item *item) | |||
1490 | { | 1490 | { |
1491 | struct gadget_info *gi = to_gadget_info(item); | 1491 | struct gadget_info *gi = to_gadget_info(item); |
1492 | 1492 | ||
1493 | mutex_lock(&gi->lock); | ||
1493 | unregister_gadget(gi); | 1494 | unregister_gadget(gi); |
1495 | mutex_unlock(&gi->lock); | ||
1494 | } | 1496 | } |
1495 | EXPORT_SYMBOL_GPL(unregister_gadget_item); | 1497 | EXPORT_SYMBOL_GPL(unregister_gadget_item); |
1496 | 1498 | ||
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c index 943c21aafd3b..ab6ac1b74ac0 100644 --- a/drivers/usb/gadget/function/rndis.c +++ b/drivers/usb/gadget/function/rndis.c | |||
@@ -680,6 +680,12 @@ static int rndis_reset_response(struct rndis_params *params, | |||
680 | { | 680 | { |
681 | rndis_reset_cmplt_type *resp; | 681 | rndis_reset_cmplt_type *resp; |
682 | rndis_resp_t *r; | 682 | rndis_resp_t *r; |
683 | u8 *xbuf; | ||
684 | u32 length; | ||
685 | |||
686 | /* drain the response queue */ | ||
687 | while ((xbuf = rndis_get_next_response(params, &length))) | ||
688 | rndis_free_response(params, xbuf); | ||
683 | 689 | ||
684 | r = rndis_add_response(params, sizeof(rndis_reset_cmplt_type)); | 690 | r = rndis_add_response(params, sizeof(rndis_reset_cmplt_type)); |
685 | if (!r) | 691 | if (!r) |
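The rndis hunk drains stale responses before queueing the reset completion, since a reset invalidates anything still waiting in the queue. A toy version of the drain-then-enqueue shape; the list and helpers here are simplified stand-ins for rndis_get_next_response()/rndis_free_response():

    #include <stdlib.h>
    #include <stdio.h>

    struct resp { struct resp *next; };

    static struct resp *pop(struct resp **head)
    {
            struct resp *r = *head;
            if (r)
                    *head = r->next;
            return r;
    }

    static void reset_response(struct resp **queue)
    {
            struct resp *r;

            /* drain the response queue, as the patch does */
            while ((r = pop(queue)))
                    free(r);
            /* ...then allocate and queue the reset completion */
    }

    int main(void)
    {
            struct resp *q = calloc(1, sizeof(*q));
            reset_response(&q);
            printf("queue %s\n", q ? "non-empty" : "drained");
            return 0;
    }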
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index a3f7e7c55ebb..5f562c1ec795 100644 --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c | |||
@@ -556,7 +556,8 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb, | |||
556 | /* Multi frame CDC protocols may store the frame for | 556 | /* Multi frame CDC protocols may store the frame for |
557 | * later which is not a dropped frame. | 557 | * later which is not a dropped frame. |
558 | */ | 558 | */ |
559 | if (dev->port_usb->supports_multi_frame) | 559 | if (dev->port_usb && |
560 | dev->port_usb->supports_multi_frame) | ||
560 | goto multiframe; | 561 | goto multiframe; |
561 | goto drop; | 562 | goto drop; |
562 | } | 563 | } |
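dev->port_usb can become NULL when the USB function is torn down while a packet is still in flight, so the multi-frame test must re-check the pointer at the point of use. The shape of the fixed test, with reduced types:

    #include <stdio.h>

    struct gether { int supports_multi_frame; };
    struct eth_dev { struct gether *port_usb; };

    /* Fixed test: short-circuit on a disappeared port before the
     * dereference. */
    static int keep_for_multiframe(const struct eth_dev *dev)
    {
            return dev->port_usb && dev->port_usb->supports_multi_frame;
    }

    int main(void)
    {
            struct eth_dev dev = { .port_usb = NULL };  /* disconnected */
            printf("%d\n", keep_for_multiframe(&dev));  /* 0, no crash */
            return 0;
    }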
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c index 66753ba7a42e..31125a4a2658 100644 --- a/drivers/usb/gadget/function/uvc_configfs.c +++ b/drivers/usb/gadget/function/uvc_configfs.c | |||
@@ -2023,7 +2023,7 @@ static int uvcg_streaming_class_allow_link(struct config_item *src, | |||
2023 | if (!data) { | 2023 | if (!data) { |
2024 | kfree(*class_array); | 2024 | kfree(*class_array); |
2025 | *class_array = NULL; | 2025 | *class_array = NULL; |
2026 | ret = PTR_ERR(data); | 2026 | ret = -ENOMEM; |
2027 | goto unlock; | 2027 | goto unlock; |
2028 | } | 2028 | } |
2029 | cl_arr = *class_array; | 2029 | cl_arr = *class_array; |
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index aa3707bdebb4..16104b5ebdcb 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c | |||
@@ -542,7 +542,7 @@ static ssize_t ep_aio(struct kiocb *iocb, | |||
542 | */ | 542 | */ |
543 | spin_lock_irq(&epdata->dev->lock); | 543 | spin_lock_irq(&epdata->dev->lock); |
544 | value = -ENODEV; | 544 | value = -ENODEV; |
545 | if (unlikely(epdata->ep)) | 545 | if (unlikely(epdata->ep == NULL)) |
546 | goto fail; | 546 | goto fail; |
547 | 547 | ||
548 | req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC); | 548 | req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC); |
@@ -606,7 +606,7 @@ ep_read_iter(struct kiocb *iocb, struct iov_iter *to) | |||
606 | } | 606 | } |
607 | if (is_sync_kiocb(iocb)) { | 607 | if (is_sync_kiocb(iocb)) { |
608 | value = ep_io(epdata, buf, len); | 608 | value = ep_io(epdata, buf, len); |
609 | if (value >= 0 && copy_to_iter(buf, value, to)) | 609 | if (value >= 0 && (copy_to_iter(buf, value, to) != value)) |
610 | value = -EFAULT; | 610 | value = -EFAULT; |
611 | } else { | 611 | } else { |
612 | struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL); | 612 | struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL); |
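copy_to_iter() returns the number of bytes it managed to copy, so the old test `value >= 0 && copy_to_iter(...)` flagged every successful copy as -EFAULT; only a short copy (return != value) is the failure. A stand-in with the same return convention:

    #include <stdio.h>
    #include <string.h>

    /* copy_to_iter()-style helper: returns bytes actually copied. */
    static size_t copy_out(void *dst, const void *src, size_t len)
    {
            memcpy(dst, src, len);
            return len;
    }

    int main(void)
    {
            char src[8] = "payload", dst[8];
            long value = sizeof(src);       /* bytes the I/O produced */

            /* fixed form: error only when fewer bytes were copied */
            if (value >= 0 && copy_out(dst, src, value) != (size_t)value)
                    value = -14;            /* -EFAULT */
            printf("value = %ld\n", value); /* 8 */
            return 0;
    }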
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index ff8685ea7219..934f83881c30 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c | |||
@@ -1145,7 +1145,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, | |||
1145 | if (ret != -EPROBE_DEFER) | 1145 | if (ret != -EPROBE_DEFER) |
1146 | list_del(&driver->pending); | 1146 | list_del(&driver->pending); |
1147 | if (ret) | 1147 | if (ret) |
1148 | goto err4; | 1148 | goto err5; |
1149 | break; | 1149 | break; |
1150 | } | 1150 | } |
1151 | } | 1151 | } |
@@ -1154,6 +1154,9 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, | |||
1154 | 1154 | ||
1155 | return 0; | 1155 | return 0; |
1156 | 1156 | ||
1157 | err5: | ||
1158 | device_del(&udc->dev); | ||
1159 | |||
1157 | err4: | 1160 | err4: |
1158 | list_del(&udc->list); | 1161 | list_del(&udc->list); |
1159 | mutex_unlock(&udc_lock); | 1162 | mutex_unlock(&udc_lock); |
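The new err5 label extends the function's unwind ladder: once device_add() has succeeded, a later failure must also run device_del() before falling through to the older cleanup. A compilable sketch of the ladder shape, with hypothetical step names:

    #include <stdio.h>

    static int add_device(void)  { return 0; }   /* e.g. device_add() */
    static int bind_driver(void) { return -1; }  /* late failure here */
    static void del_device(void) { puts("device_del()"); }

    static int setup(void)
    {
            int ret;

            ret = add_device();
            if (ret)
                    goto err4;
            ret = bind_driver();
            if (ret)
                    goto err5;   /* must now undo add_device() too */
            return 0;

    err5:
            del_device();
    err4:
            /* earlier cleanup: list_del(), mutex_unlock(), ... */
            return ret;
    }

    int main(void) { return setup() ? 1 : 0; }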
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c index 93d28cb00b76..cf8819a5c5b2 100644 --- a/drivers/usb/gadget/udc/fsl_qe_udc.c +++ b/drivers/usb/gadget/udc/fsl_qe_udc.c | |||
@@ -2053,7 +2053,7 @@ static void setup_received_handle(struct qe_udc *udc, | |||
2053 | struct qe_ep *ep; | 2053 | struct qe_ep *ep; |
2054 | 2054 | ||
2055 | if (wValue != 0 || wLength != 0 | 2055 | if (wValue != 0 || wLength != 0 |
2056 | || pipe > USB_MAX_ENDPOINTS) | 2056 | || pipe >= USB_MAX_ENDPOINTS) |
2057 | break; | 2057 | break; |
2058 | ep = &udc->eps[pipe]; | 2058 | ep = &udc->eps[pipe]; |
2059 | 2059 | ||
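udc->eps[] holds USB_MAX_ENDPOINTS entries, so valid indices run from 0 to USB_MAX_ENDPOINTS - 1; the old `>` comparison let pipe == USB_MAX_ENDPOINTS through, one element past the end. The boundary case in isolation:

    #include <stdio.h>

    #define MAX_EPS 4   /* stand-in for USB_MAX_ENDPOINTS */

    int main(void)
    {
            unsigned int pipe = MAX_EPS;  /* first invalid index */

            printf("old check (>)  rejects: %s\n",
                   pipe > MAX_EPS ? "yes" : "no");   /* no: OOB access */
            printf("new check (>=) rejects: %s\n",
                   pipe >= MAX_EPS ? "yes" : "no");  /* yes */
            return 0;
    }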
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index a962b89b65a6..1e5f529d51a2 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c | |||
@@ -332,11 +332,11 @@ static void ehci_turn_off_all_ports(struct ehci_hcd *ehci) | |||
332 | int port = HCS_N_PORTS(ehci->hcs_params); | 332 | int port = HCS_N_PORTS(ehci->hcs_params); |
333 | 333 | ||
334 | while (port--) { | 334 | while (port--) { |
335 | ehci_writel(ehci, PORT_RWC_BITS, | ||
336 | &ehci->regs->port_status[port]); | ||
337 | spin_unlock_irq(&ehci->lock); | 335 | spin_unlock_irq(&ehci->lock); |
338 | ehci_port_power(ehci, port, false); | 336 | ehci_port_power(ehci, port, false); |
339 | spin_lock_irq(&ehci->lock); | 337 | spin_lock_irq(&ehci->lock); |
338 | ehci_writel(ehci, PORT_RWC_BITS, | ||
339 | &ehci->regs->port_status[port]); | ||
340 | } | 340 | } |
341 | } | 341 | } |
342 | 342 | ||
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c index c369c29e496d..2f7690092a7f 100644 --- a/drivers/usb/host/max3421-hcd.c +++ b/drivers/usb/host/max3421-hcd.c | |||
@@ -1675,7 +1675,7 @@ max3421_gpout_set_value(struct usb_hcd *hcd, u8 pin_number, u8 value) | |||
1675 | if (pin_number > 7) | 1675 | if (pin_number > 7) |
1676 | return; | 1676 | return; |
1677 | 1677 | ||
1678 | mask = 1u << pin_number; | 1678 | mask = 1u << (pin_number % 4); |
1679 | idx = pin_number / 4; | 1679 | idx = pin_number / 4; |
1680 | 1680 | ||
1681 | if (value) | 1681 | if (value) |
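The idx = pin_number / 4 already in the max3421 code implies four GPOUT pins per register, so the mask has to be built from the pin's position within its register, pin_number % 4, rather than from the full pin number; shifting by the full number addressed bits outside the selected register. The corrected pair of expressions:

    #include <stdio.h>

    int main(void)
    {
            for (unsigned int pin = 0; pin < 8; pin++) {
                    unsigned int idx  = pin / 4;         /* which register */
                    unsigned int mask = 1u << (pin % 4); /* bit inside it  */
                    printf("pin %u -> reg %u, mask 0x%x\n", pin, idx, mask);
            }
            return 0;
    }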
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index d61fcc48099e..730b9fd26685 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c | |||
@@ -386,6 +386,9 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend) | |||
386 | 386 | ||
387 | ret = 0; | 387 | ret = 0; |
388 | virt_dev = xhci->devs[slot_id]; | 388 | virt_dev = xhci->devs[slot_id]; |
389 | if (!virt_dev) | ||
390 | return -ENODEV; | ||
391 | |||
389 | cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO); | 392 | cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO); |
390 | if (!cmd) { | 393 | if (!cmd) { |
391 | xhci_dbg(xhci, "Couldn't allocate command structure.\n"); | 394 | xhci_dbg(xhci, "Couldn't allocate command structure.\n"); |
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 4fd041bec332..d7b0f97abbad 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
@@ -314,11 +314,12 @@ static void xhci_pci_remove(struct pci_dev *dev) | |||
314 | usb_remove_hcd(xhci->shared_hcd); | 314 | usb_remove_hcd(xhci->shared_hcd); |
315 | usb_put_hcd(xhci->shared_hcd); | 315 | usb_put_hcd(xhci->shared_hcd); |
316 | } | 316 | } |
317 | usb_hcd_pci_remove(dev); | ||
318 | 317 | ||
319 | /* Workaround for spurious wakeups at shutdown with HSW */ | 318 | /* Workaround for spurious wakeups at shutdown with HSW */ |
320 | if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) | 319 | if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) |
321 | pci_set_power_state(dev, PCI_D3hot); | 320 | pci_set_power_state(dev, PCI_D3hot); |
321 | |||
322 | usb_hcd_pci_remove(dev); | ||
322 | } | 323 | } |
323 | 324 | ||
324 | #ifdef CONFIG_PM | 325 | #ifdef CONFIG_PM |
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 918e0c739b79..fd9fd12e4861 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
@@ -1334,12 +1334,6 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, | |||
1334 | 1334 | ||
1335 | cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list); | 1335 | cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list); |
1336 | 1336 | ||
1337 | if (cmd->command_trb != xhci->cmd_ring->dequeue) { | ||
1338 | xhci_err(xhci, | ||
1339 | "Command completion event does not match command\n"); | ||
1340 | return; | ||
1341 | } | ||
1342 | |||
1343 | del_timer(&xhci->cmd_timer); | 1337 | del_timer(&xhci->cmd_timer); |
1344 | 1338 | ||
1345 | trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event); | 1339 | trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event); |
@@ -1351,6 +1345,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, | |||
1351 | xhci_handle_stopped_cmd_ring(xhci, cmd); | 1345 | xhci_handle_stopped_cmd_ring(xhci, cmd); |
1352 | return; | 1346 | return; |
1353 | } | 1347 | } |
1348 | |||
1349 | if (cmd->command_trb != xhci->cmd_ring->dequeue) { | ||
1350 | xhci_err(xhci, | ||
1351 | "Command completion event does not match command\n"); | ||
1352 | return; | ||
1353 | } | ||
1354 | |||
1354 | /* | 1355 | /* |
1355 | * Host aborted the command ring, check if the current command was | 1356 | * Host aborted the command ring, check if the current command was |
1356 | * supposed to be aborted, otherwise continue normally. | 1357 | * supposed to be aborted, otherwise continue normally. |
@@ -3243,7 +3244,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
3243 | send_addr = addr; | 3244 | send_addr = addr; |
3244 | 3245 | ||
3245 | /* Queue the TRBs, even if they are zero-length */ | 3246 | /* Queue the TRBs, even if they are zero-length */ |
3246 | for (enqd_len = 0; enqd_len < full_len; enqd_len += trb_buff_len) { | 3247 | for (enqd_len = 0; first_trb || enqd_len < full_len; |
3248 | enqd_len += trb_buff_len) { | ||
3247 | field = TRB_TYPE(TRB_NORMAL); | 3249 | field = TRB_TYPE(TRB_NORMAL); |
3248 | 3250 | ||
3249 | /* TRB buffer should not cross 64KB boundaries */ | 3251 | /* TRB buffer should not cross 64KB boundaries */ |
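With the old loop condition a zero-length bulk transfer (full_len == 0) queued no TRB at all; adding `first_trb ||` forces at least one pass so the required zero-length TRB is still enqueued. The guard reduced to its loop shape:

    #include <stdio.h>
    #include <stdbool.h>

    int main(void)
    {
            unsigned int full_len = 0;   /* zero-length transfer */
            unsigned int enqd_len, trb_buff_len = 0;
            bool first_trb = true;
            int queued = 0;

            for (enqd_len = 0; first_trb || enqd_len < full_len;
                 enqd_len += trb_buff_len) {
                    queued++;            /* queue one TRB */
                    first_trb = false;
            }
            printf("TRBs queued: %d\n", queued);  /* 1, was 0 */
            return 0;
    }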
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c index 52c27cab78c3..9b5b3b2281ca 100644 --- a/drivers/usb/misc/ftdi-elan.c +++ b/drivers/usb/misc/ftdi-elan.c | |||
@@ -665,7 +665,7 @@ static ssize_t ftdi_elan_read(struct file *file, char __user *buffer, | |||
665 | { | 665 | { |
666 | char data[30 *3 + 4]; | 666 | char data[30 *3 + 4]; |
667 | char *d = data; | 667 | char *d = data; |
668 | int m = (sizeof(data) - 1) / 3; | 668 | int m = (sizeof(data) - 1) / 3 - 1; |
669 | int bytes_read = 0; | 669 | int bytes_read = 0; |
670 | int retry_on_empty = 10; | 670 | int retry_on_empty = 10; |
671 | int retry_on_timeout = 5; | 671 | int retry_on_timeout = 5; |
@@ -1684,7 +1684,7 @@ wait:if (ftdi->disconnected > 0) { | |||
1684 | int i = 0; | 1684 | int i = 0; |
1685 | char data[30 *3 + 4]; | 1685 | char data[30 *3 + 4]; |
1686 | char *d = data; | 1686 | char *d = data; |
1687 | int m = (sizeof(data) - 1) / 3; | 1687 | int m = (sizeof(data) - 1) / 3 - 1; |
1688 | int l = 0; | 1688 | int l = 0; |
1689 | struct u132_target *target = &ftdi->target[ed]; | 1689 | struct u132_target *target = &ftdi->target[ed]; |
1690 | struct u132_command *command = &ftdi->command[ | 1690 | struct u132_command *command = &ftdi->command[ |
@@ -1876,7 +1876,7 @@ more:{ | |||
1876 | if (packet_bytes > 2) { | 1876 | if (packet_bytes > 2) { |
1877 | char diag[30 *3 + 4]; | 1877 | char diag[30 *3 + 4]; |
1878 | char *d = diag; | 1878 | char *d = diag; |
1879 | int m = (sizeof(diag) - 1) / 3; | 1879 | int m = (sizeof(diag) - 1) / 3 - 1; |
1880 | char *b = ftdi->bulk_in_buffer; | 1880 | char *b = ftdi->bulk_in_buffer; |
1881 | int bytes_read = 0; | 1881 | int bytes_read = 0; |
1882 | diag[0] = 0; | 1882 | diag[0] = 0; |
@@ -2053,7 +2053,7 @@ static int ftdi_elan_synchronize(struct usb_ftdi *ftdi) | |||
2053 | if (packet_bytes > 2) { | 2053 | if (packet_bytes > 2) { |
2054 | char diag[30 *3 + 4]; | 2054 | char diag[30 *3 + 4]; |
2055 | char *d = diag; | 2055 | char *d = diag; |
2056 | int m = (sizeof(diag) - 1) / 3; | 2056 | int m = (sizeof(diag) - 1) / 3 - 1; |
2057 | char *b = ftdi->bulk_in_buffer; | 2057 | char *b = ftdi->bulk_in_buffer; |
2058 | int bytes_read = 0; | 2058 | int bytes_read = 0; |
2059 | unsigned char c = 0; | 2059 | unsigned char c = 0; |
@@ -2155,7 +2155,7 @@ more:{ | |||
2155 | if (packet_bytes > 2) { | 2155 | if (packet_bytes > 2) { |
2156 | char diag[30 *3 + 4]; | 2156 | char diag[30 *3 + 4]; |
2157 | char *d = diag; | 2157 | char *d = diag; |
2158 | int m = (sizeof(diag) - 1) / 3; | 2158 | int m = (sizeof(diag) - 1) / 3 - 1; |
2159 | char *b = ftdi->bulk_in_buffer; | 2159 | char *b = ftdi->bulk_in_buffer; |
2160 | int bytes_read = 0; | 2160 | int bytes_read = 0; |
2161 | diag[0] = 0; | 2161 | diag[0] = 0; |
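All five ftdi-elan hunks tighten the same bound. Assuming the usual three characters per dumped byte (e.g. " %02X"), data[30 * 3 + 4] holds 94 characters, and the old limit of (sizeof - 1) / 3 = 31 entries left no slack for the terminator and any trailing marker; the new limit of 30 does. The capacity arithmetic as a sketch:

    #include <stdio.h>

    int main(void)
    {
            char data[30 * 3 + 4];                     /* 94 bytes */
            int old_m = (sizeof(data) - 1) / 3;        /* 31 */
            int new_m = (sizeof(data) - 1) / 3 - 1;    /* 30 */

            printf("buffer=%zu  old: %d*3=%d chars  new: %d*3=%d chars\n",
                   sizeof(data), old_m, old_m * 3, new_m, new_m * 3);
            (void)data;
            return 0;
    }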
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index 6b978f04b8d7..5c8210dc6fd9 100644 --- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c | |||
@@ -585,7 +585,6 @@ static void sg_timeout(unsigned long _req) | |||
585 | { | 585 | { |
586 | struct usb_sg_request *req = (struct usb_sg_request *) _req; | 586 | struct usb_sg_request *req = (struct usb_sg_request *) _req; |
587 | 587 | ||
588 | req->status = -ETIMEDOUT; | ||
589 | usb_sg_cancel(req); | 588 | usb_sg_cancel(req); |
590 | } | 589 | } |
591 | 590 | ||
@@ -616,8 +615,10 @@ static int perform_sglist( | |||
616 | mod_timer(&sg_timer, jiffies + | 615 | mod_timer(&sg_timer, jiffies + |
617 | msecs_to_jiffies(SIMPLE_IO_TIMEOUT)); | 616 | msecs_to_jiffies(SIMPLE_IO_TIMEOUT)); |
618 | usb_sg_wait(req); | 617 | usb_sg_wait(req); |
619 | del_timer_sync(&sg_timer); | 618 | if (!del_timer_sync(&sg_timer)) |
620 | retval = req->status; | 619 | retval = -ETIMEDOUT; |
620 | else | ||
621 | retval = req->status; | ||
621 | 622 | ||
622 | /* FIXME check resulting data pattern */ | 623 | /* FIXME check resulting data pattern */ |
623 | 624 | ||
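del_timer_sync() returns nonzero when it deactivated a still-pending timer. A return of 0 here means sg_timeout() already fired and cancelled the request, so the timeout is derived from the timer itself; that is also why the `req->status = -ETIMEDOUT` assignment could be dropped from the handler above. A reduced model of the decision:

    #include <stdio.h>

    /* Stand-in for del_timer_sync(): nonzero means the timer was
     * still pending (the I/O won the race). */
    static int cancel_timer(int was_pending)
    {
            return was_pending;
    }

    int main(void)
    {
            int req_status = 0;  /* status of the finished request */
            int retval;

            if (!cancel_timer(0))      /* timer already fired */
                    retval = -110;     /* -ETIMEDOUT */
            else
                    retval = req_status;
            printf("retval = %d\n", retval);  /* -110 */
            return 0;
    }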
@@ -2602,7 +2603,7 @@ usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf) | |||
2602 | ktime_get_ts64(&start); | 2603 | ktime_get_ts64(&start); |
2603 | 2604 | ||
2604 | retval = usbtest_do_ioctl(intf, param_32); | 2605 | retval = usbtest_do_ioctl(intf, param_32); |
2605 | if (retval) | 2606 | if (retval < 0) |
2606 | goto free_mutex; | 2607 | goto free_mutex; |
2607 | 2608 | ||
2608 | ktime_get_ts64(&end); | 2609 | ktime_get_ts64(&end); |
diff --git a/drivers/usb/phy/phy-omap-otg.c b/drivers/usb/phy/phy-omap-otg.c index 6f6d2a7fd5a0..6523af4f8f93 100644 --- a/drivers/usb/phy/phy-omap-otg.c +++ b/drivers/usb/phy/phy-omap-otg.c | |||
@@ -140,6 +140,8 @@ static int omap_otg_probe(struct platform_device *pdev) | |||
140 | (rev >> 4) & 0xf, rev & 0xf, config->extcon, otg_dev->id, | 140 | (rev >> 4) & 0xf, rev & 0xf, config->extcon, otg_dev->id, |
141 | otg_dev->vbus); | 141 | otg_dev->vbus); |
142 | 142 | ||
143 | platform_set_drvdata(pdev, otg_dev); | ||
144 | |||
143 | return 0; | 145 | return 0; |
144 | } | 146 | } |
145 | 147 | ||
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c index 8fbbc2d32371..ac67bab9124c 100644 --- a/drivers/usb/renesas_usbhs/common.c +++ b/drivers/usb/renesas_usbhs/common.c | |||
@@ -514,7 +514,8 @@ static struct renesas_usbhs_platform_info *usbhs_parse_dt(struct device *dev) | |||
514 | if (gpio > 0) | 514 | if (gpio > 0) |
515 | dparam->enable_gpio = gpio; | 515 | dparam->enable_gpio = gpio; |
516 | 516 | ||
517 | if (dparam->type == USBHS_TYPE_RCAR_GEN2) | 517 | if (dparam->type == USBHS_TYPE_RCAR_GEN2 || |
518 | dparam->type == USBHS_TYPE_RCAR_GEN3) | ||
518 | dparam->has_usb_dmac = 1; | 519 | dparam->has_usb_dmac = 1; |
519 | 520 | ||
520 | return info; | 521 | return info; |
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index 280ed5ff021b..857e78337324 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c | |||
@@ -871,7 +871,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done) | |||
871 | 871 | ||
872 | /* use PIO if packet is less than pio_dma_border or pipe is DCP */ | 872 | /* use PIO if packet is less than pio_dma_border or pipe is DCP */ |
873 | if ((len < usbhs_get_dparam(priv, pio_dma_border)) || | 873 | if ((len < usbhs_get_dparam(priv, pio_dma_border)) || |
874 | usbhs_pipe_is_dcp(pipe)) | 874 | usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC)) |
875 | goto usbhsf_pio_prepare_push; | 875 | goto usbhsf_pio_prepare_push; |
876 | 876 | ||
877 | /* check data length if this driver doesn't use USB-DMAC */ | 877 |
@@ -976,7 +976,7 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt, | |||
976 | 976 | ||
977 | /* use PIO if packet is less than pio_dma_border or pipe is DCP */ | 977 | /* use PIO if packet is less than pio_dma_border or pipe is DCP */ |
978 | if ((pkt->length < usbhs_get_dparam(priv, pio_dma_border)) || | 978 | if ((pkt->length < usbhs_get_dparam(priv, pio_dma_border)) || |
979 | usbhs_pipe_is_dcp(pipe)) | 979 | usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC)) |
980 | goto usbhsf_pio_prepare_pop; | 980 | goto usbhsf_pio_prepare_pop; |
981 | 981 | ||
982 | fifo = usbhsf_get_dma_fifo(priv, pkt); | 982 | fifo = usbhsf_get_dma_fifo(priv, pkt); |
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c index 50f3363cc382..92bc83b92d10 100644 --- a/drivers/usb/renesas_usbhs/mod_gadget.c +++ b/drivers/usb/renesas_usbhs/mod_gadget.c | |||
@@ -617,10 +617,13 @@ static int usbhsg_ep_enable(struct usb_ep *ep, | |||
617 | * use dmaengine if possible. | 617 | * use dmaengine if possible. |
618 | * It will use pio handler if impossible. | 618 | * It will use pio handler if impossible. |
619 | */ | 619 | */ |
620 | if (usb_endpoint_dir_in(desc)) | 620 | if (usb_endpoint_dir_in(desc)) { |
621 | pipe->handler = &usbhs_fifo_dma_push_handler; | 621 | pipe->handler = &usbhs_fifo_dma_push_handler; |
622 | else | 622 | } else { |
623 | pipe->handler = &usbhs_fifo_dma_pop_handler; | 623 | pipe->handler = &usbhs_fifo_dma_pop_handler; |
624 | usbhs_xxxsts_clear(priv, BRDYSTS, | ||
625 | usbhs_pipe_number(pipe)); | ||
626 | } | ||
624 | 627 | ||
625 | ret = 0; | 628 | ret = 0; |
626 | } | 629 | } |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 00820809139a..b2d767e743fc 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -648,6 +648,8 @@ static const struct usb_device_id id_table_combined[] = { | |||
648 | { USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) }, | 648 | { USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) }, |
649 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) }, | 649 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) }, |
650 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) }, | 650 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) }, |
651 | { USB_DEVICE(FTDI_VID, FTDI_PALMSENS_PID) }, | ||
652 | { USB_DEVICE(FTDI_VID, FTDI_IVIUM_XSTAT_PID) }, | ||
651 | { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, | 653 | { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, |
652 | { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, | 654 | { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, |
653 | { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) }, | 655 | { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) }, |
@@ -1008,6 +1010,7 @@ static const struct usb_device_id id_table_combined[] = { | |||
1008 | { USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) }, | 1010 | { USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) }, |
1009 | { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) }, | 1011 | { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) }, |
1010 | { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) }, | 1012 | { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) }, |
1013 | { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) }, | ||
1011 | { } /* Terminating entry */ | 1014 | { } /* Terminating entry */ |
1012 | }; | 1015 | }; |
1013 | 1016 | ||
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index c5d6c1e73e8e..f87a938cf005 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h | |||
@@ -406,6 +406,12 @@ | |||
406 | #define FTDI_4N_GALAXY_DE_3_PID 0xF3C2 | 406 | #define FTDI_4N_GALAXY_DE_3_PID 0xF3C2 |
407 | 407 | ||
408 | /* | 408 | /* |
409 | * Ivium Technologies product IDs | ||
410 | */ | ||
411 | #define FTDI_PALMSENS_PID 0xf440 | ||
412 | #define FTDI_IVIUM_XSTAT_PID 0xf441 | ||
413 | |||
414 | /* | ||
409 | * Linx Technologies product ids | 415 | * Linx Technologies product ids |
410 | */ | 416 | */ |
411 | #define LINX_SDMUSBQSS_PID 0xF448 /* Linx SDM-USB-QS-S */ | 417 | #define LINX_SDMUSBQSS_PID 0xF448 /* Linx SDM-USB-QS-S */ |
@@ -673,6 +679,12 @@ | |||
673 | #define INTREPID_NEOVI_PID 0x0701 | 679 | #define INTREPID_NEOVI_PID 0x0701 |
674 | 680 | ||
675 | /* | 681 | /* |
682 | * WICED USB UART | ||
683 | */ | ||
684 | #define WICED_VID 0x0A5C | ||
685 | #define WICED_USB20706V2_PID 0x6422 | ||
686 | |||
687 | /* | ||
676 | * Definitions for ID TECH (www.idt-net.com) devices | 688 | * Definitions for ID TECH (www.idt-net.com) devices |
677 | */ | 689 | */ |
678 | #define IDTECH_VID 0x0ACD /* ID TECH Vendor ID */ | 690 | #define IDTECH_VID 0x0ACD /* ID TECH Vendor ID */ |
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 8e07536c233a..bc472584a229 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -274,6 +274,12 @@ static void option_instat_callback(struct urb *urb); | |||
274 | #define TELIT_PRODUCT_LE920 0x1200 | 274 | #define TELIT_PRODUCT_LE920 0x1200 |
275 | #define TELIT_PRODUCT_LE910 0x1201 | 275 | #define TELIT_PRODUCT_LE910 0x1201 |
276 | #define TELIT_PRODUCT_LE910_USBCFG4 0x1206 | 276 | #define TELIT_PRODUCT_LE910_USBCFG4 0x1206 |
277 | #define TELIT_PRODUCT_LE920A4_1207 0x1207 | ||
278 | #define TELIT_PRODUCT_LE920A4_1208 0x1208 | ||
279 | #define TELIT_PRODUCT_LE920A4_1211 0x1211 | ||
280 | #define TELIT_PRODUCT_LE920A4_1212 0x1212 | ||
281 | #define TELIT_PRODUCT_LE920A4_1213 0x1213 | ||
282 | #define TELIT_PRODUCT_LE920A4_1214 0x1214 | ||
277 | 283 | ||
278 | /* ZTE PRODUCTS */ | 284 | /* ZTE PRODUCTS */ |
279 | #define ZTE_VENDOR_ID 0x19d2 | 285 | #define ZTE_VENDOR_ID 0x19d2 |
@@ -628,6 +634,11 @@ static const struct option_blacklist_info telit_le920_blacklist = { | |||
628 | .reserved = BIT(1) | BIT(5), | 634 | .reserved = BIT(1) | BIT(5), |
629 | }; | 635 | }; |
630 | 636 | ||
637 | static const struct option_blacklist_info telit_le920a4_blacklist_1 = { | ||
638 | .sendsetup = BIT(0), | ||
639 | .reserved = BIT(1), | ||
640 | }; | ||
641 | |||
631 | static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = { | 642 | static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = { |
632 | .sendsetup = BIT(2), | 643 | .sendsetup = BIT(2), |
633 | .reserved = BIT(0) | BIT(1) | BIT(3), | 644 | .reserved = BIT(0) | BIT(1) | BIT(3), |
@@ -1203,6 +1214,16 @@ static const struct usb_device_id option_ids[] = { | |||
1203 | .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, | 1214 | .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, |
1204 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), | 1215 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), |
1205 | .driver_info = (kernel_ulong_t)&telit_le920_blacklist }, | 1216 | .driver_info = (kernel_ulong_t)&telit_le920_blacklist }, |
1217 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1207) }, | ||
1218 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1208), | ||
1219 | .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 }, | ||
1220 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1211), | ||
1221 | .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, | ||
1222 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1212), | ||
1223 | .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 }, | ||
1224 | { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) }, | ||
1225 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214), | ||
1226 | .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, | ||
1206 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ | 1227 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ |
1207 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), | 1228 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), |
1208 | .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, | 1229 | .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, |
@@ -1966,6 +1987,7 @@ static const struct usb_device_id option_ids[] = { | |||
1966 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, | 1987 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, |
1967 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ | 1988 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ |
1968 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ | 1989 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ |
1990 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ | ||
1969 | { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ | 1991 | { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ |
1970 | { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, | 1992 | { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, |
1971 | { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, | 1993 | { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, |
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index b1b9bac44016..d213cf44a7e4 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c | |||
@@ -1433,7 +1433,7 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[] | |||
1433 | 1433 | ||
1434 | rc = usb_register(udriver); | 1434 | rc = usb_register(udriver); |
1435 | if (rc) | 1435 | if (rc) |
1436 | return rc; | 1436 | goto failed_usb_register; |
1437 | 1437 | ||
1438 | for (sd = serial_drivers; *sd; ++sd) { | 1438 | for (sd = serial_drivers; *sd; ++sd) { |
1439 | (*sd)->usb_driver = udriver; | 1439 | (*sd)->usb_driver = udriver; |
@@ -1451,6 +1451,8 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[] | |||
1451 | while (sd-- > serial_drivers) | 1451 | while (sd-- > serial_drivers) |
1452 | usb_serial_deregister(*sd); | 1452 | usb_serial_deregister(*sd); |
1453 | usb_deregister(udriver); | 1453 | usb_deregister(udriver); |
1454 | failed_usb_register: | ||
1455 | kfree(udriver); | ||
1454 | return rc; | 1456 | return rc; |
1455 | } | 1457 | } |
1456 | EXPORT_SYMBOL_GPL(usb_serial_register_drivers); | 1458 | EXPORT_SYMBOL_GPL(usb_serial_register_drivers); |
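usb_serial_register_drivers() allocates udriver earlier in the function, so the bare `return rc` on usb_register() failure leaked it; the new label routes that path through the same kfree(). The leak-free shape in miniature:

    #include <stdlib.h>

    static int do_register(void) { return -1; }  /* stand-in failure */

    static int register_drivers(void)
    {
            int rc;
            void *udriver = calloc(1, 64);        /* allocated up front */

            if (!udriver)
                    return -12;                   /* -ENOMEM */
            rc = do_register();
            if (rc)
                    goto failed_usb_register;     /* was: return rc (leak) */
            return 0;

    failed_usb_register:
            free(udriver);
            return rc;
    }

    int main(void) { return register_drivers() ? 1 : 0; }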
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index 15ecfc9c5f6c..152b43822ef1 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c | |||
@@ -564,67 +564,80 @@ static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev, | |||
564 | } | 564 | } |
565 | 565 | ||
566 | static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx, | 566 | static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx, |
567 | uint32_t flags, void *data) | 567 | unsigned int count, uint32_t flags, |
568 | void *data) | ||
568 | { | 569 | { |
569 | int32_t fd = *(int32_t *)data; | ||
570 | |||
571 | if (!(flags & VFIO_IRQ_SET_DATA_TYPE_MASK)) | ||
572 | return -EINVAL; | ||
573 | |||
574 | /* DATA_NONE/DATA_BOOL enables loopback testing */ | 570 | /* DATA_NONE/DATA_BOOL enables loopback testing */ |
575 | if (flags & VFIO_IRQ_SET_DATA_NONE) { | 571 | if (flags & VFIO_IRQ_SET_DATA_NONE) { |
576 | if (*ctx) | 572 | if (*ctx) { |
577 | eventfd_signal(*ctx, 1); | 573 | if (count) { |
578 | return 0; | 574 | eventfd_signal(*ctx, 1); |
575 | } else { | ||
576 | eventfd_ctx_put(*ctx); | ||
577 | *ctx = NULL; | ||
578 | } | ||
579 | return 0; | ||
580 | } | ||
579 | } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { | 581 | } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { |
580 | uint8_t trigger = *(uint8_t *)data; | 582 | uint8_t trigger; |
583 | |||
584 | if (!count) | ||
585 | return -EINVAL; | ||
586 | |||
587 | trigger = *(uint8_t *)data; | ||
581 | if (trigger && *ctx) | 588 | if (trigger && *ctx) |
582 | eventfd_signal(*ctx, 1); | 589 | eventfd_signal(*ctx, 1); |
583 | return 0; | ||
584 | } | ||
585 | 590 | ||
586 | /* Handle SET_DATA_EVENTFD */ | ||
587 | if (fd == -1) { | ||
588 | if (*ctx) | ||
589 | eventfd_ctx_put(*ctx); | ||
590 | *ctx = NULL; | ||
591 | return 0; | 591 | return 0; |
592 | } else if (fd >= 0) { | 592 | } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { |
593 | struct eventfd_ctx *efdctx; | 593 | int32_t fd; |
594 | efdctx = eventfd_ctx_fdget(fd); | 594 | |
595 | if (IS_ERR(efdctx)) | 595 | if (!count) |
596 | return PTR_ERR(efdctx); | 596 | return -EINVAL; |
597 | if (*ctx) | 597 | |
598 | eventfd_ctx_put(*ctx); | 598 | fd = *(int32_t *)data; |
599 | *ctx = efdctx; | 599 | if (fd == -1) { |
600 | if (*ctx) | ||
601 | eventfd_ctx_put(*ctx); | ||
602 | *ctx = NULL; | ||
603 | } else if (fd >= 0) { | ||
604 | struct eventfd_ctx *efdctx; | ||
605 | |||
606 | efdctx = eventfd_ctx_fdget(fd); | ||
607 | if (IS_ERR(efdctx)) | ||
608 | return PTR_ERR(efdctx); | ||
609 | |||
610 | if (*ctx) | ||
611 | eventfd_ctx_put(*ctx); | ||
612 | |||
613 | *ctx = efdctx; | ||
614 | } | ||
600 | return 0; | 615 | return 0; |
601 | } else | 616 | } |
602 | return -EINVAL; | 617 | |
618 | return -EINVAL; | ||
603 | } | 619 | } |
604 | 620 | ||
605 | static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev, | 621 | static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev, |
606 | unsigned index, unsigned start, | 622 | unsigned index, unsigned start, |
607 | unsigned count, uint32_t flags, void *data) | 623 | unsigned count, uint32_t flags, void *data) |
608 | { | 624 | { |
609 | if (index != VFIO_PCI_ERR_IRQ_INDEX) | 625 | if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1) |
610 | return -EINVAL; | 626 | return -EINVAL; |
611 | 627 | ||
612 | /* | 628 | return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger, |
613 | * We should sanitize start & count, but that wasn't caught | 629 | count, flags, data); |
614 | * originally, so this IRQ index must forever ignore them :-( | ||
615 | */ | ||
616 | |||
617 | return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger, flags, data); | ||
618 | } | 630 | } |
619 | 631 | ||
620 | static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev, | 632 | static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev, |
621 | unsigned index, unsigned start, | 633 | unsigned index, unsigned start, |
622 | unsigned count, uint32_t flags, void *data) | 634 | unsigned count, uint32_t flags, void *data) |
623 | { | 635 | { |
624 | if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count != 1) | 636 | if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1) |
625 | return -EINVAL; | 637 | return -EINVAL; |
626 | 638 | ||
627 | return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger, flags, data); | 639 | return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger, |
640 | count, flags, data); | ||
628 | } | 641 | } |
629 | 642 | ||
630 | int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags, | 643 | int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags, |
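The rewritten helper threads count through so that a DATA_NONE write with count == 0 now releases the trigger instead of signalling it, and the BOOL/EVENTFD branches refuse to read their payload when count is 0. The DATA_NONE branch reduced to user space, with free() standing in for eventfd_ctx_put():

    #include <stdio.h>
    #include <stdlib.h>

    struct eventfd_ctx { int dummy; };

    /* Reduced DATA_NONE branch: count != 0 signals the existing
     * trigger, count == 0 tears it down. */
    static void set_trigger_none(struct eventfd_ctx **ctx, unsigned int count)
    {
            if (!*ctx)
                    return;
            if (count) {
                    puts("signal");        /* eventfd_signal(*ctx, 1) */
            } else {
                    free(*ctx);            /* eventfd_ctx_put(*ctx)   */
                    *ctx = NULL;
            }
    }

    int main(void)
    {
            struct eventfd_ctx *trigger = calloc(1, sizeof(*trigger));

            set_trigger_none(&trigger, 0);  /* teardown path */
            printf("trigger %s\n", trigger ? "armed" : "released");
            return 0;
    }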
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 9d6320e8ff3e..6e29d053843d 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c | |||
@@ -88,7 +88,7 @@ struct vhost_scsi_cmd { | |||
88 | struct scatterlist *tvc_prot_sgl; | 88 | struct scatterlist *tvc_prot_sgl; |
89 | struct page **tvc_upages; | 89 | struct page **tvc_upages; |
90 | /* Pointer to response header iovec */ | 90 | /* Pointer to response header iovec */ |
91 | struct iovec *tvc_resp_iov; | 91 | struct iovec tvc_resp_iov; |
92 | /* Pointer to vhost_scsi for our device */ | 92 | /* Pointer to vhost_scsi for our device */ |
93 | struct vhost_scsi *tvc_vhost; | 93 | struct vhost_scsi *tvc_vhost; |
94 | /* Pointer to vhost_virtqueue for the cmd */ | 94 | /* Pointer to vhost_virtqueue for the cmd */ |
@@ -547,7 +547,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work) | |||
547 | memcpy(v_rsp.sense, cmd->tvc_sense_buf, | 547 | memcpy(v_rsp.sense, cmd->tvc_sense_buf, |
548 | se_cmd->scsi_sense_length); | 548 | se_cmd->scsi_sense_length); |
549 | 549 | ||
550 | iov_iter_init(&iov_iter, READ, cmd->tvc_resp_iov, | 550 | iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov, |
551 | cmd->tvc_in_iovs, sizeof(v_rsp)); | 551 | cmd->tvc_in_iovs, sizeof(v_rsp)); |
552 | ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter); | 552 | ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter); |
553 | if (likely(ret == sizeof(v_rsp))) { | 553 | if (likely(ret == sizeof(v_rsp))) { |
@@ -1044,7 +1044,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) | |||
1044 | } | 1044 | } |
1045 | cmd->tvc_vhost = vs; | 1045 | cmd->tvc_vhost = vs; |
1046 | cmd->tvc_vq = vq; | 1046 | cmd->tvc_vq = vq; |
1047 | cmd->tvc_resp_iov = &vq->iov[out]; | 1047 | cmd->tvc_resp_iov = vq->iov[out]; |
1048 | cmd->tvc_in_iovs = in; | 1048 | cmd->tvc_in_iovs = in; |
1049 | 1049 | ||
1050 | pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n", | 1050 | pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n", |
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c index 388eec4e1a90..97fb2f8fa930 100644 --- a/drivers/vhost/test.c +++ b/drivers/vhost/test.c | |||
@@ -220,20 +220,20 @@ static long vhost_test_reset_owner(struct vhost_test *n) | |||
220 | { | 220 | { |
221 | void *priv = NULL; | 221 | void *priv = NULL; |
222 | long err; | 222 | long err; |
223 | struct vhost_memory *memory; | 223 | struct vhost_umem *umem; |
224 | 224 | ||
225 | mutex_lock(&n->dev.mutex); | 225 | mutex_lock(&n->dev.mutex); |
226 | err = vhost_dev_check_owner(&n->dev); | 226 | err = vhost_dev_check_owner(&n->dev); |
227 | if (err) | 227 | if (err) |
228 | goto done; | 228 | goto done; |
229 | memory = vhost_dev_reset_owner_prepare(); | 229 | umem = vhost_dev_reset_owner_prepare(); |
230 | if (!memory) { | 230 | if (!umem) { |
231 | err = -ENOMEM; | 231 | err = -ENOMEM; |
232 | goto done; | 232 | goto done; |
233 | } | 233 | } |
234 | vhost_test_stop(n, &priv); | 234 | vhost_test_stop(n, &priv); |
235 | vhost_test_flush(n); | 235 | vhost_test_flush(n); |
236 | vhost_dev_reset_owner(&n->dev, memory); | 236 | vhost_dev_reset_owner(&n->dev, umem); |
237 | done: | 237 | done: |
238 | mutex_unlock(&n->dev.mutex); | 238 | mutex_unlock(&n->dev.mutex); |
239 | return err; | 239 | return err; |
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 0ddf3a2dbfc4..e3b30ea9ece5 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c | |||
@@ -307,6 +307,8 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work) | |||
307 | 307 | ||
308 | vhost_disable_notify(&vsock->dev, vq); | 308 | vhost_disable_notify(&vsock->dev, vq); |
309 | for (;;) { | 309 | for (;;) { |
310 | u32 len; | ||
311 | |||
310 | if (!vhost_vsock_more_replies(vsock)) { | 312 | if (!vhost_vsock_more_replies(vsock)) { |
311 | /* Stop tx until the device processes already | 313 | /* Stop tx until the device processes already |
312 | * pending replies. Leave tx virtqueue | 314 | * pending replies. Leave tx virtqueue |
@@ -334,13 +336,15 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work) | |||
334 | continue; | 336 | continue; |
335 | } | 337 | } |
336 | 338 | ||
339 | len = pkt->len; | ||
340 | |||
337 | /* Only accept correctly addressed packets */ | 341 | /* Only accept correctly addressed packets */ |
338 | if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid) | 342 | if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid) |
339 | virtio_transport_recv_pkt(pkt); | 343 | virtio_transport_recv_pkt(pkt); |
340 | else | 344 | else |
341 | virtio_transport_free_pkt(pkt); | 345 | virtio_transport_free_pkt(pkt); |
342 | 346 | ||
343 | vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len); | 347 | vhost_add_used(vq, head, sizeof(pkt->hdr) + len); |
344 | added = true; | 348 | added = true; |
345 | } | 349 | } |
346 | 350 | ||
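virtio_transport_recv_pkt() may consume and free pkt, so the byte count is captured in len before the hand-off; the old code read pkt->len afterwards, a use-after-free. The safe ordering in a compilable sketch:

    #include <stdio.h>
    #include <stdlib.h>

    struct pkt { unsigned int len; };

    /* Stand-in for virtio_transport_recv_pkt(): takes ownership
     * and may free the packet. */
    static void recv_pkt(struct pkt *p)
    {
            free(p);
    }

    int main(void)
    {
            struct pkt *p = malloc(sizeof(*p));

            if (!p)
                    return 1;
            p->len = 512;

            unsigned int len = p->len;  /* capture before hand-off */
            recv_pkt(p);
            printf("add_used with %u bytes\n", len);  /* no UAF */
            return 0;
    }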
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 114a0c88afb8..e383ecdaca59 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c | |||
@@ -327,6 +327,8 @@ static inline int virtqueue_add(struct virtqueue *_vq, | |||
327 | * host should service the ring ASAP. */ | 327 | * host should service the ring ASAP. */ |
328 | if (out_sgs) | 328 | if (out_sgs) |
329 | vq->notify(&vq->vq); | 329 | vq->notify(&vq->vq); |
330 | if (indirect) | ||
331 | kfree(desc); | ||
330 | END_USE(vq); | 332 | END_USE(vq); |
331 | return -ENOSPC; | 333 | return -ENOSPC; |
332 | } | 334 | } |
@@ -426,6 +428,7 @@ unmap_release: | |||
426 | if (indirect) | 428 | if (indirect) |
427 | kfree(desc); | 429 | kfree(desc); |
428 | 430 | ||
431 | END_USE(vq); | ||
429 | return -EIO; | 432 | return -EIO; |
430 | } | 433 | } |
431 | 434 | ||
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c index 7487971f9f78..c1010f018bd8 100644 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c | |||
@@ -316,7 +316,7 @@ static int xenbus_write_transaction(unsigned msg_type, | |||
316 | rc = -ENOMEM; | 316 | rc = -ENOMEM; |
317 | goto out; | 317 | goto out; |
318 | } | 318 | } |
319 | } else { | 319 | } else if (msg_type == XS_TRANSACTION_END) { |
320 | list_for_each_entry(trans, &u->transactions, list) | 320 | list_for_each_entry(trans, &u->transactions, list) |
321 | if (trans->handle.id == u->u.msg.tx_id) | 321 | if (trans->handle.id == u->u.msg.tx_id) |
322 | break; | 322 | break; |